diff --git a/component_grids_nuopc.xml b/component_grids_nuopc.xml
index 1e9f77e3..d661b46f 100644
--- a/component_grids_nuopc.xml
+++ b/component_grids_nuopc.xml
@@ -89,6 +89,13 @@
+
+ 488 1
+
+ $DIN_LOC_ROOT/share/domains/domain.lnd.ne3np4_gx3v7.230718.nc
+ $DIN_LOC_ROOT/share/meshes/ne3np4_ESMFmesh_c230714_cdf5.nc
+ ne3np4 is Spectral Elem 10-deg grid:
+
486 1
$DIN_LOC_ROOT/share/meshes/ne3pg3_ESMFmesh_c221214_cdf5.nc
@@ -241,6 +248,13 @@
Test support only
+
+ 186194 1
+ $DIN_LOC_ROOT/share/meshes/POLARCAP_ne30x4_np4_ESMFmesh_cdf5_c20240222.nc
+ ne0np4.POLARCAP.ne30x4 is a Spectral Elem 1-deg grid with a 1/4 deg refined region over the Arctic and Antarctica:
+ Test support only
+
+
2562 1
@@ -320,6 +334,12 @@
tx0.66v1 is tripole v1 0.66-deg MOM6 grid:
Experimental for MOM6 experiments
+
+ 540 480
+ $DIN_LOC_ROOT/share/meshes/tx2_3v2_230415_ESMFmesh.nc
+ tx2_3v2 is tripole v2 2/3-deg MOM6 grid:
+ Experimental for MOM6 experiments
+
1440 1080
$DIN_LOC_ROOT/share/meshes/tx0.25v1_190204_ESMFmesh.nc
diff --git a/machines/Depends.cray b/machines/Depends.cray
index c337bde0..362847c1 100644
--- a/machines/Depends.cray
+++ b/machines/Depends.cray
@@ -1,4 +1,18 @@
-NOOPTOBJS= ice_boundary.o dyn_comp.o unicon.o
+NOOPTOBJS= ice_boundary.o dyn_comp.o unicon.o SnowHydrologyMod.o
+
+# RRTMGP contains OpenMP directives for running on GPUs. These directives need to be
+# disabled to allow building CAM with CPU threading enabled.
+RRTMGP_OBJS=\
+mo_fluxes_byband.o mo_rrtmgp_clr_all_sky.o \
+mo_zenith_angle_spherical_correction.o mo_gas_concentrations.o \
+mo_aerosol_optics_rrtmgp_merra.o mo_cloud_optics_rrtmgp.o \
+mo_gas_optics_rrtmgp.o mo_gas_optics_rrtmgp_kernels.o \
+mo_rte_sw.o mo_rte_lw.o \
+mo_rte_util_array_validation.o mo_rte_util_array.o \
+mo_fluxes_broadband_kernels.o
$(NOOPTOBJS): %.o: %.F90
$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $<
+
+$(RRTMGP_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -h noomp $<
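
A note on the mechanism used throughout these Depends fragments: a GNU make static pattern rule such as $(RRTMGP_OBJS): %.o: %.F90 creates an explicit rule for exactly the listed objects, so it takes precedence over the generic %.o: %.F90 rule and only those objects are built with the modified flags. A minimal standalone sketch, with hypothetical flag settings and a trimmed object list (recipe lines begin with a tab):

    # Only the objects in NOOMP_OBJS are compiled with OpenMP disabled;
    # everything else is built by the generic rule with the default FFLAGS.
    FC         = ftn
    FFLAGS     = -O2
    NOOMP_OBJS = mo_rte_sw.o mo_rte_lw.o

    %.o: %.F90
    	$(FC) -c $(FFLAGS) $<

    $(NOOMP_OBJS): %.o: %.F90
    	$(FC) -c $(FFLAGS) -h noomp $<

In the fragments themselves the generic rule comes from the CIME Makefile that includes them; the fragment only supplies the per-object exceptions.
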
diff --git a/machines/Depends.gnu b/machines/Depends.gnu
index 2d532472..88c35fa5 100644
--- a/machines/Depends.gnu
+++ b/machines/Depends.gnu
@@ -1,2 +1,16 @@
geopk.o:geopk.F90
$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fcray-pointer $<
+
+# RRTMGP contains OpenMP directives for running on GPUs. These directives need to be
+# disabled to allow building CAM with CPU threading enabled.
+RRTMGP_OBJS=\
+mo_fluxes_byband.o mo_rrtmgp_clr_all_sky.o \
+mo_zenith_angle_spherical_correction.o mo_gas_concentrations.o \
+mo_aerosol_optics_rrtmgp_merra.o mo_cloud_optics_rrtmgp.o \
+mo_gas_optics_rrtmgp.o mo_gas_optics_rrtmgp_kernels.o \
+mo_rte_sw.o mo_rte_lw.o \
+mo_rte_util_array_validation.o mo_rte_util_array.o \
+mo_fluxes_broadband_kernels.o
+
+$(RRTMGP_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fno-openmp $<
diff --git a/machines/Depends.intel b/machines/Depends.intel
index b024db74..8bbc3415 100644
--- a/machines/Depends.intel
+++ b/machines/Depends.intel
@@ -28,11 +28,21 @@ kissvec.o
PUMAS_MG_OBJS=\
micro_mg1_0.o \
-micro_mg3_0.o \
+micro_pumas_v1.o \
micro_pumas_data.o \
micro_pumas_utils.o \
wv_sat_methods.o
+# RRTMGP contains OpenMP directives for running on GPUs. These directives need to be
+# disabled to allow building CAM with CPU threading enabled.
+RRTMGP_OBJS=\
+mo_fluxes_byband.o mo_rrtmgp_clr_all_sky.o \
+mo_zenith_angle_spherical_correction.o mo_gas_concentrations.o \
+mo_aerosol_optics_rrtmgp_merra.o mo_cloud_optics_rrtmgp.o \
+mo_gas_optics_rrtmgp.o mo_gas_optics_rrtmgp_kernels.o \
+mo_rte_sw.o mo_rte_lw.o \
+mo_rte_util_array_validation.o mo_rte_util_array.o \
+mo_fluxes_broadband_kernels.o
ifeq ($(DEBUG),FALSE)
$(PERFOBJS): %.o: %.F90
@@ -47,5 +57,7 @@ ifeq ($(DEBUG),FALSE)
$(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $<
$(PUMAS_MG_OBJS): %.o: %.F90
$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-fma -ftz -no-prec-sqrt -qoverride-limits -no-inline-max-total-size -inline-factor=200 -qopt-report=5 $<
-
endif
+
+$(RRTMGP_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qno-openmp $<
diff --git a/machines/Depends.intel-oneapi b/machines/Depends.intel-oneapi
new file mode 100644
index 00000000..c453f0e8
--- /dev/null
+++ b/machines/Depends.intel-oneapi
@@ -0,0 +1,51 @@
+#
+PERFOBJS=\
+prim_advection_mod.o \
+edge_mod.o \
+derivative_mod.o \
+bndry_mod.o \
+prim_advance_mod.o
+
+# CLM's SatellitePhenologyMod is compiled incorrectly with intel 15.0.0 at -O2
+REDUCED_OPT_OBJS=\
+SatellitePhenologyMod.o
+
+# shr_wv_sat_mod does not need to have better than ~0.1% precision, and benefits
+# enormously from a lower precision in the vector functions.
+REDUCED_PRECISION_OBJS=\
+shr_wv_sat_mod.o
+
+SHR_RANDNUM_FORT_OBJS=\
+kissvec_mod.o \
+mersennetwister_mod.o \
+dSFMT_interface.o \
+shr_RandNum_mod.o
+
+SHR_RANDNUM_C_OBJS=\
+dSFMT.o \
+dSFMT_utils.o \
+kissvec.o
+
+PUMAS_MG_OBJS=\
+micro_mg1_0.o \
+micro_pumas_v1.o \
+micro_pumas_data.o \
+micro_pumas_utils.o \
+wv_sat_methods.o
+
+
+ifeq ($(DEBUG),FALSE)
+ $(PERFOBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $<
+ $(REDUCED_OPT_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $<
+ $(REDUCED_PRECISION_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $<
+ $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits $<
+ $(SHR_RANDNUM_C_OBJS): %.o: %.c
+ $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $<
+ $(PUMAS_MG_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-fma -ftz -no-prec-sqrt -qoverride-limits -no-inline-max-total-size -inline-factor=200 -qopt-report=5 $<
+
+endif
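
One detail worth noting about this new intel-oneapi fragment (and the other Depends files): all of the tuned recipes sit inside ifeq ($(DEBUG),FALSE), so a DEBUG=TRUE build ignores them and every object falls back to the generic compile rule with its checking flags. A reduced sketch of the guard, assuming DEBUG is passed in by the including Makefile:

    # The tuned recipe exists only for optimized builds; debug builds keep
    # the default rule for the same objects.
    PERFOBJS = prim_advance_mod.o

    ifeq ($(DEBUG),FALSE)
      $(PERFOBJS): %.o: %.F90
    	$(FC) -c $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $<
    endif
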
diff --git a/machines/Depends.nvhpc b/machines/Depends.nvhpc
index 812904f7..e1c9b4e7 100644
--- a/machines/Depends.nvhpc
+++ b/machines/Depends.nvhpc
@@ -1,7 +1,78 @@
# 10/13/2022 nvhpc compiler on gust.hpc.ucar.edu produced an incorrect mpi task count when esm.F90 was optimized
+# 04/05/2023 do not apply optimization flags to "SHR_RANDNUM_C_OBJS" with the nvhpc compiler; otherwise the ensemble consistency test fails
#
+# Apply optimization levels consistent with those used for the Intel compiler
+PERFOBJS=\
+prim_advection_mod.o \
+edge_mod.o \
+derivative_mod.o \
+bndry_mod.o \
+prim_advance_mod.o
+
REDUCED_OPT_OBJS=\
+SatellitePhenologyMod.o \
esm.o
-$(REDUCED_OPT_OBJS): %.o: %.F90
- $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $<
+REDUCED_PRECISION_OBJS=\
+shr_wv_sat_mod.o
+
+SHR_RANDNUM_FORT_OBJS=\
+kissvec_mod.o \
+mersennetwister_mod.o \
+dSFMT_interface.o \
+shr_RandNum_mod.o
+
+SHR_RANDNUM_C_OBJS=\
+dSFMT.o \
+dSFMT_utils.o \
+kissvec.o
+
+# Apply GPU flags to the following code
+PUMAS_OBJS=\
+micro_mg1_0.o \
+micro_pumas_v1.o \
+micro_pumas_data.o \
+micro_pumas_utils.o \
+pumas_stochastic_collect_tau.o \
+micro_pumas_cam.o \
+wv_sat_methods.o \
+wv_saturation.o \
+macrop_driver.o \
+shr_spfn_mod.o
+
+RRTMGP_OBJS=\
+rrtmgp_allsky.o \
+rrtmgp_rfmip_lw.o \
+rrtmgp_rfmip_sw.o \
+mo_fluxes_byband.o \
+mo_zenith_angle_spherical_correction.o \
+mo_rrtmgp_clr_all_sky.o \
+mo_gas_concentrations.o \
+mo_aerosol_optics_rrtmgp_merra.o \
+mo_cloud_optics_rrtmgp.o \
+mo_gas_optics_rrtmgp.o \
+mo_gas_optics_rrtmgp_kernels.o \
+mo_rte_lw.o \
+mo_rte_sw.o \
+mo_rte_util_array_validation.o \
+mo_rte_util_array.o \
+mo_fluxes_broadband_kernels.o \
+mo_rte_solver_kernels.o \
+mo_optical_props_kernels.o
+
+ifeq ($(DEBUG),FALSE)
+ $(PERFOBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -Mfprelaxed=div $<
+ $(REDUCED_OPT_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $<
+ $(REDUCED_PRECISION_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fast $<
+ $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fast -Mfprelaxed=div,sqrt $<
+# $(SHR_RANDNUM_C_OBJS): %.o: %.c
+# $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fast $<
+ $(PUMAS_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fastsse -Mnofma -Mflushz -Mfprelaxed=sqrt $(GPUFLAGS) $<
+ $(RRTMGP_OBJS): %.o: %.F90
+ $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(GPUFLAGS) $<
+endif
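
A note on $(GPUFLAGS) in the PUMAS and RRTMGP rules above: for CPU-only cases it is expected to be empty, so the rules reduce to ordinary optimized compiles, while for GPU cases it is populated from GPU_TYPE and GPU_OFFLOAD by the nvhpc.cmake changes later in this diff. A sketch of the two cases; the empty default is an assumption, and the example flag value is copied from nvhpc.cmake:

    # CPU-only case (assumed): GPUFLAGS is empty and this is a plain
    # optimized compile of the RRTMGP objects.
    GPUFLAGS =
    # GPU case: GPU_TYPE=a100 with GPU_OFFLOAD=openacc would give
    #   GPUFLAGS = -acc -gpu=cc80,lineinfo,nofma -Minfo=accel
    # and the same rule then builds the same objects for offload.
    $(RRTMGP_OBJS): %.o: %.F90
    	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(GPUFLAGS) $<
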
diff --git a/machines/Depends.nvhpc-gpu b/machines/Depends.nvhpc-gpu
deleted file mode 100644
index 4817ee0e..00000000
--- a/machines/Depends.nvhpc-gpu
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-PUMAS_OBJS=\
-micro_mg1_0.o \
-micro_pumas_v1.o \
-micro_pumas_data.o \
-micro_pumas_utils.o \
-micro_pumas_cam.o \
-wv_sat_methods.o \
-wv_saturation.o \
-macrop_driver.o \
-shr_spfn_mod.o
-
-ifeq ($(DEBUG),FALSE)
- $(PUMAS_OBJS): %.o: %.F90
- $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -acc -gpu=cc70,lineinfo,nofma -Minfo=accel $<
-endif
diff --git a/machines/Depends.pgi-gpu b/machines/Depends.pgi-gpu
deleted file mode 100644
index a4bb6df2..00000000
--- a/machines/Depends.pgi-gpu
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-PUMAS_MG_OBJS=\
-micro_mg1_0.o \
-micro_pumas_v1.o \
-micro_pumas_data.o \
-micro_pumas_utils.o \
-micro_pumas_cam.o \
-wv_sat_methods.o \
-wv_saturation.o \
-macrop_driver.o \
-shr_spfn_mod.o
-
-ifeq ($(DEBUG),FALSE)
- $(PUMAS_MG_OBJS): %.o: %.F90
- $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -acc -ta=tesla:cc70,lineinfo,nofma -Minfo=accel $<
-endif
diff --git a/machines/aleph/config_machines.xml b/machines/aleph/config_machines.xml
new file mode 100644
index 00000000..9d2d2daf
--- /dev/null
+++ b/machines/aleph/config_machines.xml
@@ -0,0 +1,64 @@
+
+ XC50 SkyLake, os is CNL, 40 pes/node, batch system is PBSPro
+ CNL
+ intel,gnu,cray
+ mpt,mpi-serial
+ /proj/$ENV{USER}
+ $ENV{DIN_LOC_ROOT}
+ $DIN_LOC_ROOT
+ ${CIME_OUTPUT_ROOT}/archive/$CASE
+ ${CIME_OUTPUT_ROOT}/cesm_baselines
+ 8
+ pbs
+ @ pusan.ac.kr
+ 40
+ 40
+
+ aprun
+
+ -j {{ hyperthreading }}
+ -n {{ total_tasks }}
+ -N $MAX_MPITASKS_PER_NODE
+ -S {{ tasks_per_numa }}
+ -d $ENV{OMP_NUM_THREADS}
+ --mpmd-env OMP_NUM_THREADS=$OMP_NUM_THREADS
+
+
+
+ /opt/modules/default/init/perl.pm
+ /opt/modules/default/init/python.py
+ /opt/modules/default/init/sh
+ /opt/modules/default/init/csh
+ /opt/modules/default/bin/modulecmd perl
+ /opt/modules/default/bin/modulecmd python
+ module
+ module
+
+ craype-x86-skylake
+ PrgEnv-pgi
+ PrgEnv-intel
+ PrgEnv-cray
+ PrgEnv-gnu
+ cray-netcdf
+ cray-hdf5
+ cray-parallel-netcdf
+ papi
+
+
+ PrgEnv-intel
+ craype-x86-skylake
+ craype-hugepages2M
+ perftools-base/7.0.4
+ cray-netcdf/4.6.1.3
+ cray-hdf5/1.10.2.0
+ cray-parallel-netcdf/1.11.1.1
+ papi/5.6.0.4
+ gridftp/6.0
+ cray-python/3.6.5.1
+
+
+
+ 256M
+ /home/jedwards/workflow/CESM_postprocessing
+
+
diff --git a/machines/athena/config_machines.xml b/machines/athena/config_machines.xml
new file mode 100644
index 00000000..9e9b5e30
--- /dev/null
+++ b/machines/athena/config_machines.xml
@@ -0,0 +1,84 @@
+
+
CMCC IBM iDataPlex, os is Linux, 16 pes/node, batch system is LSF
+ LINUX
+ intel,intel15
+ mpich2
+ /work/$USER/CESM2
+ /users/home/dp16116/CESM2/inputdata
+ $DIN_LOC_ROOT/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/ccsm_baselines
+ /users/home/dp16116/CESM2/cesm2.0.1/cime/tools/cprnc/cprnc
+ /usr/lib64/perl5:/usr/share/perl5
+ 8
+ lsf
+
+ 30
+ 15
+ FALSE
+
+ mpirun_Impi5
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/sh
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+
+
+ ANACONDA2/python2.7
+ INTEL/intel_xe_2015.3.187
+ SZIP/szip-2.1_int15
+
+
+ ESMF/esmf-6.3.0rp1-intelmpi-64-g_int15
+
+
+ ESMF/esmf-6.3.0rp1-intelmpi-64-O_int15
+
+
+ ESMF/esmf-6.3.0rp1-mpiuni-64-g_int15
+
+
+ ESMF/esmf-6.3.0rp1-mpiuni-64-O_int15
+
+
+ HDF5/hdf5-1.8.15-patch1
+ NETCDF/netcdf-C_4.3.3.1-F_4.4.2_C++_4.2.1
+
+
+ HDF5/hdf5-1.8.15-patch1_parallel
+ NETCDF/netcdf-C_4.3.3.1-F_4.4.2_C++_4.2.1_parallel
+ PARALLEL_NETCDF/parallel-netcdf-1.6.1
+
+
+ CMAKE/cmake-3.3.0-rc1
+
+
+ INTEL/intel_xe_2013.5.192
+ INTEL/intel_xe_2013
+ HDF5/hdf5-1.8.10-patch1
+ INTEL/intel_xe_2015.3.187
+
+
+
+ 256M
+
+
+ gpfs
+ on
+ snb
+ lsf
+ 1
+ on
+ on
+ /users/home/models/nemo/xios-cmip6/intel_xe_2013
+
+
diff --git a/machines/aws-hpc6a/config_machines.xml b/machines/aws-hpc6a/config_machines.xml
new file mode 100644
index 00000000..6d4b9bfc
--- /dev/null
+++ b/machines/aws-hpc6a/config_machines.xml
@@ -0,0 +1,49 @@
+
+ AWS HPC6a (96-core AMD) Nodes
+ LINUX
+ intel
+ impi
+ /scratch/$USER
+ /scratch/inputdata
+ $DIN_LOC_ROOT/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/cesm_baselines
+ $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc
+ 8
+ slurm
+ cseg
+
+ 96
+ 96
+ FALSE
+
+ scontrol show hostnames $SLURM_JOB_NODELIST > hostfile ; mpirun -f hostfile
+
+ -n {{ total_tasks }}
+
+
+
+ env -u I_MPI_OFI_PROVIDER mpirun
+
+ -n {{ total_tasks }}
+
+
+
+
+ 256M
+ /scratch/$USER
+ icc
+ icpc
+ ifort
+ ifort
+ /opt/ncar/software
+ /opt/ncar/esmf/lib/esmf.mk
+
+
+ ON
+ SUMMARY
+
+
+ -1
+
+
diff --git a/machines/betzy/config_machines.xml b/machines/betzy/config_machines.xml
new file mode 100644
index 00000000..d96cb97c
--- /dev/null
+++ b/machines/betzy/config_machines.xml
@@ -0,0 +1,83 @@
+
+ BullSequana XH2000 AMD® Epyc™ "Rome" 2.2GHz, 128-way nodes, os is Linux, batch system is SLURM
+ LINUX
+ intel
+ openmpi,impi
+ /cluster/work/users/$USER/noresm
+ /cluster/shared/noresm/inputdata
+ /cluster/shared/noresm/inputdata/atm/datm7
+ /cluster/work/users/$USER/archive/$CASE
+ /cluster/shared/noresm/noresm_baselines
+ /cluster/shared/noresm/tools/cprnc/cprnc
+ 8
+ slurm
+ noresmCommunity
+ 128
+ 128
+ TRUE
+
+
+
+
+ srun
+
+
+ $ENV{LMOD_PKG}/init/perl
+ $ENV{LMOD_PKG}/init/env_modules_python.py
+ $ENV{LMOD_PKG}/init/csh
+ $ENV{LMOD_PKG}/init/sh
+ $ENV{LMOD_PKG}/libexec/lmod perl
+ $ENV{LMOD_PKG}/libexec/lmod python
+ module
+ module
+
+
+ StdEnv
+ /cluster/shared/noresm/eb_mods/modules/all
+ ESMF/8.4.1-iomkl-2021b-ParallelIO-2.5.10
+ CMake/3.21.1-GCCcore-11.2.0
+ Python/3.9.6-GCCcore-11.2.0
+
+ ParMETIS/4.0.3-iompi-2021b
+
+
+
+ StdEnv
+ /cluster/shared/noresm/eb_mods/modules/all
+ ESMF/8.4.1-intel-2021b-ParallelIO-2.5.10
+ CMake/3.21.1-GCCcore-11.2.0
+ Python/3.9.6-GCCcore-11.2.0
+ ParMETIS/4.0.3-iimpi-2021b
+
+
+
+ $ENV{EBROOTESMF}/lib/esmf.mk
+ ON
+ SUMMARY
+ mlx5_0:1
+ 64M
+ 5
+ 2
+ $ENV{EBROOTPARALLELIO}/lib
+ $ENV{EBROOTPARALLELIO}/include
+ pnetcdf,netcdf,netcdf4p,netcdf4c
+ 1
+ self,vader
+ 1
+ 1
+ ^fca
+ 95
+ 8
+ ompio
+ 1048576
+ 8
+ ^lockedfile,individual
+ lustre
+ on
+
+
+
+
+ -1
+
+
diff --git a/machines/casper/config_machines.xml b/machines/casper/config_machines.xml
new file mode 100644
index 00000000..afd46f00
--- /dev/null
+++ b/machines/casper/config_machines.xml
@@ -0,0 +1,125 @@
+
+ NCAR GPU platform, os is Linux, 36 pes/node, batch system is pbs
+ LINUX
+ nvhpc,intel
+ openmpi
+ /glade/scratch/$USER
+ $ENV{CESMDATAROOT}/inputdata
+ /glade/p/cgd/tss/CTSM_datm_forcing_data
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/cesm_baselines
+ $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc
+ 8
+ pbs
+ ASAP/CISL
+ 36
+ 8
+ 36
+ 36
+ none,v100
+ none,openacc,openmp,combined
+ TRUE
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ /glade/u/apps/casper/23.10/spack/opt/spack/lmod/8.7.24/gcc/7.5.0/m4jx/lmod/lmod/init/perl
+ /glade/u/apps/casper/23.10/spack/opt/spack/lmod/8.7.24/gcc/7.5.0/m4jx/lmod/lmod/init/env_modules_python.py
+ /glade/u/apps/casper/23.10/spack/opt/spack/lmod/8.7.24/gcc/7.5.0/m4jx/lmod/lmod/init/sh
+ /glade/u/apps/casper/23.10/spack/opt/spack/lmod/8.7.24/gcc/7.5.0/m4jx/lmod/lmod/init/csh
+ /glade/u/apps/casper/23.10/spack/opt/spack/lmod/8.7.24/gcc/7.5.0/m4jx/lmod/lmod/libexec/lmod perl
+ /glade/u/apps/casper/23.10/spack/opt/spack/lmod/8.7.24/gcc/7.5.0/m4jx/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ ncarenv/23.10
+ cmake/3.26.3
+
+
+ nvhpc/23.7
+
+
+ intel/2023.2.1
+ mkl/2023.2.0
+
+
+ cuda/12.2.1
+
+
+ openmpi/4.1.6
+ netcdf-mpi/4.9.2
+ parallel-netcdf/1.12.3
+
+
+ netcdf/4.9.2
+
+
+ openmpi/4.1.6
+ netcdf-mpi/4.9.2
+ parallel-netcdf/1.12.3
+
+
+ netcdf/4.9.2
+
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.4.0b08_casper-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.4.0b08_casper-ncdfio-openmpi-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2/
+ esmf-8.4.1b01-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2/
+ esmf-8.4.1b01-ncdfio-openmpi-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.4.0b08_casper-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b11_casper-ncdfio-openmpi-O
+
+
+ ncarcompilers/0.5.0
+
+
+ parallelio/2.6.2
+ esmf/8.5.0
+ ncarcompilers/1.0.0
+
+
+
+ /glade/u/apps/dav/modulefiles/default/compilers:/glade/u/apps/dav/modulefiles/default/idep
+ 256M
+ /glade/scratch/$USER
+ /glade/p/cesmdata/cseg
+ $ENV{NETCDF}
+
+
+ ON
+ SUMMARY
+
+
+ openacc
+
+
+ -1
+
+
diff --git a/machines/centos7-linux/config_machines.xml b/machines/centos7-linux/config_machines.xml
new file mode 100644
index 00000000..786fb1be
--- /dev/null
+++ b/machines/centos7-linux/config_machines.xml
@@ -0,0 +1,61 @@
+
+
+ Example port to centos7 linux system with gcc, netcdf, pnetcdf and mpich
+ using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules
+
+
+ LINUX
+ https://howto.get.out
+ gnu
+ mpich
+ none
+
+ $ENV{HOME}/cesm/scratch
+ $ENV{HOME}/cesm/inputdata
+ $ENV{HOME}/cesm/inputdata/lmwg
+ $ENV{HOME}/cesm/archive/$CASE
+ $ENV{HOME}/cesm/cesm_baselines
+ $ENV{HOME}/cesm/tools/cime/tools/cprnc/cprnc
+ make
+ 8
+ none
+ me@my.address
+ 8
+ 8
+ FALSE
+
+ mpiexec
+
+ -np {{ total_tasks }}
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/sh
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+ lang/python/3.7.0
+
+
+ compiler/gnu/8.2.0
+ mpi/3.3/gnu-8.2.0
+ tool/netcdf/4.7.4/gcc-8.2.0
+ tool/parallel-netcdf/1.12.1/mpich
+
+
+
+ 256M
+ $ENV{HOME}/ESMF_8_2_0b22/lib/libg/Linux.gfortran.64.mvapich2.default/esmf.mk
+
+
+ -1
+
+
diff --git a/machines/cheyenne/config_machines.xml b/machines/cheyenne/config_machines.xml
new file mode 100644
index 00000000..c84c7502
--- /dev/null
+++ b/machines/cheyenne/config_machines.xml
@@ -0,0 +1,287 @@
+
+ NCAR SGI platform, os is Linux, 36 pes/node, batch system is PBS
+
+ MPT: Launcher network accept (MPI_LAUNCH_TIMEOUT) timed out
+ 10
+ LINUX
+ intel,gnu,nvhpc,pgi
+ mpt,openmpi
+ openmpi,mpt
+ openmpi,mpt
+ mpt,openmpi
+ /glade/scratch/$USER
+ $ENV{CESMDATAROOT}/inputdata
+ /glade/p/cgd/tss/CTSM_datm_forcing_data
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/cesm_baselines
+ $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc
+ 8
+ pbs
+ cseg
+
+ 36
+ 36
+ TRUE
+
+
+ mpiexec_mpt
+
+ -p "%g:"
+ -np {{ total_tasks }}
+
+ omplace -tm open64
+
+
+
+ mpirun `hostname`
+
+ -np {{ total_tasks }}
+
+ omplace -tm open64
+
+
+
+ mpiexec_mpt
+
+ -p "%g:"
+ -np {{ total_tasks }}
+
+ omplace -tm open64 -vv
+
+
+
+ mpirun `hostname`
+
+ -np {{ total_tasks }}
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+ --tag-output
+
+
+
+
+ /opt/sgi/mpt/mpt-2.15/bin/mpirun $ENV{UNIT_TEST_HOST} -np 1
+
+
+ /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/perl
+ /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/env_modules_python.py
+ /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/csh
+ /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/sh
+ /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/libexec/lmod perl
+ /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ ncarenv/1.3
+ python/3.7.9
+ cmake/3.22.0
+
+
+ intel/19.1.1
+ esmf_libs
+ mkl
+
+
+ gnu/10.1.0
+ openblas/0.3.9
+
+
+ pgi/20.4
+
+
+ nvhpc/22.2
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.5.0-ncdfio-mpt-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.5.0-ncdfio-mpt-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.5.0-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.5.0-ncdfio-openmpi-O
+
+
+ mpi-serial/2.3.0
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.5.0-ncdfio-mpiuni-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
+ esmf-8.5.0-ncdfio-mpiuni-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
+ esmf-8.5.0-ncdfio-mpt-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
+ esmf-8.5.0-ncdfio-mpt-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
+ esmf-8.5.0-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
+ esmf-8.5.0-ncdfio-openmpi-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
+ esmf-8.5.0-ncdfio-mpiuni-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
+ esmf-8.5.0-ncdfio-mpiuni-O
+
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b23-ncdfio-mpt-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b23-ncdfio-mpt-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
+ esmf-8.5.0-ncdfio-mpt-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
+ esmf-8.5.0-ncdfio-mpt-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
+ esmf-8.5.0-ncdfio-openmpi-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
+ esmf-8.5.0-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b23-ncdfio-openmpi-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b23-ncdfio-openmpi-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b23-ncdfio-mpiuni-g
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
+ esmf-8.2.0b23-ncdfio-mpiuni-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
+ esmf-8.5.0-ncdfio-mpiuni-O
+
+
+ /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
+ esmf-8.5.0-ncdfio-mpiuni-O
+
+
+ mpt/2.25
+ netcdf-mpi/4.9.0
+ pnetcdf/1.12.3
+
+
+ mpt/2.22
+ netcdf-mpi/4.8.1
+ pnetcdf/1.12.2
+
+
+ mpt/2.22
+ netcdf-mpi/4.7.4
+ pnetcdf/1.12.1
+
+
+ mpt/2.25
+ netcdf-mpi/4.9.0
+ pnetcdf/1.12.3
+
+
+ openmpi/4.1.4
+ netcdf-mpi/4.9.0
+ pnetcdf/1.12.3
+
+
+ openmpi/4.1.4
+ netcdf-mpi/4.9.0
+ pnetcdf/1.12.3
+
+
+ openmpi/4.0.5
+ netcdf-mpi/4.7.4
+
+
+ openmpi/4.1.4
+ netcdf-mpi/4.9.0
+ pnetcdf/1.12.3
+
+
+ ncarcompilers/0.5.0
+
+
+ netcdf/4.9.0
+
+
+ netcdf/4.9.0
+
+
+ netcdf/4.9.0
+
+
+ netcdf/4.9.0
+
+
+ pio/2.5.10
+
+
+ pio/2.5.10d
+
+
+
+ 1024M
+ /glade/scratch/$USER
+ 16
+
+
+
+ ON
+ SUMMARY
+ /glade/work/turuncu/FV3GFS/benchmark-inputs/2012010100/gfs/fcst
+ /glade/work/turuncu/FV3GFS/fix_am
+ /glade/work/turuncu/FV3GFS/addon
+ PASSIVE
+ true
+
+
+ false
+
+
+ /glade/scratch/$USER
+
+
+ -1
+
+
diff --git a/machines/cmake_macros/derecho.cmake b/machines/cmake_macros/derecho.cmake
new file mode 100644
index 00000000..0dd51b4a
--- /dev/null
+++ b/machines/cmake_macros/derecho.cmake
@@ -0,0 +1,14 @@
+if (COMP_NAME STREQUAL gptl)
+ string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY")
+endif()
+set(MPI_SERIAL_PATH "$ENV{NCAR_ROOT_MPI_SERIAL}")
+set(NETCDF_PATH "$ENV{NETCDF}")
+set(PIO_FILESYSTEM_HINTS "lustre")
+set(PNETCDF_PATH "$ENV{PNETCDF}")
+if(DEFINED ENV{PIO})
+ set(PIO_LIBDIR "$ENV{PIO}/lib")
+ set(PIO_INCDIR "$ENV{PIO}/include")
+endif()
+# If we want to use cray-libsci instead of mkl uncomment this line as well as the module in config_machines.xml
+string(REPLACE "-mkl=cluster" "" SLIBS "${SLIBS}")
+string(APPEND CPPDEFS " -DHAVE_GETTID")
diff --git a/machines/cmake_macros/gnu.cmake b/machines/cmake_macros/gnu.cmake
index 621740c5..bea6c8e9 100644
--- a/machines/cmake_macros/gnu.cmake
+++ b/machines/cmake_macros/gnu.cmake
@@ -1,4 +1,8 @@
-string(APPEND CFLAGS " -std=gnu99")
+if (COMP_NAME STREQUAL mpi-serial)
+ string(APPEND CFLAGS " -std=gnu89")
+else()
+ string(APPEND CFLAGS " -std=gnu99")
+endif()
if (compile_threaded)
string(APPEND CFLAGS " -fopenmp")
endif()
diff --git a/machines/cmake_macros/gust.cmake b/machines/cmake_macros/gust.cmake
index 5332d19c..2772a168 100644
--- a/machines/cmake_macros/gust.cmake
+++ b/machines/cmake_macros/gust.cmake
@@ -5,6 +5,7 @@ set(NETCDF_PATH "$ENV{NETCDF}")
set(PIO_FILESYSTEM_HINTS "lustre")
set(PNETCDF_PATH "$ENV{PNETCDF}")
# If we want to use cray-libsci instead of mkl uncomment this line as well as the module in config_machines.xml
-#string(REPLACE "-mkl=cluster" "" SLIBS "${SLIBS}")
-string(REPLACE "-mkl=cluster" "-qmkl=cluster" SLIBS "${SLIBS}")
-#string(APPEND CPPDEFS " -DNO_SHR_VMATH ")
\ No newline at end of file
+string(REPLACE "-mkl=cluster" "" SLIBS "${SLIBS}")
+#string(REPLACE "-mkl=cluster" "-qmkl=cluster" SLIBS "${SLIBS}")
+#string(APPEND CPPDEFS " -DNO_SHR_VMATH ")
+string(APPEND CPPDEFS " -DHAVE_GETTID")
diff --git a/machines/cmake_macros/intel-oneapi.cmake b/machines/cmake_macros/intel-oneapi.cmake
new file mode 120000
index 00000000..9e889dc6
--- /dev/null
+++ b/machines/cmake_macros/intel-oneapi.cmake
@@ -0,0 +1 @@
+intel.cmake
\ No newline at end of file
diff --git a/machines/cmake_macros/intel-oneapi_derecho.cmake b/machines/cmake_macros/intel-oneapi_derecho.cmake
new file mode 120000
index 00000000..2cb843be
--- /dev/null
+++ b/machines/cmake_macros/intel-oneapi_derecho.cmake
@@ -0,0 +1 @@
+intel_derecho.cmake
\ No newline at end of file
diff --git a/machines/cmake_macros/intel.cmake b/machines/cmake_macros/intel.cmake
index 7328c85e..3c25605f 100644
--- a/machines/cmake_macros/intel.cmake
+++ b/machines/cmake_macros/intel.cmake
@@ -8,6 +8,7 @@ endif()
if (DEBUG)
string(APPEND CFLAGS " -O0 -g")
endif()
+string(APPEND CFLAGS " -no-fma")
string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DCPRINTEL")
string(APPEND CXX_LDFLAGS " -cxxlib")
set(CXX_LINKER "FORTRAN")
@@ -22,6 +23,7 @@ endif()
if (NOT DEBUG)
string(APPEND FFLAGS " -O2 -debug minimal")
endif()
+string(APPEND FFLAGS " -no-fma")
set(FFLAGS_NOOPT "-O0")
set(FIXEDFLAGS "-fixed")
set(FREEFLAGS "-free")
diff --git a/machines/cmake_macros/intel_aws-hpc6a.cmake b/machines/cmake_macros/intel_aws-hpc6a.cmake
index 0e4d2815..2c7921c6 100644
--- a/machines/cmake_macros/intel_aws-hpc6a.cmake
+++ b/machines/cmake_macros/intel_aws-hpc6a.cmake
@@ -1,5 +1,5 @@
-string(APPEND CFLAGS " -qopt-report -march=core-avx2 -mtune=core-avx2 -no-fma")
-string(APPEND FFLAGS " -qopt-report -march=core-avx2 -mtune=core-avx2 -no-fma")
+string(APPEND CFLAGS " -qopt-report -march=core-avx2 -mtune=core-avx2")
+string(APPEND FFLAGS " -qopt-report -march=core-avx2 -mtune=core-avx2")
if (DEBUG)
string(APPEND CMAKE_OPTS " -DPIO_ENABLE_LOGGING=ON")
endif()
diff --git a/machines/cmake_macros/intel_bluewaters.cmake b/machines/cmake_macros/intel_bluewaters.cmake
index 29b1da06..3cb50907 100644
--- a/machines/cmake_macros/intel_bluewaters.cmake
+++ b/machines/cmake_macros/intel_bluewaters.cmake
@@ -1,3 +1,3 @@
set(HAS_F2008_CONTIGUOUS "FALSE")
-string(APPEND FFLAGS " -dynamic -mkl=sequential -no-fma")
-string(APPEND CFLAGS " -dynamic -mkl=sequential -no-fma")
+string(APPEND FFLAGS " -dynamic -mkl=sequential")
+string(APPEND CFLAGS " -dynamic -mkl=sequential")
diff --git a/machines/cmake_macros/intel_casper.cmake b/machines/cmake_macros/intel_casper.cmake
index 08617ce8..b10bf277 100644
--- a/machines/cmake_macros/intel_casper.cmake
+++ b/machines/cmake_macros/intel_casper.cmake
@@ -1,5 +1,5 @@
-string(APPEND CFLAGS " -qopt-report -xCORE_AVX2 -no-fma")
-string(APPEND FFLAGS " -qopt-report -xCORE_AVX2 -no-fma")
+string(APPEND CFLAGS " -qopt-report -xCORE_AVX2")
+string(APPEND FFLAGS " -qopt-report -xCORE_AVX2")
if (DEBUG)
string(APPEND CMAKE_OPTS " -DPIO_ENABLE_LOGGING=ON")
endif()
diff --git a/machines/cmake_macros/intel_cheyenne.cmake b/machines/cmake_macros/intel_cheyenne.cmake
index 08617ce8..b10bf277 100644
--- a/machines/cmake_macros/intel_cheyenne.cmake
+++ b/machines/cmake_macros/intel_cheyenne.cmake
@@ -1,5 +1,5 @@
-string(APPEND CFLAGS " -qopt-report -xCORE_AVX2 -no-fma")
-string(APPEND FFLAGS " -qopt-report -xCORE_AVX2 -no-fma")
+string(APPEND CFLAGS " -qopt-report -xCORE_AVX2")
+string(APPEND FFLAGS " -qopt-report -xCORE_AVX2")
if (DEBUG)
string(APPEND CMAKE_OPTS " -DPIO_ENABLE_LOGGING=ON")
endif()
diff --git a/machines/cmake_macros/intel_derecho.cmake b/machines/cmake_macros/intel_derecho.cmake
new file mode 100644
index 00000000..bdc0366f
--- /dev/null
+++ b/machines/cmake_macros/intel_derecho.cmake
@@ -0,0 +1,14 @@
+set(CONFIG_ARGS "--host=cray")
+string(APPEND CFLAGS " -qopt-report -march=core-avx2")
+string(APPEND FFLAGS " -qopt-report -march=core-avx2")
+if (COMP_NAME STREQUAL gptl)
+ string(APPEND CPPDEFS " -DHAVE_SLASHPROC")
+endif()
+if (COMP_NAME STREQUAL mpi-serial)
+ string(APPEND CFLAGS " -std=c89 ")
+endif()
+if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded)
+ set(PFUNIT_PATH "$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit4.8.0_derecho_Intel2023.2.1_noMPI_noOpenMP")
+endif()
+set(SCC icx)
+set(SFC ifort)
diff --git a/machines/cmake_macros/intel_gust.cmake b/machines/cmake_macros/intel_gust.cmake
index f3943efe..1edce09d 100644
--- a/machines/cmake_macros/intel_gust.cmake
+++ b/machines/cmake_macros/intel_gust.cmake
@@ -4,3 +4,8 @@ string(APPEND FFLAGS " -march=core-avx2")
if (COMP_NAME STREQUAL gptl)
string(APPEND CPPDEFS " -DHAVE_SLASHPROC")
endif()
+if (COMP_NAME STREQUAL mpi-serial)
+ string(APPEND CFLAGS " -std=c89 ")
+endif()
+set(SCC icx)
+set(SFC ifort)
\ No newline at end of file
diff --git a/machines/cmake_macros/intel_stampede2-knl.cmake b/machines/cmake_macros/intel_stampede2-knl.cmake
index 6cdd9fe9..7836ea9d 100644
--- a/machines/cmake_macros/intel_stampede2-knl.cmake
+++ b/machines/cmake_macros/intel_stampede2-knl.cmake
@@ -1,5 +1,5 @@
-string(APPEND CFLAGS " -xCOMMON-AVX512 -no-fma")
-string(APPEND FFLAGS " -xCOMMON-AVX512 -no-fma")
+string(APPEND CFLAGS " -xCOMMON-AVX512")
+string(APPEND FFLAGS " -xCOMMON-AVX512")
if (MPILIB STREQUAL mpi-serial)
string(APPEND FFLAGS " -mcmodel medium")
endif()
diff --git a/machines/cmake_macros/intel_stampede2-skx.cmake b/machines/cmake_macros/intel_stampede2-skx.cmake
index 6cdd9fe9..7836ea9d 100644
--- a/machines/cmake_macros/intel_stampede2-skx.cmake
+++ b/machines/cmake_macros/intel_stampede2-skx.cmake
@@ -1,5 +1,5 @@
-string(APPEND CFLAGS " -xCOMMON-AVX512 -no-fma")
-string(APPEND FFLAGS " -xCOMMON-AVX512 -no-fma")
+string(APPEND CFLAGS " -xCOMMON-AVX512")
+string(APPEND FFLAGS " -xCOMMON-AVX512")
if (MPILIB STREQUAL mpi-serial)
string(APPEND FFLAGS " -mcmodel medium")
endif()
diff --git a/machines/cmake_macros/intel_zeus.cmake b/machines/cmake_macros/intel_zeus.cmake
index 281ae04b..be4fc013 100644
--- a/machines/cmake_macros/intel_zeus.cmake
+++ b/machines/cmake_macros/intel_zeus.cmake
@@ -2,14 +2,14 @@ set(AR "xiar")
set(ARFLAGS "cru")
if (MPILIB STREQUAL mpi-serial)
string(APPEND FFLAGS " -mcmodel medium")
- string(APPEND FFLAGS " -mkl -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma")
- string(APPEND CFLAGS " -mkl -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma")
+ string(APPEND FFLAGS " -mkl -xSKYLAKE-AVX512 -qopt-zmm-usage=high")
+ string(APPEND CFLAGS " -mkl -xSKYLAKE-AVX512 -qopt-zmm-usage=high")
string(APPEND LDFLAGS " -mkl -lstdc++")
string(APPEND SLIBS " -lstdc++")
endif()
if (MPILIB STREQUAL impi)
- string(APPEND FFLAGS " -mkl=cluster -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma")
- string(APPEND CFLAGS " -mkl=cluster -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma")
+ string(APPEND FFLAGS " -mkl=cluster -xSKYLAKE-AVX512 -qopt-zmm-usage=high")
+ string(APPEND CFLAGS " -mkl=cluster -xSKYLAKE-AVX512 -qopt-zmm-usage=high")
string(APPEND LDFLAGS " -mkl=cluster")
string(APPEND SLIBS " -lstdc++")
endif()
diff --git a/machines/cmake_macros/nvhpc-gpu.cmake b/machines/cmake_macros/nvhpc-gpu.cmake
deleted file mode 100644
index 679e81b2..00000000
--- a/machines/cmake_macros/nvhpc-gpu.cmake
+++ /dev/null
@@ -1,46 +0,0 @@
-string(APPEND CFLAGS " -gopt -time")
-if (compile_threaded)
- string(APPEND CFLAGS " -mp")
-endif()
-string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI")
-set(CXX_LINKER "CXX")
-set(FC_AUTO_R8 "-r8")
-string(APPEND FFLAGS " -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee")
-if (compile_threaded)
- string(APPEND FFLAGS " -mp")
-endif()
-if (DEBUG)
- string(APPEND FFLAGS " -O0 -g -Ktrap=fp -Mbounds -Kieee")
-endif()
-if (COMP_NAME STREQUAL datm)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL dlnd)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL drof)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL dwav)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL dice)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL docn)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-set(FFLAGS_NOOPT "-O0")
-set(FIXEDFLAGS "-Mfixed")
-set(FREEFLAGS "-Mfree")
-set(HAS_F2008_CONTIGUOUS "FALSE")
-set(LDFLAGS "-time -Wl,--allow-multiple-definition")
-if (compile_threaded)
- string(APPEND LDFLAGS " -mp")
-endif()
-set(MPICC "mpicc")
-set(MPICXX "mpicxx")
-set(MPIFC "mpif90")
-set(SCC "nvc")
-set(SCXX "nvc++")
-set(SFC "nvfortran")
diff --git a/machines/cmake_macros/nvhpc-gpu_casper.cmake b/machines/cmake_macros/nvhpc-gpu_casper.cmake
deleted file mode 100644
index ed2adbc6..00000000
--- a/machines/cmake_macros/nvhpc-gpu_casper.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-if (NOT DEBUG)
- string(APPEND CFLAGS " -O -tp=skylake -Mnofma")
-endif()
-if (NOT DEBUG)
- string(APPEND FFLAGS " -O -tp=skylake -Mnofma")
-endif()
-string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS")
-if (NOT DEBUG)
- string(APPEND LDFLAGS " -O -tp=skylake -Mnofma -acc -gpu=cc70,lineinfo,nofma -Minfo=accel")
-endif()
-string(APPEND SLIBS " -llapack -lblas")
-if (MPILIB STREQUAL mpi-serial)
- string(APPEND SLIBS " -ldl")
-endif()
-string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff")
diff --git a/machines/cmake_macros/nvhpc.cmake b/machines/cmake_macros/nvhpc.cmake
index 3e1b4516..d541457e 100644
--- a/machines/cmake_macros/nvhpc.cmake
+++ b/machines/cmake_macros/nvhpc.cmake
@@ -3,9 +3,11 @@ if (compile_threaded)
string(APPEND CFLAGS " -mp")
endif()
if (NOT DEBUG)
- string(APPEND CFLAGS " -O ")
- string(APPEND FFLAGS " -O -Mnofma")
+ string(APPEND CFLAGS " -O")
+ string(APPEND FFLAGS " -O")
endif()
+string(APPEND CFLAGS " -Mnofma")
+string(APPEND FFLAGS " -Mnofma")
string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI")
set(CXX_LINKER "CXX")
@@ -49,3 +51,21 @@ set(MPIFC "mpif90")
set(SCC "nvc")
set(SCXX "nvc++")
set(SFC "nvfortran")
+if (GPU_TYPE STREQUAL v100 AND GPU_OFFLOAD STREQUAL openacc)
+ string(APPEND GPUFLAGS " -acc -gpu=cc70,lineinfo,nofma -Minfo=accel ")
+endif()
+if (GPU_TYPE STREQUAL v100 AND GPU_OFFLOAD STREQUAL openmp)
+ string(APPEND GPUFLAGS " -mp=gpu -gpu=cc70,lineinfo,nofma -Minfo=accel ")
+endif()
+if (GPU_TYPE STREQUAL v100 AND GPU_OFFLOAD STREQUAL combined)
+ string(APPEND GPUFLAGS " -acc -gpu=cc70,lineinfo,nofma -mp=gpu -Minfo=accel ")
+endif()
+if (GPU_TYPE STREQUAL a100 AND GPU_OFFLOAD STREQUAL openacc)
+ string(APPEND GPUFLAGS " -acc -gpu=cc80,lineinfo,nofma -Minfo=accel ")
+endif()
+if (GPU_TYPE STREQUAL a100 AND GPU_OFFLOAD STREQUAL openmp)
+ string(APPEND GPUFLAGS " -mp=gpu -gpu=cc80,lineinfo,nofma -Minfo=accel ")
+endif()
+if (GPU_TYPE STREQUAL a100 AND GPU_OFFLOAD STREQUAL combined)
+ string(APPEND GPUFLAGS " -acc -gpu=cc80,lineinfo,nofma -mp=gpu -Minfo=accel")
+endif()
diff --git a/machines/cmake_macros/nvhpc_casper.cmake b/machines/cmake_macros/nvhpc_casper.cmake
index f3eb207d..c789907f 100644
--- a/machines/cmake_macros/nvhpc_casper.cmake
+++ b/machines/cmake_macros/nvhpc_casper.cmake
@@ -1,8 +1,8 @@
if (NOT DEBUG)
- string(APPEND CFLAGS " -O -tp=skylake -Mnofma")
+ string(APPEND CFLAGS " -O -tp=skylake")
endif()
if (NOT DEBUG)
- string(APPEND FFLAGS " -O -tp=skylake -Mnofma")
+ string(APPEND FFLAGS " -O -tp=skylake")
endif()
string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS")
if (NOT DEBUG)
@@ -13,3 +13,4 @@ if (MPILIB STREQUAL mpi-serial)
string(APPEND SLIBS " -ldl")
endif()
string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff")
+message("GPU_TYPE is ${GPU_TYPE} GPU_OFFLOAD is ${GPU_OFFLOAD}")
diff --git a/machines/cmake_macros/nvhpc_derecho.cmake b/machines/cmake_macros/nvhpc_derecho.cmake
new file mode 100644
index 00000000..f0b3cd87
--- /dev/null
+++ b/machines/cmake_macros/nvhpc_derecho.cmake
@@ -0,0 +1,7 @@
+if (COMP_NAME STREQUAL mpi-serial)
+ string(APPEND CFLAGS " -std=c89 ")
+endif()
+if (NOT DEBUG)
+ string(APPEND FFLAGS " -tp=zen3")
+endif()
+message("GPU_TYPE is ${GPU_TYPE} GPU_OFFLOAD is ${GPU_OFFLOAD}")
diff --git a/machines/cmake_macros/nvhpc_gust.cmake b/machines/cmake_macros/nvhpc_gust.cmake
index 46113731..b7fdbd69 100644
--- a/machines/cmake_macros/nvhpc_gust.cmake
+++ b/machines/cmake_macros/nvhpc_gust.cmake
@@ -1,3 +1,6 @@
+if (COMP_NAME STREQUAL mpi-serial)
+ string(APPEND CFLAGS " -std=c89 ")
+endif()
if (NOT DEBUG)
string(APPEND FFLAGS " -target=zen3")
endif()
diff --git a/machines/cmake_macros/pgi-gpu.cmake b/machines/cmake_macros/pgi-gpu.cmake
deleted file mode 100644
index 85657479..00000000
--- a/machines/cmake_macros/pgi-gpu.cmake
+++ /dev/null
@@ -1,46 +0,0 @@
-string(APPEND CFLAGS " -gopt -time")
-if (compile_threaded)
- string(APPEND CFLAGS " -mp")
-endif()
-string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI")
-set(CXX_LINKER "CXX")
-set(FC_AUTO_R8 "-r8")
-string(APPEND FFLAGS " -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee")
-if (compile_threaded)
- string(APPEND FFLAGS " -mp")
-endif()
-if (DEBUG)
- string(APPEND FFLAGS " -O0 -g -Ktrap=fp -Mbounds -Kieee")
-endif()
-if (COMP_NAME STREQUAL datm)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL dlnd)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL drof)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL dwav)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL dice)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-if (COMP_NAME STREQUAL docn)
- string(APPEND FFLAGS " -Mnovect")
-endif()
-set(FFLAGS_NOOPT "-O0")
-set(FIXEDFLAGS "-Mfixed")
-set(FREEFLAGS "-Mfree")
-set(HAS_F2008_CONTIGUOUS "FALSE")
-set(LDFLAGS "-time -Wl,--allow-multiple-definition")
-if (compile_threaded)
- string(APPEND LDFLAGS " -mp")
-endif()
-set(MPICC "mpicc")
-set(MPICXX "mpicxx")
-set(MPIFC "mpif90")
-set(SCC "pgcc")
-set(SCXX "pgc++")
-set(SFC "pgf95")
diff --git a/machines/cmake_macros/pgi-gpu_casper.cmake b/machines/cmake_macros/pgi-gpu_casper.cmake
deleted file mode 100644
index 600521a1..00000000
--- a/machines/cmake_macros/pgi-gpu_casper.cmake
+++ /dev/null
@@ -1,15 +0,0 @@
-if (NOT DEBUG)
- string(APPEND CFLAGS " -O -tp=skylake -Mnofma")
-endif()
-if (NOT DEBUG)
- string(APPEND FFLAGS " -O -tp=skylake -Mnofma")
-endif()
-string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS")
-if (NOT DEBUG)
- string(APPEND LDFLAGS " -O -tp=skylake -Mnofma -acc -ta=tesla:cc70,lineinfo,nofma -Minfo=accel")
-endif()
-string(APPEND SLIBS " -llapack -lblas")
-if (MPILIB STREQUAL mpi-serial)
- string(APPEND SLIBS " -ldl")
-endif()
-string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff")
diff --git a/machines/coeus/config_machines.xml b/machines/coeus/config_machines.xml
new file mode 100644
index 00000000..f66ff9da
--- /dev/null
+++ b/machines/coeus/config_machines.xml
@@ -0,0 +1,56 @@
+
+
+ Portland State University Coeus Cluster Dec 2019 CentOS 7
+
+ LINUX
+ gnu
+ mvapich2
+ none
+ $ENV{CESMDATAROOT}/$USER
+ $ENV{CESMDATAROOT}/inputdata
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/cesm_baselines
+ /vol/apps/hpc/src/cesm-2.1.0/cime/tools/cprnc/cprnc
+ make
+ 8
+ slurm
+ oit-rc-groups@pdx.edu
+ 40
+ 20
+ FALSE
+
+ srun
+
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+
+
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/sh
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+
+
+ gcc-6.3.0
+ mvapich2-2.2-psm/gcc-6.3.0
+ General/netcdf/4.4.1.1/gcc-6.3.0
+ Python/2.7.13/gcc-6.3.0
+
+
+
+ 256M
+ /vol/apps/hpc/stow/netcdf/4.4.1.1/gcc-6.3.0/
+
+
+ -1
+
+
+
diff --git a/machines/config_batch.xml b/machines/config_batch.xml
index 38453f5f..414e196e 100644
--- a/machines/config_batch.xml
+++ b/machines/config_batch.xml
@@ -156,7 +156,6 @@
-r {{ rerunnable }}
-j oe
- -V
@@ -184,6 +183,8 @@
-l nodes={{ num_nodes }}
-q iccp
+ -V
+
iccp
@@ -209,94 +210,27 @@
-
-
- (\d+.bw)$
-
- -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}:xe
- -S {{ shell }}
-
-
- normal
- debug
-
-
-
qsub
-
-
-
-
- -S /glade/u/apps/dav/opt/nvidia-mps/mps_bash
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }}
- -l gpu_type=v100
-
-
-
-
-
- -S /glade/u/apps/dav/opt/nvidia-mps/mps_bash
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }}
- -l gpu_type=v100
-
-
-
- -S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }}
-
-
-
+
+ -l gpu_type=$GPU_TYPE
+
+
-S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }}
+ -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }}:mps=1
-
-
+
-S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }}
+ -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=300GB:ngpus={{ ngpus_per_node }}
-
casper
-
casper
-
-
- -S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}
-
-
-
- -S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}
-
-
-
- -S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}
-
-
-
- -S {{ shell }}
- -l select=1:mpiprocs={{ total_tasks }}:ompthreads={{ thread_count }}
-
-
-
- regular
-
-
- regular
- premium
- share
- economy
-
-
-
squeue
@@ -330,48 +264,34 @@
-
+
sbatch
--time $JOB_WALLCLOCK_TIME
- -q $JOB_QUEUE
+ -p $JOB_QUEUE
--account $PROJECT
-
- -C haswell
-
- regular
-
+ default
-
- sbatch
+
+ qsub
- --time $JOB_WALLCLOCK_TIME
- -q $JOB_QUEUE
- --account $PROJECT
+ -l job_priority=$JOB_PRIORITY
-
- -C knl,quad,cache
- -S 2
+
+ -S {{ shell }}
+ -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=230GB
+
+
+ -S {{ shell }}
+ -l select={{ num_nodes }}:ncpus={{ max_cputasks_per_gpu_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=480GB:ngpus={{ ngpus_per_node }}:mps=1
- regular
-
-
-
-
-
- sbatch
-
- --time $JOB_WALLCLOCK_TIME
- -p $JOB_QUEUE
- --account $PROJECT
-
-
- default
+ develop
+ main
@@ -435,7 +355,7 @@
development
- normal
+ normal
large
@@ -450,6 +370,7 @@
-d $RUNDIR
-o $RUNDIR/$CASE.out
-S /bin/bash
+ -V
debug
@@ -489,38 +410,32 @@
qsub
-
+
+ -l gpu_type=$GPU_TYPE
+
+
-S {{ shell }}
-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}
+
+ -S {{ shell }}
+ -l select={{ num_nodes }}:ncpus={{ max_cputasks_per_gpu_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=430GB:ngpus={{ ngpus_per_node }}
+
+
+
main
bigcpu
-
-
-
- -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}
- -S {{ shell }}
-
-
- short
- medium
- long
- verylong
- overnight
- monster
-
-
-
-
+
qsub
(\d+.izumi.cgd.ucar.edu)$
-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}
-S {{ shell }}
+ -V
short
@@ -532,16 +447,6 @@
-
-
- -S {{ shell }}
- -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}
-
-
- regular
-
-
-
sbatch
@@ -569,17 +474,12 @@
-
-
- default
-
-
-
-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}
-S {{ shell }}
+ -V
batch
@@ -623,6 +523,7 @@
-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=bro
-l place=scatter:excl
-S {{ shell }}
+ -V
normal
@@ -638,6 +539,7 @@
-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=has
-l place=scatter:excl
-S {{ shell }}
+ -V
normal
@@ -653,6 +555,7 @@
-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=ivy
-l place=scatter:excl
-S {{ shell }}
+ -V
normal
@@ -668,6 +571,7 @@
-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=san
-l place=scatter:excl
-S {{ shell }}
+ -V
normal
@@ -704,6 +608,7 @@
-l nodes={{ num_nodes }}
+ -V
default
diff --git a/machines/config_machines.xml b/machines/config_machines.xml
index 44a8d1a0..776786ee 100644
--- a/machines/config_machines.xml
+++ b/machines/config_machines.xml
@@ -48,3785 +48,38 @@ This allows using a different mpirun command to launch unit tests
-->
-
-
- XC50 SkyLake, os is CNL, 40 pes/node, batch system is PBSPro
- .*eth\d
- CNL
- intel,gnu,cray
- mpt,mpi-serial
- /proj/$ENV{USER}
- $ENV{DIN_LOC_ROOT}
- $DIN_LOC_ROOT
- ${CIME_OUTPUT_ROOT}/archive/$CASE
- ${CIME_OUTPUT_ROOT}/cesm_baselines
- 8
- pbs
- @ pusan.ac.kr
- 40
- 40
-
- aprun
-
- -j {{ hyperthreading }}
- -n {{ total_tasks }}
- -N $MAX_MPITASKS_PER_NODE
- -S {{ tasks_per_numa }}
- -d $ENV{OMP_NUM_THREADS}
- --mpmd-env OMP_NUM_THREADS=$OMP_NUM_THREADS
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/python.py
- /opt/modules/default/init/sh
- /opt/modules/default/init/csh
- /opt/modules/default/bin/modulecmd perl
- /opt/modules/default/bin/modulecmd python
- module
- module
-
- craype-x86-skylake
- PrgEnv-pgi
- PrgEnv-intel
- PrgEnv-cray
- PrgEnv-gnu
- cray-netcdf
- cray-hdf5
- cray-parallel-netcdf
- papi
-
-
- PrgEnv-intel
- craype-x86-skylake
- craype-hugepages2M
- perftools-base/7.0.4
- cray-netcdf/4.6.1.3
- cray-hdf5/1.10.2.0
- cray-parallel-netcdf/1.11.1.1
- papi/5.6.0.4
- gridftp/6.0
- cray-python/3.6.5.1
-
-
-
- 256M
- /home/jedwards/workflow/CESM_postprocessing
-
-
+
+
+ .*eth\d
+ .*.cluster.net
+ regular-dy-computehpc6a-.*
+ $ENV{NCAR_HOST}:casper
+ .*.?betzy\d?.sigma2.no
+ .*.?cheyenne\d?.ucar.edu
+ (login[1,2].cluster|compute[0-9]*.cluster)
+ $ENV{NCAR_HOST}:derecho
+ $HOSTNAME
+ .*frontera
+ gplogin\d.gp.local
+ gu.*.hpc.ucar.edu
+ ^i.*\.ucar\.edu
+ lobata
+ .*ls5\.tacc\.utexas\.edu
+ (melvin|watson)
+ $ENV{NERSC_HOST}:perlmutter
+ $HOSTNAME
+ (s999964|climate|penn)
+ (skybridge|chama)-login
+ .*stampede2
+ swan.*
+ tfe
+ theta.*
+ .*.thunder.ucar.edu
+ $ENV{CIME_TEST_PLATFORM}:ubuntu-latest
+ (login[1,2]-ib|n[0-9][0-9][0-9]-ib)
+
-
- CMCC IBM iDataPlex, os is Linux, 16 pes/node, batch system is LSF
- .*.cluster.net
- LINUX
- intel,intel15
- mpich2
- /work/$USER/CESM2
- /users/home/dp16116/CESM2/inputdata
- $DIN_LOC_ROOT/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/ccsm_baselines
- /users/home/dp16116/CESM2/cesm2.0.1/cime/tools/cprnc/cprnc
- /usr/lib64/perl5:/usr/share/perl5
- 8
- lsf
-
- 30
- 15
- FALSE
-
- mpirun_Impi5
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/sh
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
-
-
- ANACONDA2/python2.7
- INTEL/intel_xe_2015.3.187
- SZIP/szip-2.1_int15
-
-
- ESMF/esmf-6.3.0rp1-intelmpi-64-g_int15
-
-
- ESMF/esmf-6.3.0rp1-intelmpi-64-O_int15
-
-
- ESMF/esmf-6.3.0rp1-mpiuni-64-g_int15
-
-
- ESMF/esmf-6.3.0rp1-mpiuni-64-O_int15
-
-
- HDF5/hdf5-1.8.15-patch1
- NETCDF/netcdf-C_4.3.3.1-F_4.4.2_C++_4.2.1
-
-
- HDF5/hdf5-1.8.15-patch1_parallel
- NETCDF/netcdf-C_4.3.3.1-F_4.4.2_C++_4.2.1_parallel
- PARALLEL_NETCDF/parallel-netcdf-1.6.1
-
-
- CMAKE/cmake-3.3.0-rc1
-
-
- INTEL/intel_xe_2013.5.192
- INTEL/intel_xe_2013
- HDF5/hdf5-1.8.10-patch1
- INTEL/intel_xe_2015.3.187
-
-
-
- 256M
-
-
- gpfs
- on
- snb
- lsf
- 1
- on
- on
- /users/home/models/nemo/xios-cmip6/intel_xe_2013
-
-
-
- AWS HPC6a (96-core AMD) Nodes
- regular-dy-computehpc6a-.*
- LINUX
- intel
- impi
- /scratch/$USER
- /scratch/inputdata
- $DIN_LOC_ROOT/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/cesm_baselines
- $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc
- 8
- slurm
- cseg
-
- 96
- 96
- FALSE
-
- scontrol show hostnames $SLURM_JOB_NODELIST > hostfile ; mpirun -f hostfile
-
- -n {{ total_tasks }}
-
-
-
- env -u I_MPI_OFI_PROVIDER mpirun
-
- -n {{ total_tasks }}
-
-
-
-
- 256M
- /scratch/$USER
- icc
- icpc
- ifort
- ifort
- /opt/ncar/software
- /opt/ncar/esmf/lib/esmf.mk
-
-
- ON
- SUMMARY
-
-
- -1
-
-
-
-
- ORNL XE6, os is CNL, 32 pes/node, batch system is PBS
- h2o
- CNL
- intel,pgi,cray,gnu
- mpich
- banu
- /scratch/sciteam/$USER
- $ENV{CESMDATAROOT}/inputdata
- $ENV{CESMDATAROOT}/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/ccsm_baselines
- $ENV{CESMDATAROOT}/tools/cprnc
- 8
- pbs
- cseg
- 32
- 16
- TRUE
-
- aprun
-
- -n {{ total_tasks }}
-
- -N $MAX_MPITASKS_PER_NODE
- -d $ENV{OMP_NUM_THREADS}
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/python.py
- /opt/modules/default/init/sh
- /opt/modules/default/init/csh
- /opt/modules/3.2.10.3/bin/modulecmd perl
- /opt/modules/3.2.10.3/bin/modulecmd python
- module
- module
-
- PrgEnv-pgi
- PrgEnv-intel
- PrgEnv-cray
- PrgEnv-gnu
- pgi
- cray
- intel
- cray-netcdf
- gcc
-
-
- PrgEnv-intel
- intel
- intel/18.0.3.222
-
- gcc
-
-
- PrgEnv-pgi
- pgi pgi/18.7.0
-
-
- PrgEnv-gnu
- gcc gcc/6.3.0
-
-
- PrgEnv-cray
- cce cce/8.5.8
-
-
- papi/5.5.1.1
- cray-mpich cray-mpich/7.7.1
- cray-libsci cray-libsci/18.04.1
- torque/6.0.4
-
-
- cray-hdf5-parallel/1.10.2.0
- cray-netcdf-hdf5parallel/4.6.1.0
- cray-parallel-netcdf/1.8.1.3
-
-
- cray-netcdf/4.6.1.0
-
-
- cmake/3.1.3
- darshan
- /sw/modulefiles/CESM
- CESM-ENV
-
-
-
- 64M
- $ENV{HOME}/bin:$ENV{PATH}
-
-
-
-
-
- Example port to centos7 linux system with gcc, netcdf, pnetcdf and mpich
- using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules
-
- regex.expression.matching.your.machine
- LINUX
- https://howto.get.out
- gnu
- mpich
- none
-
- $ENV{HOME}/cesm/scratch
- $ENV{HOME}/cesm/inputdata
- $ENV{HOME}/cesm/inputdata/lmwg
- $ENV{HOME}/cesm/archive/$CASE
- $ENV{HOME}/cesm/cesm_baselines
- $ENV{HOME}/cesm/tools/cime/tools/cprnc/cprnc
- make
- 8
- none
- me@my.address
- 8
- 8
- FALSE
-
- mpiexec
-
- -np {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/sh
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
- lang/python/3.7.0
-
-
- compiler/gnu/8.2.0
- mpi/3.3/gnu-8.2.0
- tool/netcdf/4.7.4/gcc-8.2.0
- tool/parallel-netcdf/1.12.1/mpich
-
-
-
- 256M
- $ENV{HOME}/ESMF_8_2_0b22/lib/libg/Linux.gfortran.64.mvapich2.default/esmf.mk
-
-
- -1
-
-
-
-
- NCAR GPU platform, os is Linux, 36 pes/node, batch system is pbs
- casper*
- LINUX
- pgi,intel,nvhpc,pgi-gpu,nvhpc-gpu
- openmpi
- /glade/scratch/$USER
- $ENV{CESMDATAROOT}/inputdata
- /glade/p/cgd/tss/CTSM_datm_forcing_data
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/cesm_baselines
- 8
- pbs
- ASAP/CISL
- 36
- 8
- 36
- TRUE
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- /glade/u/apps/dav/opt/lmod/7.7.29/init/perl
- /glade/u/apps/dav/opt/lmod/7.7.29/init/env_modules_python.py
- /glade/u/apps/dav/opt/lmod/7.7.29/init/sh
- /glade/u/apps/dav/opt/lmod/7.7.29/init/csh
- /glade/u/apps/dav/opt/lmod/7.7.29/libexec/lmod perl
- /glade/u/apps/dav/opt/lmod/7.7.29/libexec/lmod python
- module
- module
-
-
- ncarenv/1.3
- cmake/3.18.2
-
-
- pgi/20.4
-
-
- pgi/20.4
-
-
- nvhpc/22.2
-
-
- nvhpc/22.2
-
-
- intel/19.1.1
- mkl/2020.0.1
-
-
- openmpi/4.1.0
- netcdf-mpi/4.8.0
- pnetcdf/1.12.2
-
-
- netcdf/4.8.0
-
-
- openmpi/4.1.0
- netcdf-mpi/4.7.4
- pnetcdf/1.12.2
- cuda/11.0.3
-
-
- netcdf/4.7.4
-
-
- openmpi/4.1.1
- netcdf-mpi/4.8.1
- pnetcdf/1.12.2
-
-
- netcdf/4.8.1
-
-
- openmpi/4.1.1
- netcdf-mpi/4.8.1
- pnetcdf/1.12.2
- cuda/11.4.0
-
-
- netcdf/4.8.1
-
-
- openmpi/4.1.1
- netcdf-mpi/4.8.1
- pnetcdf/1.12.2
-
-
- netcdf/4.7.4
-
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.4.0b08_casper-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.4.0b08_casper-ncdfio-openmpi-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2/
- esmf-8.4.0b08_casper-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2/
- esmf-8.4.0b08_casper-ncdfio-openmpi-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.4.0b08_casper-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b11_casper-ncdfio-openmpi-O
-
-
- ncarcompilers/0.5.0
-
-
- pio/2.5.7
-
-
- pio/2.5.7d
-
-
-
- /glade/u/apps/dav/modulefiles/default/compilers:/glade/u/apps/dav/modulefiles/default/idep
- 256M
- /glade/scratch/$USER
- /glade/p/cesmdata/cseg
- $ENV{NETCDF}
-
-
- ON
- SUMMARY
-
-
- -1
-
-
-
-
- NCAR SGI platform, os is Linux, 36 pes/node, batch system is PBS
- .*.?cheyenne\d?.ucar.edu
-
- MPT: Launcher network accept (MPI_LAUNCH_TIMEOUT) timed out
- 10
- LINUX
- intel,gnu,nvhpc,pgi
- mpt,openmpi
- openmpi,mpt
- openmpi,mpt
- mpt,openmpi
- /glade/scratch/$USER
- $ENV{CESMDATAROOT}/inputdata
- /glade/p/cgd/tss/CTSM_datm_forcing_data
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/cesm_baselines
- $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc.cheyenne
- 8
- pbs
- cseg
-
- 36
- 36
- TRUE
-
-
- mpiexec_mpt
-
- -p "%g:"
- -np {{ total_tasks }}
-
- omplace -tm open64
-
-
-
- mpirun `hostname`
-
- -np {{ total_tasks }}
-
- omplace -tm open64
-
-
-
- mpiexec_mpt
-
- -p "%g:"
- -np {{ total_tasks }}
-
- omplace -tm open64 -vv
-
-
-
- mpirun `hostname`
-
- -np {{ total_tasks }}
-
-
-
- mpirun
-
- -np {{ total_tasks }}
- --tag-output
-
-
-
-
- /opt/sgi/mpt/mpt-2.15/bin/mpirun $ENV{UNIT_TEST_HOST} -np 1
-
-
- /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/perl
- /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/env_modules_python.py
- /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/csh
- /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/sh
- /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/libexec/lmod perl
- /glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/libexec/lmod python
- module
- module
-
-
- ncarenv/1.3
- python/3.7.9
- cmake/3.22.0
-
-
- intel/19.1.1
- esmf_libs
- mkl
-
-
- gnu/10.1.0
- openblas/0.3.9
-
-
- pgi/20.4
-
-
- nvhpc/22.2
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.5.0b19-ncdfio-mpt-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.5.0b19-ncdfio-mpt-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.5.0b19-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.5.0b19-ncdfio-openmpi-O
-
-
- mpi-serial/2.3.0
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.5.0b19-ncdfio-mpiuni-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.1.1/
- esmf-8.5.0b19-ncdfio-mpiuni-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
- esmf-8.5.0b19-ncdfio-mpt-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
- esmf-8.5.0b19-ncdfio-mpt-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
- esmf-8.5.0b19-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
- esmf-8.5.0b19-ncdfio-openmpi-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
- esmf-8.5.0b19-ncdfio-mpiuni-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/gnu/10.1.0/
- esmf-8.5.0b19-ncdfio-mpiuni-O
-
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b23-ncdfio-mpt-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b23-ncdfio-mpt-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
- esmf-8.5.0b19-ncdfio-mpt-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
- esmf-8.5.0b19-ncdfio-mpt-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
- esmf-8.5.0b19-ncdfio-openmpi-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
- esmf-8.5.0b19-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b23-ncdfio-openmpi-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b23-ncdfio-openmpi-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b23-ncdfio-mpiuni-g
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/pgi/20.4/
- esmf-8.2.0b23-ncdfio-mpiuni-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
- esmf-8.5.0.b19-ncdfio-mpiuni-O
-
-
- /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/nvhpc/22.2
- esmf-8.5.0b19-ncdfio-mpiuni-O
-
-
- mpt/2.25
- netcdf-mpi/4.9.0
- pnetcdf/1.12.3
-
-
- mpt/2.22
- netcdf-mpi/4.8.1
- pnetcdf/1.12.2
-
-
- mpt/2.22
- netcdf-mpi/4.7.4
- pnetcdf/1.12.1
-
-
- mpt/2.25
- netcdf-mpi/4.9.0
- pnetcdf/1.12.3
-
-
- openmpi/4.1.4
- netcdf-mpi/4.9.0
- pnetcdf/1.12.3
-
-
- openmpi/4.1.4
- netcdf-mpi/4.9.0
- pnetcdf/1.12.3
-
-
- openmpi/4.0.5
- netcdf-mpi/4.7.4
-
-
- openmpi/4.1.4
- netcdf-mpi/4.9.0
- pnetcdf/1.12.3
-
-
- ncarcompilers/0.5.0
-
-
- netcdf/4.9.0
-
-
- netcdf/4.9.0
-
-
- netcdf/4.9.0
-
-
- netcdf/4.9.0
-
-
- pio/2.5.10
-
-
- pio/2.5.10d
-
-
-
- 1024M
- /glade/scratch/$USER
- 16
-
-
-
- ON
- SUMMARY
- /glade/work/turuncu/FV3GFS/benchmark-inputs/2012010100/gfs/fcst
- /glade/work/turuncu/FV3GFS/fix_am
- /glade/work/turuncu/FV3GFS/addon
- PASSIVE
- true
-
-
- false
-
-
- /glade/scratch/$USER
-
-
- -1
-
-
-
-
-
- Portland State University Coeus Cluster Dec 2019 CentOS 7
-
- (login[1,2].cluster|compute[0-9]*.cluster)
- LINUX
- gnu
- mvapich2
- none
- $ENV{CESMDATAROOT}/$USER
- $ENV{CESMDATAROOT}/inputdata
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/cesm_baselines
- /vol/apps/hpc/src/cesm-2.1.0/cime/tools/cprnc/cprnc
- make
- 8
- slurm
- oit-rc-groups@pdx.edu
- 40
- 20
- FALSE
-
- srun
-
- --ntasks={{ total_tasks }}
- --cpu_bind=sockets --cpu_bind=verbose
- --kill-on-bad-exit
-
-
-
-
-
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/sh
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
-
-
- gcc-6.3.0
- mvapich2-2.2-psm/gcc-6.3.0
- General/netcdf/4.4.1.1/gcc-6.3.0
- Python/2.7.13/gcc-6.3.0
-
-
-
- 256M
- /vol/apps/hpc/stow/netcdf/4.4.1.1/gcc-6.3.0/
-
-
- -1
-
-
-
-
- PNL Haswell cluster, OS is Linux, batch system is SLURM
- LINUX
- intel,pgi
- mvapich2,openmpi,intelmpi,mvapich
- /pic/scratch/$USER
- /pic/scratch/tcraig/IRESM/inputdata
- /pic/scratch/tcraig/IRESM/inputdata/atm/datm7
- /pic/scratch/$USER/cases/archive/$CASE
- /pic/scratch/tcraig/IRESM/ccsm_baselines
- /people/tcraig/bin/cprnc
- 8
- slurm
- tcraig -at- ucar.edu
- 24
- 24
- FALSE
-
- srun
-
- --mpi=none
- --ntasks={{ total_tasks }}
- --cpu_bind=sockets --cpu_bind=verbose
- --kill-on-bad-exit
-
-
-
- srun
-
- --ntasks={{ total_tasks }}
- --cpu_bind=sockets --cpu_bind=verbose
- --kill-on-bad-exit
-
-
-
- mpirun
-
- -n {{ total_tasks }}
-
-
-
- mpirun
-
- -n {{ total_tasks }}
-
-
-
- /share/apps/modules/Modules/3.2.10/init/perl.pm
- /etc/profile.d/modules.csh
- /etc/profile.d/modules.sh
- /share/apps/modules/Modules/3.2.10/bin/modulecmd perl
- module
- module
-
-
-
-
- perl/5.20.0
- cmake/2.8.12
-
-
- intel/15.0.1
- netcdf/4.3.2
- mkl/15.0.1
-
-
- pgi/14.10
- netcdf/4.3.2
-
-
- mvapich2/2.1
-
-
- mvapich2/2.1
-
-
- intelmpi/5.0.1.035
-
-
- openmpi/1.8.3
-
-
-
- 64M
-
-
- $MLIB_LIB
- /share/apps/netcdf/4.3.2/intel/15.0.1
-
-
- /share/apps/netcdf/4.3.2/pgi/14.10
-
-
-
-
-
- Containerized development environment (Docker/Singularity) for CESM w/ GNU compilers
-
- LINUX
- gnu
- mpich
- $ENV{HOME}/scratch
- $ENV{CESMDATAROOT}/inputdata
- $DIN_LOC_ROOT/atm/datm7
- $ENV{HOME}/archive/$CASE
- make
- 4
- none
- cgd
- 4
- 4
- FALSE
-
- mpiexec
-
- -n {{ total_tasks }}
-
-
-
-
-
- /usr/local
- /usr/local
- /usr/lib64
- /usr/lib64
-
-
- -1
-
-
-
-
-
-
-
- NERSC XC40 Haswell, os is CNL, 32 pes/node, batch system is Slurm
- cori
- CNL
- intel,gnu,cray
- mpt
- $ENV{SCRATCH}
- /project/projectdirs/ccsm1/inputdata
- /project/projectdirs/ccsm1/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /project/projectdirs/ccsm1/ccsm_baselines
- /project/projectdirs/ccsm1/tools/cprnc.corip1/cprnc
- 8
- slurm
- cseg
- 64
- 32
-
- srun
-
- --label
- -n {{ total_tasks }}
- -c {{ srun_binding }}
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/python.py
- /opt/modules/default/init/sh
- /opt/modules/default/init/csh
- /opt/modules/default/bin/modulecmd perl
- /opt/modules/default/bin/modulecmd python
- module
- module
-
- PrgEnv-intel
- PrgEnv-cray
- PrgEnv-gnu
- intel
- cce
- cray-parallel-netcdf
- cray-parallel-hdf5
- pmi
- cray-libsci
- cray-mpich2
- cray-mpich
- cray-netcdf
- cray-hdf5
- cray-netcdf-hdf5parallel
- craype-sandybridge
- craype-ivybridge
- craype
-
-
-
- PrgEnv-intel
- intel intel/19.1.3.304
- /global/project/projectdirs/ccsm1/modulefiles/cori
-
-
- esmf/7.1.0r-defio-intel18.0.1.163-mpi-O-cori-haswell
-
-
- esmf/7.1.0r-netcdf-intel18.0.1.163-mpiuni-O-haswell
-
-
-
- PrgEnv-cray
- cce cce/10.0.3
-
-
- PrgEnv-gnu
- gcc gcc/10.1.0
-
-
- cray-memkind
- craype craype/2.7.2
-
-
- cray-libsci/20.09.1
-
-
- cray-mpich/7.7.16
-
-
- cray-netcdf-hdf5parallel
- cray-hdf5-parallel
- cray-parallel-netcdf
- cray-hdf5/1.12.0.0
- cray-netcdf/4.7.4.0
-
-
- cray-netcdf-hdf5parallel/4.7.4.0
- cray-hdf5-parallel/1.12.0.0
- cray-parallel-netcdf/1.12.1.0
-
-
- cmake/3.21.3
-
-
-
- 256M
- spread
- threads
-
-
-
-
-
-
- NERSC XC* KNL, os is CNL, 68 pes/node, batch system is Slurm
- CNL
- intel,gnu,cray
- mpt
- $ENV{SCRATCH}
- /project/projectdirs/ccsm1/inputdata
- /project/projectdirs/ccsm1/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /project/projectdirs/ccsm1/ccsm_baselines
- /project/projectdirs/ccsm1/tools/cprnc.corip1/cprnc
- 8
- slurm
- cseg
- 256
- 64
- 68
-
- srun
-
- --label
- -n {{ total_tasks }}
- -c {{ srun_binding }} --cpu_bind=cores
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/python.py
- /opt/modules/default/init/sh
- /opt/modules/default/init/csh
- /opt/modules/default/bin/modulecmd perl
- /opt/modules/default/bin/modulecmd python
- module
- module
-
- craype-mic-knl
- craype-haswell
- PrgEnv-intel
- PrgEnv-cray
- PrgEnv-gnu
- intel
- cce
- cray-parallel-netcdf
- cray-parallel-hdf5
- pmi
- cray-libsci
- cray-mpich2
- cray-mpich
- cray-netcdf
- cray-hdf5
- cray-netcdf-hdf5parallel
-
-
-
- PrgEnv-intel
- intel intel/19.1.3.304
- /global/project/projectdirs/ccsm1/modulefiles/cori
-
-
- esmf/7.1.0r-defio-intel18.0.1.163-mpi-O-cori-knl
-
-
- esmf/7.1.0r-netcdf-intel18.0.1.163-mpiuni-O-knl
-
-
-
- PrgEnv-cray
- cce cce/10.0.3
-
-
- PrgEnv-gnu
- gcc gcc/10.1.0
-
-
- cray-memkind
- craype craype/2.7.2
- craype-mic-knl
-
-
- cray-libsci/20.09.1
-
-
- cray-mpich/7.7.16
-
-
- cray-netcdf-hdf5parallel
- cray-hdf5-parallel
- cray-parallel-netcdf
- cray-hdf5/1.12.0.0
- cray-netcdf/4.7.4.0
-
-
- cray-netcdf-hdf5parallel/4.7.4.0
- cray-hdf5-parallel/1.12.0.0
- cray-parallel-netcdf/1.12.1.0
-
-
-
- 256M
- spread
- threads
-
-
-
-
- CSCS Cray XC50, os is SUSE SLES, 12 pes/node, batch system is SLURM
- CNL
- pgi,cray,gnu
- mpich
- /scratch/snx3000/$USER
- /project/s824/cesm_inputdata
- /project/s824/cesm_inputdata/atm/datm7
- /project/s824/$USER/archive/$CASE
- /project/s824/ccsm_baselines
- /project/s824/cesm_tools/ccsm_cprnc/cprnc
- 12
- slurm
- edouard.davin -at- env.ethz.ch
- 12
- 12
-
- srun
-
- -n {{ total_tasks }}
- -d $ENV{OMP_NUM_THREADS}
-
-
-
-
- 64M
-
-
-
-
- PNL IBM Xeon cluster, os is Linux (pgi), batch system is SLURM
- LINUX
- pgi,intel
- mvapich2,mvapich
- /lustre/$USER
- /lustre/tcraig/IRESM/inputdata
- /lustre/tcraig/IRESM/inputdata/atm/datm7
- /lustre/$USER/archive/$CASE
- /lustre/tcraig/IRESM/ccsm_baselines
- /lustre/tcraig/IRESM/tools/cprnc/cprnc
- 8
- slurm
- tcraig -at- ucar.edu
- 12
- 12
-
- srun
-
- --ntasks={{ total_tasks }}
- --cpu_bind=sockets
- --cpu_bind=verbose
- --kill-on-bad-exit
-
-
-
- srun
-
- --mpi=none
- --ntasks={{ total_tasks }}
- --cpu_bind=sockets
- --cpu_bind=verbose
- --kill-on-bad-exit
-
-
-
- /etc/profile.d/modules.perl
- /etc/profile.d/modules.sh
- /etc/profile.d/modules.csh
- /share/apps/modules/Modules/3.2.7/bin/modulecmd perl
- module
- module
-
-
- perl/5.20.7
- cmake/3.0.0
- pgi/15.5
- mpi/mvapich2/1.5.1p1/pgi11.3
- netcdf/4.1.2/pgi
-
-
-
- 64M
-
-
-
-
- Euler II Linux Cluster ETH, 24 pes/node, InfiniBand, XeonE5_2680v3, batch system LSF
- LINUX
- intel,pgi
- openmpi,mpich
- /cluster/work/climate/$USER
- /cluster/work/climate/cesm/inputdata
- /cluster/work/climate/cesm/inputdata/atm/datm7
- /cluster/work/climate/$USER/archive/$CASE
- /cluster/work/climate/cesm/ccsm_baselines
- /cluster/work/climate/cesm/tools/cprnc/cprnc
- 1
- lsf
- urs.beyerle -at- env.ethz.ch
- 24
- 24
-
- mpirun
-
- -hostfile $ENV{PBS_JOBID}
- -ppn $MAX_MPITASKS_PER_NODE
- -n {{ total_tasks }}
-
-
-
- mpirun
-
-
-
-
- /cluster/apps/modules/init/python.py
- /etc/profile.d/modules.sh
- /etc/profile.d/modules.csh
- /cluster/apps/modules/bin/modulecmd python
- module
- module
-
-
-
-
- new
-
-
- intel/2018.1
-
-
- netcdf/4.3.1
-
-
- pgi/14.1
-
-
- mvapich2/1.8.1
-
-
- open_mpi/1.6.5
-
-
-
- 64M
-
-
-
-
- Euler III Linux Cluster ETH, 4 pes/node, Ethernet, XeonE3_1585Lv5, batch system LSF
- LINUX
- intel,pgi
- openmpi,mpich
- /cluster/work/climate/$USER
- /cluster/work/climate/cesm/inputdata
- /cluster/work/climate/cesm/inputdata/atm/datm7
- /cluster/work/climate/$USER/archive/$CASE
- /cluster/work/climate/cesm/ccsm_baselines
- /cluster/work/climate/cesm/tools/cprnc/cprnc
- 1
- lsf
- urs.beyerle -at- env.ethz.ch
- 4
- 4
-
- mpirun
-
- -hostfile $ENV{PBS_JOBID}
- -ppn $MAX_MPITASKS_PER_NODE
- -n {{ total_tasks }}
-
-
-
- mpirun
-
-
-
-
- /cluster/apps/modules/init/python.py
- /etc/profile.d/modules.sh
- /etc/profile.d/modules.csh
- /cluster/apps/modules/bin/modulecmd python
- module
- module
-
-
-
-
- new
-
-
- interconnect/ethernet
-
-
- intel/2018.1
-
-
- netcdf/4.3.1
-
-
- pgi/14.1
-
-
- mvapich2/1.8.1
-
-
- open_mpi/1.6.5
-
-
-
- 64M
-
-
-
-
- Euler IV Linux Cluster ETH, 36 pes/node, InfiniBand, XeonGold_6150, batch system LSF
- LINUX
- intel,pgi
- openmpi,mpich
- /cluster/work/climate/$USER
- /cluster/work/climate/cesm/inputdata
- /cluster/work/climate/cesm/inputdata/atm/datm7
- /cluster/work/climate/$USER/archive/$CASE
- /cluster/work/climate/cesm/ccsm_baselines
- /cluster/work/climate/cesm/tools/cprnc/cprnc
- 1
- lsf
- urs.beyerle -at- env.ethz.ch
- 36
- 36
-
- mpirun
-
- -hostfile $ENV{PBS_JOBID}
- -ppn $MAX_MPITASKS_PER_NODE
- -n {{ total_tasks }}
-
-
-
- mpirun
-
-
-
-
- /cluster/apps/modules/init/python.py
- /etc/profile.d/modules.sh
- /etc/profile.d/modules.csh
- /cluster/apps/modules/bin/modulecmd python
- module
- module
-
-
-
-
- new
-
-
- intel/2018.1
-
-
-
- 64M
-
-
-
-
- Intel Xeon Cascade Lake,56 cores, batch system is SLURM
- .*frontera
- LINUX
- intel
- impi,mvapich
- ATM20005
- $ENV{SCRATCH}
- /work/02503/edwardsj/CESM/inputdata/
- /work/02503/edwardsj/CESM/inputdata/lmwg
- $ENV{SCRATCH}/archive/$CASE
- /work/02503/edwardsj/CESM/cesm_baselines
- /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
- 4
- slurm
- cseg
- 112
- 56
-
- sbcast ${EXEROOT}/cesm.exe /tmp/cesm.exe; ibrun
-
- -n {{ total_tasks }}
-
- /tmp/cesm.exe
-
-
- ibrun
-
- -n {{ total_tasks }}
-
-
-
- /opt/apps/lmod/lmod/init/perl
- /opt/apps/lmod/lmod/init/env_modules_python.py
- /opt/apps/lmod/lmod/init/sh
- /opt/apps/lmod/lmod/init/csh
- /opt/apps/lmod/lmod/libexec/lmod perl
- /opt/apps/lmod/lmod/libexec/lmod python
- module
- module
-
-
- TACC
- /scratch1/projects/compilers/modulefiles
- intel/22.2.0
- cmake/3.24.2
-
-
- impi/22.2.0
- pnetcdf/1.11.2
- parallel-netcdf/4.6.2
-
-
- impi
- mvapich2-x/2.3
- pnetcdf/1.11.2
- parallel-netcdf/4.6.2
-
-
- netcdf/4.6.2
-
-
-
- 256M
- 20
- /scratch3/projects/csa_gettelman/esmf-8.4.1b02_intel22.2.0_impi22.2.0/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk
- ON
- SUMMARY
- /scratch3/projects/csa_gettelman/intel22.2.0_impi22.2.0/pio2_5_10_avx512
-
-
- -prepend-rank
- 4
- 3
- 3
- 3
- 2
- enable
-
-
-
- spread
- hybrid
- 0
- 1
- 4096
- 4096
- mvapich2_ssh
-
-
-
-
- NOAA XE6, os is CNL, 24 pes/node, batch system is PBS
- CNL
- pgi
- mpich
- /lustre/fs/scratch/Julio.T.Bacmeister
- /lustre/fs/scratch/Julio.T.Bacmeister/inputdata
- /lustre/fs/scratch/Julio.T.Bacmeister/inputdata
- /lustre/fs/scratch/Julio.T.Bacmeister/archive/$CASE
- UNSET
- UNSET
- 8
- pbs
- julio -at- ucar.edu
- 24
- 24
-
- aprun
-
- -j {{ hyperthreading }}
- -n {{ total_tasks }}
- -S {{ tasks_per_numa }}
- -N $MAX_MPITASKS_PER_NODE
- -d $ENV{OMP_NUM_THREADS}
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/csh
- /opt/modules/default/init/sh
- /opt/modules/default/bin/modulecmd perl
- module
- module
-
- PrgEnv-pgi
- PrgEnv-cray
- PrgEnv-gnu
- pgi
- cray
-
-
- PrgEnv-pgi
- pgi pgi/12.5.0
-
-
- PrgEnv-gnu
- torque
-
-
- PrgEnv-cray/4.0.36
- cce/8.0.2
-
-
- torque/4.1.3
- netcdf-hdf5parallel/4.2.0
- parallel-netcdf/1.2.0
-
-
-
- 64M
- 1
-
-
-
-
- UCI Linux Cluster; 16 pes/node, batch system is slurm
- LINUX
-
-
- intel
- openmpi
-
-
-
-
-
- /DFS-L/SCRATCH/moore/$USER/cesm_runs
- /DFS-L/DATA/moore/cesm/inputdata
- /DFS-L/DATA/moore/cesm/inputdata
- $CIME_OUTPUT_ROOT/archive/$CASE
- /DFS-L/DATA/moore/cesm/baselines
-
- /DFS-L/DATA/moore/cesm/tools/cprnc/cprnc
- gmake
- 16
- slurm
- mlevy@ucar.edu
- 16
- 16
- FALSE
-
-
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- /usr/share/lmod/lmod/init/perl
- /usr/share/lmod/lmod/init/env_modules_python.py
- /usr/share/lmod/lmod/init/csh
- /usr/share/lmod/lmod/init/sh
- /usr/share/lmod/lmod/libexec/lmod perl
- /usr/share/lmod/lmod/libexec/lmod python
- module
- module
-
-
-
-
- intel/2018.3
- netcdf/4.7.0
-
-
- openmpi/3.1.6
- pnetcdf/1.10.0
-
-
-
-
- 256M
- 16
-
-
-
- -1
-
-
-
-
- UCI Linux Cluster; 40 pes/node, batch system is slurm
- gplogin\d.gp.local
- LINUX
-
-
- intel
- openmpi
-
-
-
-
-
- /DFS-L/SCRATCH/moore/$USER/cesm_runs
- /DFS-L/DATA/moore/cesm/inputdata
- /DFS-L/DATA/moore/cesm/inputdata
- $CIME_OUTPUT_ROOT/archive/$CASE
- /DFS-L/DATA/moore/cesm/baselines
-
- /DFS-L/DATA/moore/cesm/tools/cprnc/cprnc
- gmake
- 16
- slurm
- mlevy@ucar.edu
- 40
- 40
- FALSE
-
-
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- /usr/share/lmod/lmod/init/perl
- /usr/share/lmod/lmod/init/env_modules_python.py
- /usr/share/lmod/lmod/init/csh
- /usr/share/lmod/lmod/init/sh
- /usr/share/lmod/lmod/libexec/lmod perl
- /usr/share/lmod/lmod/libexec/lmod python
- module
- module
-
-
-
-
- intel/2018.3
- netcdf/4.7.0
-
-
- openmpi/3.1.6
- pnetcdf/1.10.0
-
-
-
-
- 256M
- 16
-
-
-
- -1
-
-
-
-
- NCAR AMD EPYC test system 16 CPU nodes 2 GPU nodes
- gu.*.hpc.ucar.edu
- CNL
- intel,gnu,cray,nvhpc,oneapi
- mpich
- /glade/gust/scratch/$ENV{USER}
- /glade/p/cesmdata/inputdata
- /glade/p/cesmdata/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /glade/p/cesmdata/ccsm_baselines
- /glade/p/cesmdata/cprnc/cprnc
- 16
- pbs
- cseg
- 128
- 128
- TRUE
-
- mpiexec
-
- --label
- -n {{ total_tasks }}
-
-
-
- /glade/u/apps/gust/default/spack/opt/spack/lmod/8.7.2/gcc/7.5.0/lmod/lmod/init/perl
- /glade/u/apps/gust/default/spack/opt/spack/lmod/8.7.2/gcc/7.5.0/lmod/lmod/init/env_modules_python.py
- /glade/u/apps/gust/default/spack/opt/spack/lmod/8.7.2/gcc/7.5.0/lmod/lmod/init/sh
- /glade/u/apps/gust/default/spack/opt/spack/lmod/8.7.2/gcc/7.5.0/lmod/lmod/init/csh
- /glade/u/apps/gust/default/spack/opt/spack/lmod/8.7.2/gcc/7.5.0/lmod/lmod/libexec/lmod perl
- /glade/u/apps/gust/default/spack/opt/spack/lmod/8.7.2/gcc/7.5.0/lmod/lmod/libexec/lmod python
- module
- module
-
-
- /opt/cray/pe/craype-targets/default/modulefiles
-
-
-
- intel/2021.6.0
-
-
- oneapi/2022.1.0
-
-
- cce/15.0.0
-
-
- gcc/12.1.0
-
-
- nvhpc/22.7
-
-
- cray-mpich/8.1.19
-
-
- cray-mpich/8.1.21
-
-
- mpi-serial/2.3.0
-
-
-
- cmake/3.23.2
- cray-libsci/22.08.1.1
-
-
-
- mkl/2022.1.0
-
-
-
- mkl/2022.1.0
-
-
- netcdf/4.9.0
-
-
-
- hdf5
- netcdf
- netcdf-mpi/4.9.0
- parallel-netcdf/1.12.3
-
-
- parallelio/2.5.9-debug
- esmf/8.4.0b20-debug
-
-
- parallelio/2.5.9
- esmf/8.4.0b20
-
-
- esmf/8.4.0b21-debug
-
-
- esmf/8.4.0b21
-
-
-
-
- 64M
- hybrid
-
-
- *:romio_cb_read=enable:romio_cb_write=enable:striping_factor=2
-
-
-
- ON
- SUMMARY
-
-
-
-
- NCAR CGD Linux Cluster 48 pes/node, batch system is PBS
- ^h.*\.cgd\.ucar\.edu
- LINUX
- intel,pgi,nag,gnu
- mvapich2,openmpi
- /scratch/cluster/$USER
- /fs/cgd/csm/inputdata
- /project/tss
- /scratch/cluster/$USER/archive/$CASE
- /fs/cgd/csm/ccsm_baselines
- /fs/cgd/csm/tools/cime/tools/cprnc/cprnc
- gmake --output-sync
- 4
- pbs
- cseg
- 48
- 48
-
- mpiexec
-
- --machinefile $ENV{PBS_NODEFILE}
- -n {{ total_tasks }}
-
-
-
- mpiexec
-
- -n {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/sh
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
-
-
- compiler/intel/18.0.3
- tool/netcdf/4.6.1/intel
-
-
- mpi/intel/mvapich2-2.3rc2-intel-18.0.3
-
-
- compiler/pgi/18.1
- tool/netcdf/4.6.1/pgi
-
-
- compiler/nag/6.2
- tool/netcdf/4.6.1/nag
-
-
- mpi/nag/mvapich2-2.3rc2
-
-
- mpi/nag/openmpi-3.1.0
-
-
- compiler/gnu/8.1.0
- tool/netcdf/4.6.1/gcc
-
-
- mpi/gcc/openmpi-3.1.0a
-
-
- mpi/gcc/mvapich2-2.3rc2-qlc
-
-
-
- 64M
-
- $ENV{PATH}:/cluster/torque/bin
- /home/dunlap/ESMF-INSTALL/8.0.0bs16/lib/libg/Linux.intel.64.mvapich2.default/esmf.mk
-
-
- -1
-
-
-
-
-
-
- Customize these fields as appropriate for your system,
- particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the
- number of cores on your machine. You may also want to change
- instances of '$ENV{HOME}/projects' to your desired directory
- organization. You can use this in either of two ways: (1)
- Without making any changes, by adding `--machine homebrew` to
- create_newcase or create_test (2) Copying this into a
- config_machines.xml file in your personal .cime directory and
- then changing the machine name (MACH="homebrew") to
- your machine name and the NODENAME_REGEX to something matching
- your machine's hostname. With (2), you should not need the
- `--machine` argument, because the machine should be determined
- automatically. However, with (2), you will also need to copy the
- homebrew-specific settings in config_compilers.xml into a
- config_compilers.xml file in your personal .cime directory, again
- changing the machine name (MACH="homebrew") to your machine name.
-
-
- something.matching.your.machine.hostname
- Darwin
- gnu
- mpich
- $ENV{HOME}/projects/scratch
- $ENV{HOME}/projects/cesm-inputdata
- $ENV{HOME}/projects/ptclm-data
- $ENV{HOME}/projects/scratch/archive/$CASE
- $ENV{HOME}/projects/baselines
- $ENV{HOME}/cesm/tools/cprnc/cprnc
- make
- 4
- none
- __YOUR_NAME_HERE__
- 8
- 4
-
- mpirun
-
- -np {{ total_tasks }}
- -prepend-rank
-
-
-
-
- /usr/local
-
-
-
-
- NCAR CGD Linux Cluster 48 pes/node, batch system is PBS
- ^i.*\.ucar\.edu
- LINUX
- intel,nag,gnu
- mvapich2,openmpi
- /scratch/cluster/$USER
- /fs/cgd/csm/inputdata
- /project/tss
- /scratch/cluster/$USER/archive/$CASE
- /fs/cgd/csm/ccsm_baselines
- /fs/cgd/csm/tools/cime/tools/cprnc/cprnc
- gmake --output-sync
- 4
- pbs
- cseg
- 48
- 48
-
- mpiexec
-
- --machinefile $ENV{PBS_NODEFILE}
- -n {{ total_tasks }}
-
-
-
- mpiexec
-
- -n {{ total_tasks }}
- --tag-output
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/sh
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
- lang/python/3.7.0
- /fs/cgd/data0/modules/modulefiles
-
-
- compiler/gnu/9.3.0
- tool/netcdf/4.7.4/gnu/9.3.0
-
-
- openmpi/4.0.3/gnu/9.3.0
-
-
- mpi/2.3.3/gnu/9.3.0
-
-
- compiler/intel/20.0.1
- tool/netcdf/4.7.4/intel/20.0.1
-
-
- mpi/2.3.3/intel/20.0.1
-
-
- compiler/nag/6.2-8.1.0
- tool/netcdf/c4.6.1-f4.4.4/nag-gnu/6.2-8.1.0
-
-
- mpi/2.3.3/nag/6.2
-
-
- mpi
-
-
- esmfpkgs/gfortran/9.3.0/esmf-8.4.1b05-ncdfio-mvapich2-O
- mvapich2/2.3.3/gnu/9.3.0/pio/2_5_10
-
-
- esmfpkgs/gfortran/9.3.0/esmf-8.4.1b05-ncdfio-mvapich2-g
- mvapich2/2.3.3/gnu/9.3.0/pio/2_5_10
-
-
- esmfpkgs/gfortran/9.3.0/esmf-8.4.1b05-ncdfio-mpiuni-O
- mpi-serial/2.3.0/gnu/9.3.0/pio/2_5_10
-
-
- esmfpkgs/gfortran/9.3.0/esmf-8.4.1b05-ncdfio-mpiuni-g
- mpi-serial/2.3.0/gnu/9.3.0/pio/2_5_10
-
-
-
- esmfpkgs/nag/6.2/esmf-8.4.1b05-ncdfio-mvapich2-O
- mvapich2/2.3.3/nag/6.2/pio/2_5_10
-
-
- esmfpkgs/nag/6.2/esmf-8.4.1b05-ncdfio-mvapich2-g
- mvapich2/2.3.3/nag/6.2/pio/2_5_10
-
-
- esmfpkgs/nag/6.2/esmf-8.4.1b05-ncdfio-mpiuni-g
- mpi-serial/2.3.0/nag/6.2/pio/2_5_10
-
-
- esmfpkgs/nag/6.2/esmf-8.4.1b05-ncdfio-mpiuni-O
- mpi-serial/2.3.0/nag/6.2/pio/2_5_10
-
-
-
- esmfpkgs/intel/20.0.1/esmf-8.4.1b05-ncdfio-mpiuni-g
- mpi-serial/2.3.0/intel/20.0.1/pio/2_5_10
-
-
- esmfpkgs/intel/20.0.1/esmf-8.4.1b05-ncdfio-mpiuni-O
- mpi-serial/2.3.0/intel/20.0.1/pio/2_5_10
-
-
- esmfpkgs/intel/20.0.1/esmf-8.4.1b05-ncdfio-mvapich2-g
- mvapich2/2.3.3/intel/20.0.1/pio/2_5_10
-
-
- esmfpkgs/intel/20.0.1/esmf-8.4.1b05-ncdfio-mvapich2-O
- mvapich2/2.3.3/intel/20.0.1/pio/2_5_10
-
-
-
- 64M
-
- $ENV{PATH}:/cluster/torque/bin
-
-
- -1
-
-
-
-
- NCAR SGI test platform, os is Linux, 36 pes/node, batch system is PBS
- .*.laramie.ucar.edu
- LINUX
- intel,gnu
- mpt
- /picnic/scratch/$USER
- $ENV{CESMDATAROOT}/inputdata
- $ENV{CESMDATAROOT}/lmwg
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/cesm_baselines
- $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc
- 8
- pbs
- cseg
-
- 36
- 36
- FALSE
-
- mpiexec_mpt
-
- -p "%g:"
- omplace
-
-
-
- /picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/perl
- /picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/env_modules_python.py
- /picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/csh
- /picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/sh
- /picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/libexec/lmod perl
- /picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/libexec/lmod python
- module
- module
-
-
- ncarenv/1.3
- cmake/3.16.4
-
-
- intel/19.0.5
- mkl
-
-
- gnu/9.1.0
- openblas/0.3.6
-
-
- mpt/2.21
- netcdf-mpi/4.7.3
-
-
- pnetcdf/1.12.1
- pio/2.4.4
-
-
- openmpi/3.1.4
- netcdf-mpi/4.7.3
-
-
- ncarcompilers/0.5.0
-
-
- netcdf/4.7.3
-
-
-
- 256M
- 16
-
-
-
-
- Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM
- LINUX
- intel
- openmpi
- /global/scratch/$ENV{USER}
- /global/scratch/$ENV{USER}/cesm_input_datasets/
- /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7
- $CIME_OUTPUT_ROOT/cesm_archive/$CASE
- $CIME_OUTPUT_ROOT/cesm_baselines
- /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc
- 4
- slurm
- rgknox at lbl dot gov and glemieux at lbl dot gov
- 16
- 16
- TRUE
-
- mpirun
-
- -np {{ total_tasks }}
- -npernode $MAX_MPITASKS_PER_NODE
-
-
-
- /etc/profile.d/modules.sh
- /etc/profile.d/modules.csh
- /usr/Modules/init/perl.pm
- /usr/Modules/python.py
- module
- module
- /usr/Modules/bin/modulecmd perl
- /usr/Modules/bin/modulecmd python
-
-
- cmake
- perl xml-libxml switch python/3.6
-
-
- intel/2016.4.072
- mkl
-
-
- netcdf/4.4.1.1-intel-s
-
-
- openmpi
- netcdf/4.4.1.1-intel-p
-
-
-
-
-
- FATES development machine at LBNL, System76 Thelio Massive Workstation Pop!_OS 20.04
- lobata
- LINUX
- gnu
- openmpi
- $ENV{HOME}/scratch/
- /data/cesmdataroot/inputdata
- /data/cesmdataroot/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{HOME}/scratch/ctsm-baselines
- /home/glemieux/Repos/cime/tools/cprnc/cprnc
- make
- 16
- none
- glemieux at lbl dot gov
- 4
- 4
- FALSE
-
- mpirun
-
- -np {{ total_tasks }}
- --map-by ppr:{{ tasks_per_node }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
-
-
-
- /usr/share/modules/init/python.py
- /usr/share/modules/init/perl.pm
- /usr/share/modules/init/sh
- /usr/share/modules/init/csh
- /usr/bin/modulecmd python
- /usr/bin/modulecmd perl
- module
- module
-
-
- hdf5
- netcdf-c
- netcdf-fortran
- esmf
-
-
-
-
-
-
- Lonestar5 cluster at TACC, OS is Linux (intel), batch system is SLURM
- .*ls5\.tacc\.utexas\.edu
- LINUX
- intel
- mpich
- $ENV{SCRATCH}
- /work/02503/edwardsj/CESM/inputdata
- /work/02503/edwardsj/CESM/inputdata/lmwg
- $CIME_OUTPUT_ROOT/cesm_archive/$CASE
- /work/02503/edwardsj/CESM/cesm_baselines
- /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
- 4
- slurm
- cseg
- 48
- 24
- FALSE
-
- srun
-
- --ntasks={{ total_tasks }}
-
-
-
-
- /opt/apps/lmod/lmod/init/perl
- /opt/apps/lmod/lmod/init/env_modules_python.py
- /opt/apps/lmod/lmod/init/sh
- /opt/apps/lmod/lmod/init/csh
- /opt/apps/lmod/lmod/libexec/lmod perl
- /opt/apps/lmod/lmod/libexec/lmod python
- module
- module
-
-
-
- cmake
-
-
- intel/18.0.2
-
-
- netcdf/4.6.2
-
-
- cray_mpich
-
-
- pnetcdf/1.8.0
- parallel-netcdf/4.6.2
-
-
-
-
-
-
- Linux workstation for Jenkins testing
- (melvin|watson)
- LINUX
- sonproxy.sandia.gov:80
- gnu
- openmpi
- /sems-data-store/ACME/timings
- $ENV{HOME}/acme/scratch
- /sems-data-store/ACME/inputdata
- /sems-data-store/ACME/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /sems-data-store/ACME/baselines
- /sems-data-store/ACME/cprnc/build/cprnc
- make
- 32
- acme_developer
- none
- jgfouca at sandia dot gov
- 64
- 64
-
- mpirun
-
- -np {{ total_tasks }}
- --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
-
-
-
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/bin/modulecmd python
- /usr/bin/modulecmd perl
- module
- module
-
-
- sems-env
- acme-env
- sems-git
- sems-python/2.7.9
- sems-cmake/2.8.12
-
-
- sems-gcc/5.3.0
-
-
- sems-intel/16.0.3
-
-
- sems-netcdf/4.4.1/exo
- acme-pfunit/3.2.8/base
-
-
- sems-openmpi/1.8.7
- sems-netcdf/4.4.1/exo_parallel
-
-
-
- $ENV{SEMS_NETCDF_ROOT}
- 64M
- spread
- threads
-
-
- $ENV{SEMS_NETCDF_ROOT}
-
-
-
-
- ANL IBM BG/Q, os is BGP, 16 pes/node, batch system is cobalt
- .*.fst.alcf.anl.gov
- BGQ
- ibm
- ibm
- /projects/$PROJECT/usr/$ENV{USER}
- /projects/ccsm/inputdata
- /projects/ccsm/inputdata/atm/datm7
- /projects/$PROJECT/usr/$USER/archive/$CASE
- /projects/ccsm/ccsm_baselines/
- /projects/ccsm/tools/cprnc/cprnc
- 4
- cobalt
- cseg
- 64
- 8
- TRUE
-
- /usr/bin/runjob
-
- --label short
-
- --ranks-per-node $MAX_MPITASKS_PER_NODE
-
- --np {{ total_tasks }}
- --block $COBALT_PARTNAME --envs OMP_WAIT_POLICY=active --envs BG_SMP_FAST_WAKEUP=yes $LOCARGS
- --envs BG_THREADLAYOUT=1
- --envs OMP_STACKSIZE=32M
- --envs OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS}
-
-
-
- /etc/profile.d/00softenv.csh
- /etc/profile.d/00softenv.sh
- soft
- soft
-
- +mpiwrapper-xl
- @ibm-compilers-2015-02
- +cmake
- +python
-
-
-
- 10000
- FALSE
- 64M
- /soft/libraries/hdf5/1.8.14/cnk-xl/current
-
-
-
-
- Medium sized linux cluster at BNL, torque scheduler.
- LINUX
- gnu
- openmpi,mpi-serial
- /data/$ENV{USER}
- /data/Model_Data/cesm_input_datasets/
- /data/Model_Data/cesm_input_datasets/atm/datm7
- $CIME_OUTPUT_ROOT/cesm_archive/$CASE
- $CIME_OUTPUT_ROOT/cesm_baselines
- /data/software/cesm_tools/cprnc/cprnc
- 4
- pbs
- rgknox at lbl dot gov and sserbin at bnl gov
- 12
- 12
- 12
- FALSE
-
- mpirun
-
- -np {{ total_tasks }}
- -npernode $MAX_TASKS_PER_NODE
-
-
-
- /etc/profile.d/modules.sh
- /etc/profile.d/modules.csh
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/python.py
- module
- module
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
-
-
- perl/5.22.1
- libxml2/2.9.2
- maui/3.3.1
- python/2.7.13
-
-
- gcc/5.4.0
- gfortran/5.4.0
- hdf5/1.8.19fates
- netcdf/4.4.1.1-gnu540-fates
- openmpi/2.1.1-gnu540
-
-
- openmpi/2.1.1-gnu540
-
-
-
- /data/software/hdf5/1.8.19fates
- /data/software/netcdf/4.4.1.1-gnu540-fates
-
-
-
-
- PNL cluster, os is Linux (pgi), batch system is SLURM
- LINUX
- pgi
- mpich
- /pic/scratch/$USER
- /pic/scratch/tcraig/IRESM/inputdata
- /pic/scratch/tcraig/IRESM/inputdata/atm/datm7
- /pic/scratch/$USER/archive/$CASE
- /pic/scratch/tcraig/IRESM/ccsm_baselines
- /pic/scratch/tcraig/IRESM/tools/cprnc/cprnc
- 8
- slurm
- tcraig -at- ucar.edu
- 32
- 32
- FALSE
-
- mpiexec_mpt
-
- --mpi=none
- -n={{ total_tasks }}
- --kill-on-bad-exit
-
-
-
- /share/apps/modules/Modules/3.2.7/init/perl.pm
- /share/apps/modules/Modules/3.2.7/init/csh
- /share/apps/modules/Modules/3.2.7/init/sh
- /share/apps/modules/Modules/3.2.7/bin/modulecmd perl
- module
- module
-
-
- precision/i4
- pgi/11.8
- mvapich2/1.7
- netcdf/4.1.3
-
-
-
- 64M
-
-
-
-
- NERSC EX AMD EPYC, os is CNL, 64 pes/node, batch system is Slurm
- $ENV{NERSC_HOST}:perlmutter
- CNL
- gnu,cray,nvidia,aocc
- mpich
- mp9_g
- $ENV{SCRATCH}
- /global/cfs/cdirs/ccsm1/inputdata
- /global/cfs/cdirs/ccsm1/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /global/cfs/cdirs/ccsm1/ccsm_baselines
- /global/cfs/cdirs/ccsm1/tools/cprnc.perlmutter/cprnc
- 8
- slurm
- cseg
- 128
- 4
- 64
- TRUE
-
- srun
-
- --label
- -n {{ total_tasks }}
- -c {{ srun_binding }}
-
-
-
- /usr/share/lmod/lmod/init/perl
- /usr/share/lmod/lmod/init/env_modules_python.py
- /usr/share/lmod/lmod/init/sh
- /usr/share/lmod/lmod/init/csh
- /usr/share/lmod/lmod/libexec/lmod perl
- /usr/share/lmod/lmod/libexec/lmod python
- module
- module
-
- PrgEnv-nvidia
- PrgEnv-cray
- PrgEnv-aocc
- PrgEnv-gnu
- nvidia
- cce
- gnu
- aocc
- cray-parallel-netcdf
- cray-hdf5-parallel
- cray-libsci
- cray-mpich
- cray-hdf5
- cray-netcdf-hdf5parallel
- cray-netcdf
- craype
-
-
-
- PrgEnv-cray
- cce cce/12.0.3
-
-
- PrgEnv-gnu
- gcc gcc/11.2.0
-
-
- craype craype/2.7.10
-
-
- cray-libsci/21.08.1.2
-
-
- cray-mpich/8.1.9
-
-
- cray-netcdf-hdf5parallel
- cray-hdf5-parallel
- cray-parallel-netcdf
- cray-hdf5/1.12.0.7
- cray-netcdf/4.7.4.7
-
-
- cray-hdf5-parallel/1.12.0.7
- cray-netcdf-hdf5parallel/4.7.4.7
- cray-parallel-netcdf/1.12.1.7
-
-
- cmake/3.20.5
-
-
-
- 256M
-
-
-
-
-
- NASA/AMES Linux Cluster, Linux (ia64), 2.4 GHz Broadwell Intel Xeon E5-2680v4 processors, 28 pes/node (two 14-core processors) and 128 GB of memory/node, batch system is PBS
- LINUX
- intel
- mpt
- /nobackup/$USER
- /nobackup/fvitt/csm/inputdata
- /nobackup/fvitt/csm/inputdata/atm/datm7
- /nobackup/$USER/archive/$CASE
- /nobackup/fvitt/cesm_baselines
- /u/fvitt/bin/cprnc
- 8
- pbs
- fvitt -at- ucar.edu
- 28
- 28
- TRUE
-
- mpiexec_mpt
-
- -n {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/python.py
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
- nas
- pkgsrc
- python3
- comp-intel/2020.4.304
- mpi-hpe/mpt.2.25
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
-
-
-
- 1024
- 100000
- 16
- 256M
- /home6/fvitt/esmf-8_2_0/lib/libO/Linux.intel.64.mpt.default/esmf.mk
-
-
-
-
- NASA/AMES Linux Cluster, Linux (ia64), 2.5 GHz Haswell Intel Xeon E5-2680v3 processors, 24 pes/node (two 12-core processors) and 128 GB of memory/node, batch system is PBS
- LINUX
- intel
- mpt
- /nobackup/$USER
- /nobackup/fvitt/csm/inputdata
- /nobackup/fvitt/csm/inputdata/atm/datm7
- /nobackup/$USER/archive/$CASE
- /nobackup/fvitt/cesm_baselines
- /u/fvitt/bin/cprnc
- 8
- pbs
- fvitt -at- ucar.edu
- 24
- 24
- TRUE
-
- mpiexec_mpt
-
- -n {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/python.py
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
- nas
- pkgsrc
- python3
- comp-intel/2020.4.304
- mpi-hpe/mpt.2.25
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
-
-
-
- 1024
- 100000
- 16
- 256M
- /home6/fvitt/esmf-8_2_0/lib/libO/Linux.intel.64.mpt.default/esmf.mk
-
-
-
-
- NASA/AMES Linux Cluster, Linux (ia64), Altix ICE, 2.6 GHz Sandy Bridge processors, 16 cores/node and 32 GB of memory, batch system is PBS
- LINUX
- intel
- mpt
- /nobackup/$USER
- /nobackup/fvitt/csm/inputdata
- /nobackup/fvitt/csm/inputdata/atm/datm7
- /nobackup/$USER/archive/$CASE
- /nobackup/fvitt/cesm_baselines
- /u/fvitt/bin/cprnc
- 8
- pbs
- fvitt -at- ucar.edu
- 16
- 16
- TRUE
-
- mpiexec_mpt
-
- -n {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/python.py
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
- nas
- pkgsrc
- python3
- comp-intel/2020.4.304
- mpi-hpe/mpt.2.25
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
-
-
-
- 1024
- 100000
- 16
- 256M
- /home6/fvitt/esmf-8_2_0/lib/libO/Linux.intel.64.mpt.default/esmf.mk
-
-
-
-
- NASA/AMES Linux Cluster, Linux (ia64), Altix ICE, 2.8 GHz Ivy Bridge processors, 20 cores/node and 3.2 GB of memory per core, batch system is PBS
- LINUX
- intel
- mpich
- /nobackup/$USER
- /nobackup/fvitt/csm/inputdata
- /nobackup/fvitt/csm/inputdata/atm/datm7
- /nobackup/$USER/archive/$CASE
- /nobackup/fvitt/cesm_baselines
- /u/fvitt/bin/cprnc
- 8
- pbs
- fvitt -at- ucar.edu
- 20
- 20
- TRUE
-
- mpiexec_mpt
-
- -n {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/python.py
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
- nas
- pkgsrc
- python3
- comp-intel/2020.4.304
- mpi-hpe/mpt.2.25
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
-
-
-
- 1024
- 100000
- 16
- 256M
- /home6/fvitt/esmf-8_2_0/lib/libO/Linux.intel.64.mpt.default/esmf.mk
-
-
-
-
- Linux workstation at Sandia on SRN with SEMS TPL modules
- (s999964|climate|penn)
- LINUX
- wwwproxy.sandia.gov:80
- gnu
- openmpi
- /sems-data-store/ACME/timings
- $ENV{HOME}/acme/scratch
- /sems-data-store/ACME/inputdata
- /sems-data-store/ACME/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /sems-data-store/ACME/baselines
- /sems-data-store/ACME/cprnc/build/cprnc
- make
- 32
- acme_developer
- none
- jgfouca at sandia dot gov
- 64
- 64
-
- mpirun
-
- -np {{ total_tasks }}
-
-
-
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/bin/modulecmd python
- /usr/bin/modulecmd perl
- module
- module
-
-
- sems-env
- sems-git
- sems-python/2.7.9
- sems-gcc/5.1.0
- sems-openmpi/1.8.7
- sems-cmake/2.8.12
- sems-netcdf/4.3.2/parallel
-
-
-
- $ENV{SEMS_NETCDF_ROOT}
- $ENV{SEMS_NETCDF_ROOT}
-
-
-
-
- SNL clust
- (skybridge|chama)-login
- LINUX
- wwwproxy.sandia.gov:80
- intel
- openmpi
- /projects/ccsm/timings
- /gscratch/$USER/acme_scratch/$MACH
- /projects/ccsm/inputdata
- /projects/ccsm/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /projects/ccsm/ccsm_baselines
- /projects/ccsm/cprnc/build/cprnc_wrap
- 8
- acme_integration
- slurm
- jgfouca at sandia dot gov
- 16
- 16
- TRUE
-
-
- mpirun
-
- -np {{ total_tasks }}
- -npernode $MAX_MPITASKS_PER_NODE
-
-
-
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/sh
- /usr/share/Modules/init/csh
- /usr/bin/modulecmd python
- /usr/bin/modulecmd perl
- module
- module
-
-
- sems-env
- sems-git
- sems-python/2.7.9
- gnu/4.9.2
- intel/intel-15.0.3.187
- libraries/intel-mkl-15.0.2.164
- libraries/intel-mkl-15.0.2.164
-
-
- openmpi-intel/1.8
- sems-hdf5/1.8.12/parallel
- sems-netcdf/4.3.2/parallel
- sems-hdf5/1.8.12/base
- sems-netcdf/4.3.2/base
-
-
-
- $ENV{SEMS_NETCDF_ROOT}
- 64M
-
-
- $ENV{SEMS_NETCDF_ROOT}
-
-
-
-
- Intel Xeon Platinum 8160 ("Skylake"),48 cores on two sockets (24 cores/socket) , batch system is SLURM
- .*stampede2
- LINUX
- intel
- impi,mvapich2
- $ENV{SCRATCH}
- /work/02503/edwardsj/CESM/inputdata
- /work/02503/edwardsj/CESM/inputdata/lmwg
- $ENV{WORK}/archive/$CASE
- /work/02503/edwardsj/CESM/cesm_baselines
- /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
- 4
- slurm
- cseg
- 96
- 48
-
- ibrun
-
- -n {{ total_tasks }}
-
-
-
- ibrun
-
- -n {{ total_tasks }}
-
-
-
- /opt/apps/lmod/lmod/init/perl
- /opt/apps/lmod/lmod/init/env_modules_python.py
- /opt/apps/lmod/lmod/init/sh
- /opt/apps/lmod/lmod/init/csh
- /opt/apps/lmod/lmod/libexec/lmod perl
- /opt/apps/lmod/lmod/libexec/lmod python
- module
- module
-
-
- TACC
- python/2.7.13
- intel/18.0.2
- cmake/3.16.1
-
-
- mvapich2/2.3.1
- pnetcdf/1.11
- parallel-netcdf/4.6.2
-
-
- mvapich2
- impi/18.0.2
- pnetcdf/1.11
- parallel-netcdf/4.6.2
-
-
- netcdf/4.3.3.1
-
-
-
- 256M
-
-
- /work/01118/tg803972/stampede2/ESMF-INSTALL/8.0.0bs38/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk
-
-
- ON
- SUMMARY
- /work/06242/tg855414/stampede2/FV3GFS/benchmark-inputs/2012010100/gfs/fcst
- /work/06242/tg855414/stampede2/FV3GFS/fix_am
- /work/06242/tg855414/stampede2/FV3GFS/addon
-
-
-
-
-
- Intel Xeon Phi 7250 ("Knights Landing") , batch system is SLURM
- LINUX
- intel
- impi,mvapich2
- $ENV{SCRATCH}
- /work/02503/edwardsj/CESM/inputdata
- /work/02503/edwardsj/CESM/inputdata/lmwg
- $ENV{WORK}/archive/$CASE
- /work/02503/edwardsj/CESM/cesm_baselines
- /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
- 4
- slurm
- cseg
- 256
- 64
-
- ibrun
-
-
- ibrun
-
-
- /opt/apps/lmod/lmod/init/perl
- /opt/apps/lmod/lmod/init/env_modules_python.py
- /opt/apps/lmod/lmod/init/sh
- /opt/apps/lmod/lmod/init/csh
- /opt/apps/lmod/lmod/libexec/lmod perl
- /opt/apps/lmod/lmod/libexec/lmod python
- module
- module
-
-
- TACC
- python/2.7.13
- intel/18.0.2
- cmake/3.16.1
-
-
- mvapich2/2.3.1
- pnetcdf/1.11
- parallel-netcdf/4.6.2
-
-
- mvapich2
- impi/18.0.2
- pnetcdf/1.11
- parallel-netcdf/4.6.2
-
-
- netcdf/4.3.3.1
-
-
-
- 256M
-
-
-
-
- Cray test platform
- swan.*
- CNL
- cray, intel
- mpt
-
- /lus/scratch/$USER
- /lus/scratch/$USER/inputdata
- /lus/scratch/$USER/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /lus/scratch/$USER/cesm/baselines
- /lus/scratch/$USER/cesm/tools/cprnc/cprnc
- 8
- pbs
- jedwards
- 64
- 64
- FALSE
-
- aprun
-
- -n {{ total_tasks }}
- -N {{ tasks_per_node }}
- --cc depth -d $OMP_NUM_THREADS
- -e OMP_STACKSIZE=64M
- -e OMP_NUM_THREADS=$OMP_NUM_THREADS
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/python.py
- /opt/modules/default/init/sh
- /opt/modules/default/init/csh
- /opt/modules/default/bin/modulecmd perl
- /opt/modules/default/bin/modulecmd python
- module
- module
-
- PrgEnv-cray/6.0.10
- cce cce/12.0.3
-
-
- perftools-base
- craype craype/2.7.10
-
-
- cray-libsci/20.09.1
-
-
- cray-mpich/7.7.18
-
-
- cray-netcdf-hdf5parallel
- cray-hdf5-parallel
- cray-parallel-netcdf
-
-
-
- ON
- SUMMARY
- /home/users/p62939/esmf/lib/libg/Unicos.cce.64.mpi.default/esmf.mk
-
-
-
-
- theia
- tfe
- LINUX
- intel
- impi
- nems
-
- /scratch4/NCEPDEV/nems/noscrub/$USER/cimecases
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/BASELINES
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/tools/cprnc
- make
- 8
- slurm
- cseg
- 24
- 24
- TRUE
-
- srun
-
- -n $TOTALPES
-
-
-
-
-
-
- /apps/lmod/lmod/init/sh
- /apps/lmod/lmod/init/csh
- module
- module
- /apps/lmod/lmod/libexec/lmod python
-
-
- intel/15.1.133
- impi/5.1.1.109
- netcdf/4.3.0
- pnetcdf
- /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/soft/modulefiles
- yaml-cpp
- esmf/8.0.0bs29g
-
-
-
- ON
- SUMMARY
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/benchmark-inputs/2012010100/gfs/fcst
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/fix_am
- /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/addon
-
-
-
-
- ALCF Cray XC* KNL, os is CNL, 64 pes/node, batch system is cobalt
- theta.*
- CNL
- intel,gnu,cray
- mpt
- CESM_Highres_Testing
- /projects/CESM_Highres_Testing/cesm/scratch/$USER
- /projects/CESM_Highres_Testing/cesm/inputdata
- /projects/CESM_Highres_Testing/cesm/inputdata/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- /projects/CESM_Highres_Testing/cesm/baselines
- /projects/CESM_Highres_Testing/cesm/tools/cprnc/cprnc
- 8
- cobalt_theta
- cseg
- 64
- 64
- TRUE
-
- aprun
-
- -n {{ total_tasks }}
- -N {{ tasks_per_node }}
- --cc depth -d $OMP_NUM_THREADS
- -e OMP_STACKSIZE=64M
- -e OMP_NUM_THREADS=$OMP_NUM_THREADS
-
-
-
- /opt/modules/default/init/perl.pm
- /opt/modules/default/init/python.py
- /opt/modules/default/init/sh
- /opt/modules/default/init/csh
- /opt/modules/default/bin/modulecmd perl
- /opt/modules/default/bin/modulecmd python
- module
- module
-
- craype-mic-knl
- PrgEnv-intel
- PrgEnv-cray
- PrgEnv-gnu
- intel
- cce
- cray-parallel-netcdf
- cray-hdf5-parallel
- pmi
- cray-libsci
- cray-mpich
- cray-netcdf
- cray-hdf5
- cray-netcdf-hdf5parallel
- craype
- papi
-
-
-
- PrgEnv-intel/6.0.4
- intel intel/18.0.0.128
- cray-libsci
-
-
-
- PrgEnv-cray/6.0.4
- cce cce/8.7.0
-
-
- PrgEnv-gnu/6.0.4
- gcc gcc/7.3.0
-
-
- papi/5.6.0.1
- craype craype/2.5.14
-
-
- cray-libsci/18.04.1
-
-
- cray-mpich/7.7.0
-
-
- cray-netcdf-hdf5parallel/4.4.1.1.6
- cray-hdf5-parallel/1.10.1.1
- cray-parallel-netcdf/1.8.1.3
-
-
-
-
-
- NCAR ARM platform, os is Linux, 64/128 pes/node, batch system is SLURM
- .*.thunder.ucar.edu
- LINUX
-
- armgcc,gnu,arm
- openmpi
- /glade/scratch/$USER
- $ENV{CESMDATAROOT}/inputdata
- $DIN_LOC_ROOT/CTSM_datm_forcing_data
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/cesm_baselines
- $ENV{CESMDATAROOT}/tools/cprnc/cprnc
- 16
- slurm
- cseg
- 64
- 128
-
- mpiexec
-
- --tag-output
- -np {{ total_tasks }}
-
-
-
- /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/perl
- /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/env_modules_python.py
- /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/csh
- /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/sh
- /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/libexec/lmod perl
- /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/libexec/lmod python
- module
- module
-
-
- ncarenv/1.3
- cmake/3.14.4
-
-
- arm/19.3
-
-
- armgcc/8.2.0
-
-
- gnu/9.1.0
- openblas/0.3.6
- esmf_libs/8.0.0
-
-
-
- ncarcompilers/0.5.0
-
-
- openmpi/4.0.3
- netcdf-mpi/4.7.1
- pnetcdf/1.12.1
-
-
- netcdf/4.7.1
-
-
- esmf-8.0.0-ncdfio-uni-g
-
-
- esmf-8.0.0-ncdfio-uni-O
-
-
-
- 256M
- $ENV{NETCDF}
-
-
-
-
-
- used for github testing
-
- $ENV{CIME_TEST_PLATFORM}:ubuntu-latest
- LINUX
-
- gnu
- openmpi
- none
-
- $ENV{HOME}/cesm/scratch
- $ENV{HOME}/cesm/inputdata
- $ENV{HOME}/cesm/inputdata/lmwg
- $ENV{HOME}/cesm/archive/$CASE
- $ENV{HOME}/cesm/cesm_baselines
-
- make
- 8
- none
- jedwards
- 4
- 4
- FALSE
-
- mpiexec
-
- -n {{ total_tasks }}
-
-
-
-
-
-
- CMCC Lenovo ThinkSystem SD530, os is Linux, 36 pes/node, batch system is LSF
- (login[1,2]-ib|n[0-9][0-9][0-9]-ib)
- LINUX
- intel
- impi,mpi-serial
- R000
- /work/$ENV{DIVISION}/$ENV{USER}/CESM2
- $ENV{CESMDATAROOT}/inputdata
- $DIN_LOC_ROOT/atm/datm7
- $CIME_OUTPUT_ROOT/archive/$CASE
- $ENV{CESMDATAROOT}/ccsm_baselines
- $ENV{CESMDATAROOT}/cesm2_tools/cprnc/cprnc
- /usr/lib64/perl5:/usr/share/perl5
- 8
- lsf
- cmcc
- 72
- 36
- TRUE
-
- mpirun
-
-
- /usr/share/Modules/init/perl.pm
- /usr/share/Modules/init/python.py
- /usr/share/Modules/init/csh
- /usr/share/Modules/init/sh
- /usr/bin/modulecmd perl
- /usr/bin/modulecmd python
- module
- module
-
-
-
-
- intel20.1/20.1.217
- intel20.1/szip/2.1.1
- cmake/3.17.3
- curl/7.70.0
-
-
- intel20.1/hdf5/1.12.0
- intel20.1/netcdf/C_4.7.4-F_4.5.3_CXX_4.3.1
-
-
- impi20.1/19.7.217
- impi20.1/hdf5/1.12.0
- impi20.1/netcdf/C_4.7.4-F_4.5.3_CXX_4.3.1
- impi20.1/parallel-netcdf/1.12.1
-
-
- impi20.1/esmf/8.1.1-intelmpi-64-g
-
-
- impi20.1/esmf/8.1.1-intelmpi-64-O
-
-
- intel20.1/esmf/8.1.1-mpiuni-64-g
-
-
- intel20.1/esmf/8.1.1-mpiuni-64-O
-
-
-
- /data/inputs/CESM/xios-2.5
-
-
- 1
- gpfs
- 0
- 60
- skx
- skx_avx512
- lsf
- 1
- {{ num_nodes }}
-
-
-
-
- BullSequana XH2000 AMD® Epyc™ "Rome" 2.2GHz, 128-way nodes, os is Linux, batch system is SLURM
- .*.?betzy\d?.sigma2.no
- LINUX
- intel
- openmpi,impi
- /cluster/work/users/$USER/noresm
- /cluster/shared/noresm/inputdata
- /cluster/shared/noresm/inputdata/atm/datm7
- /cluster/work/users/$USER/archive/$CASE
- /cluster/shared/noresm/noresm_baselines
- /cluster/shared/noresm/tools/cprnc/cprnc
- 8
- slurm
- noresmCommunity
- 128
- 128
- TRUE
-
-
-
-
- srun
-
-
- $ENV{LMOD_PKG}/init/perl
- $ENV{LMOD_PKG}/init/env_modules_python.py
- $ENV{LMOD_PKG}/init/csh
- $ENV{LMOD_PKG}/init/sh
- $ENV{LMOD_PKG}/libexec/lmod perl
- $ENV{LMOD_PKG}/libexec/lmod python
- module
- module
-
-
- StdEnv
- /cluster/shared/noresm/eb_mods/modules/all
- ESMF/8.4.1-iomkl-2021b-ParallelIO-2.5.10
- CMake/3.21.1-GCCcore-11.2.0
- Python/3.9.6-GCCcore-11.2.0
-
- ParMETIS/4.0.3-iompi-2021b
-
-
-
- StdEnv
- /cluster/shared/noresm/eb_mods/modules/all
- ESMF/8.4.1-intel-2021b-ParallelIO-2.5.10
- CMake/3.21.1-GCCcore-11.2.0
- Python/3.9.6-GCCcore-11.2.0
- ParMETIS/4.0.3-iimpi-2021b
-
-
-
- $ENV{EBROOTESMF}/lib/esmf.mk
- ON
- SUMMARY
- mlx5_0:1
- 64M
- 5
- 2
- $ENV{EBROOTPARALLELIO}/lib
- $ENV{EBROOTPARALLELIO}/include
- pnetcdf,netcdf,netcdf4p,netcdf4c
- 1
- self,vader
- 1
- 1
- ^fca
- 95
- 8
- ompio
- 1048576
- 8
- ^lockedfile,individual
- lustre
- on
-
-
-
-
- -1
-
-
-
-
- Lenovo NeXtScale M5, 32-way nodes, dual 16-core Xeon E5-2683@2.10GHz, 64 GiB per node, os is Linux, batch system is SLURM
- $HOSTNAME
- LINUX
- intel
- impi, mpi-serial
- /cluster/work/users/$USER/noresm
- /cluster/shared/noresm/inputdata
- /cluster/shared/noresm/inputdata/atm/datm7
- /cluster/work/users/$USER/archive/$CASE
- UNSET
- /cluster/shared/noresm/tools/cprnc/cprnc
- 8
- slurm
- noresmCommunity
- 32
- 32
- TRUE
-
-
-
-
- mpirun
-
-
- $ENV{LMOD_PKG}/init/perl
- $ENV{LMOD_PKG}/init/env_modules_python.py
- $ENV{LMOD_PKG}/init/csh
- $ENV{LMOD_PKG}/init/sh
- $ENV{LMOD_PKG}/libexec/lmod perl
- $ENV{LMOD_PKG}/libexec/lmod python
- module
- module
-
-
- StdEnv
- /cluster/shared/noresm/eb_mods/modules/all/
- CMake/3.23.1-GCCcore-11.3.0
- Python/3.10.4-GCCcore-11.3.0
- XML-LibXML/2.0207-GCCcore-11.3.0
- ESMF/8.4.1-intel-2022a-ParallelIO-2.5.10
-
-
-
-
- 256M
- lustre
- on
- $ENV{EBROOTESMF}/lib/esmf.mk
- $ENV{EBROOTESMF}/lib
- ON
- SUMMARY
- 2
- $ENV{EBROOTPARALLELIO}/lib
- $ENV{EBROOTPARALLELIO}/include
- pnetcdf,netcdf,netcdf4p,netcdf4c
-
-
- -1
-
-
-
-
- Hewlett Packard Enterprise - CentOS Linux release 7.6.1810 (Core)
- $HOSTNAME
- LINUX
- intel
- impi
- /cluster/work/users/$USER/noresm
- /cluster/shared/noresm/inputdata
- /cluster/shared/noresm/inputdata/atm/datm7
- /cluster/work/users/$USER/archive/$CASE
- UNSET
- UNSET
- 8
- slurm
- noresmCommunity
- 40
- 40
- TRUE
-
- mpirun
-
-
- $ENV{LMOD_PKG}/init/env_modules_python.py
- $ENV{LMOD_PKG}/init/sh
- $ENV{LMOD_PKG}/libexec/lmod python
- module
-
-
- StdEnv
- CMake/3.22.1-GCCcore-11.2.0
- XML-LibXML/2.0207-GCCcore-11.2.0
- ESMF/8.2.0-intel-2021b
-
-
-
- 256M
- lustre
- on
- $ENV{EBROOTESMF}/lib/esmf.mk
- $ENV{EBROOTESMF}/lib
- ON
- SUMMARY
-
-
- -1
-
-
${EXEROOT}/cesm.exe
diff --git a/machines/config_pio.xml b/machines/config_pio.xml
index 9cc84029..fc325d4b 100644
--- a/machines/config_pio.xml
+++ b/machines/config_pio.xml
@@ -78,13 +78,14 @@
-->
-
+
+
+ memhooks
+
+
+
+
+
+ ON
+ SUMMARY
+
+
+ -lcuda -lcudart
+
+
diff --git a/machines/eastwind/config_machines.xml b/machines/eastwind/config_machines.xml
new file mode 100644
index 00000000..e43aaf70
--- /dev/null
+++ b/machines/eastwind/config_machines.xml
@@ -0,0 +1,55 @@
+
+ PNL IBM Xeon cluster, os is Linux (pgi), batch system is SLURM
+ LINUX
+ pgi,intel
+ mvapich2,mvapich
+ /lustre/$USER
+ /lustre/tcraig/IRESM/inputdata
+ /lustre/tcraig/IRESM/inputdata/atm/datm7
+ /lustre/$USER/archive/$CASE
+ /lustre/tcraig/IRESM/ccsm_baselines
+ /lustre/tcraig/IRESM/tools/cprnc/cprnc
+ 8
+ slurm
+ tcraig -at- ucar.edu
+ 12
+ 12
+
+ srun
+
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets
+ --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+ srun
+
+ --mpi=none
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets
+ --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+ /etc/profile.d/modules.perl
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /share/apps/modules/Modules/3.2.7/bin/modulecmd perl
+ module
+ module
+
+
+ perl/5.20.7
+ cmake/3.0.0
+ pgi/15.5
+ mpi/mvapich2/1.5.1p1/pgi11.3
+ netcdf/4.1.2/pgi
+
+
+
+ 64M
+
+
diff --git a/machines/euler2/config_machines.xml b/machines/euler2/config_machines.xml
new file mode 100644
index 00000000..4d26fbd5
--- /dev/null
+++ b/machines/euler2/config_machines.xml
@@ -0,0 +1,62 @@
+
+ Euler II Linux Cluster ETH, 24 pes/node, InfiniBand, XeonE5_2680v3, batch system LSF
+ LINUX
+ intel,pgi
+ openmpi,mpich
+ /cluster/work/climate/$USER
+ /cluster/work/climate/cesm/inputdata
+ /cluster/work/climate/cesm/inputdata/atm/datm7
+ /cluster/work/climate/$USER/archive/$CASE
+ /cluster/work/climate/cesm/ccsm_baselines
+ /cluster/work/climate/cesm/tools/cprnc/cprnc
+ 1
+ lsf
+ urs.beyerle -at- env.ethz.ch
+ 24
+ 24
+
+ mpirun
+
+ -hostfile $ENV{PBS_JOBID}
+ -ppn $MAX_MPITASKS_PER_NODE
+ -n {{ total_tasks }}
+
+
+
+ mpirun
+
+
+
+
+ /cluster/apps/modules/init/python.py
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /cluster/apps/modules/bin/modulecmd python
+ module
+ module
+
+
+
+
+ new
+
+
+ intel/2018.1
+
+
+ netcdf/4.3.1
+
+
+ pgi/14.1
+
+
+ mvapich2/1.8.1
+
+
+ open_mpi/1.6.5
+
+
+
+ 64M
+
+
diff --git a/machines/euler3/config_machines.xml b/machines/euler3/config_machines.xml
new file mode 100644
index 00000000..712fa2d9
--- /dev/null
+++ b/machines/euler3/config_machines.xml
@@ -0,0 +1,66 @@
+
+
+ Euler III Linux Cluster ETH, 4 pes/node, Ethernet, XeonE3_1585Lv5, batch system LSF
+ LINUX
+ intel,pgi
+ openmpi,mpich
+ /cluster/work/climate/$USER
+ /cluster/work/climate/cesm/inputdata
+ /cluster/work/climate/cesm/inputdata/atm/datm7
+ /cluster/work/climate/$USER/archive/$CASE
+ /cluster/work/climate/cesm/ccsm_baselines
+ /cluster/work/climate/cesm/tools/cprnc/cprnc
+ 1
+ lsf
+ urs.beyerle -at- env.ethz.ch
+ 4
+ 4
+
+ mpirun
+
+ -hostfile $ENV{PBS_JOBID}
+ -ppn $MAX_MPITASKS_PER_NODE
+ -n {{ total_tasks }}
+
+
+
+ mpirun
+
+
+
+
+ /cluster/apps/modules/init/python.py
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /cluster/apps/modules/bin/modulecmd python
+ module
+ module
+
+
+
+
+ new
+
+
+ interconnect/ethernet
+
+
+ intel/2018.1
+
+
+ netcdf/4.3.1
+
+
+ pgi/14.1
+
+
+ mvapich2/1.8.1
+
+
+ open_mpi/1.6.5
+
+
+
+ 64M
+
+
diff --git a/machines/euler4/config_machines.xml b/machines/euler4/config_machines.xml
new file mode 100644
index 00000000..7a58b7a0
--- /dev/null
+++ b/machines/euler4/config_machines.xml
@@ -0,0 +1,51 @@
+
+
+ Euler IV Linux Cluster ETH, 36 pes/node, InfiniBand, XeonGold_6150, batch system LSF
+ LINUX
+ intel,pgi
+ openmpi,mpich
+ /cluster/work/climate/$USER
+ /cluster/work/climate/cesm/inputdata
+ /cluster/work/climate/cesm/inputdata/atm/datm7
+ /cluster/work/climate/$USER/archive/$CASE
+ /cluster/work/climate/cesm/ccsm_baselines
+ /cluster/work/climate/cesm/tools/cprnc/cprnc
+ 1
+ lsf
+ urs.beyerle -at- env.ethz.ch
+ 36
+ 36
+
+ mpirun
+
+ -hostfile $ENV{PBS_JOBID}
+ -ppn $MAX_MPITASKS_PER_NODE
+ -n {{ total_tasks }}
+
+
+
+ mpirun
+
+
+
+
+ /cluster/apps/modules/init/python.py
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /cluster/apps/modules/bin/modulecmd python
+ module
+ module
+
+
+
+
+ new
+
+
+ intel/2018.1
+
+
+
+ 64M
+
+
diff --git a/machines/fram/config_machines.xml b/machines/fram/config_machines.xml
new file mode 100644
index 00000000..762f3681
--- /dev/null
+++ b/machines/fram/config_machines.xml
@@ -0,0 +1,60 @@
+
+ Lenovo NeXtScale M5, 32-way nodes, dual 16-core Xeon E5-2683@2.10GHz, 64 GiB per node, os is Linux, batch system is SLURM
+ LINUX
+ intel
+ impi, mpi-serial
+ /cluster/work/users/$USER/noresm
+ /cluster/shared/noresm/inputdata
+ /cluster/shared/noresm/inputdata/atm/datm7
+ /cluster/work/users/$USER/archive/$CASE
+ UNSET
+ /cluster/shared/noresm/tools/cprnc/cprnc
+ 8
+ slurm
+ noresmCommunity
+ 32
+ 32
+ TRUE
+
+
+
+
+ mpirun
+
+
+ $ENV{LMOD_PKG}/init/perl
+ $ENV{LMOD_PKG}/init/env_modules_python.py
+ $ENV{LMOD_PKG}/init/csh
+ $ENV{LMOD_PKG}/init/sh
+ $ENV{LMOD_PKG}/libexec/lmod perl
+ $ENV{LMOD_PKG}/libexec/lmod python
+ module
+ module
+
+
+ StdEnv
+ /cluster/shared/noresm/eb_mods/modules/all/
+ CMake/3.23.1-GCCcore-11.3.0
+ Python/3.10.4-GCCcore-11.3.0
+ XML-LibXML/2.0207-GCCcore-11.3.0
+ ESMF/8.4.1-intel-2022a-ParallelIO-2.5.10
+
+
+
+
+ 256M
+ lustre
+ on
+ $ENV{EBROOTESMF}/lib/esmf.mk
+ $ENV{EBROOTESMF}/lib
+ ON
+ SUMMARY
+ 2
+ $ENV{EBROOTPARALLELIO}/lib
+ $ENV{EBROOTPARALLELIO}/include
+ pnetcdf,netcdf,netcdf4p,netcdf4c
+
+
+ -1
+
+
diff --git a/machines/frontera/config_machines.xml b/machines/frontera/config_machines.xml
new file mode 100644
index 00000000..3c4a324b
--- /dev/null
+++ b/machines/frontera/config_machines.xml
@@ -0,0 +1,89 @@
+
+ Intel Xeon Cascade Lake, 56 cores, batch system is SLURM
+ LINUX
+ intel
+ impi,mvapich
+ ATM20005
+ $ENV{SCRATCH}
+ /work/02503/edwardsj/CESM/inputdata/
+ /work/02503/edwardsj/CESM/inputdata/lmwg
+ $ENV{SCRATCH}/archive/$CASE
+ /work/02503/edwardsj/CESM/cesm_baselines
+ /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
+ 4
+ slurm
+ cseg
+ 112
+ 56
+
+ sbcast ${EXEROOT}/cesm.exe /tmp/cesm.exe; ibrun
+
+ -n {{ total_tasks }}
+
+ /tmp/cesm.exe
+
+
+ ibrun
+
+ -n {{ total_tasks }}
+
+
+
+ /opt/apps/lmod/lmod/init/perl
+ /opt/apps/lmod/lmod/init/env_modules_python.py
+ /opt/apps/lmod/lmod/init/sh
+ /opt/apps/lmod/lmod/init/csh
+ /opt/apps/lmod/lmod/libexec/lmod perl
+ /opt/apps/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ TACC
+ /scratch1/projects/compilers/modulefiles
+
+ cmake/3.24.2
+
+
+ impi/19.0.9
+ pnetcdf/1.12.3
+ parallel-netcdf/4.9.0
+
+
+ impi
+ mvapich2-x/2.3
+ pnetcdf/1.12.3
+ parallel-netcdf/4.9.0
+
+
+ netcdf/4.9.0
+
+
+
+ 256M
+ 20
+ /scratch3/projects/csa_gettelman/esmf-8.4.1b02_intel22.2.0_impi22.2.0/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk
+ ON
+ SUMMARY
+ /scratch3/projects/csa_gettelman/intel22.2.0_impi22.2.0/pio2_5_10_avx512
+
+
+ -prepend-rank
+ 4
+ 3
+ 3
+ 3
+ 2
+ enable
+
+
+
+ spread
+ hybrid
+ 0
+ 1
+ 4096
+ 4096
+ mvapich2_ssh
+
+
diff --git a/machines/gaea/config_machines.xml b/machines/gaea/config_machines.xml
new file mode 100644
index 00000000..c9182491
--- /dev/null
+++ b/machines/gaea/config_machines.xml
@@ -0,0 +1,63 @@
+
+ NOAA XE6, os is CNL, 24 pes/node, batch system is PBS
+ CNL
+ pgi
+ mpich
+ /lustre/fs/scratch/Julio.T.Bacmeister
+ /lustre/fs/scratch/Julio.T.Bacmeister/inputdata
+ /lustre/fs/scratch/Julio.T.Bacmeister/inputdata
+ /lustre/fs/scratch/Julio.T.Bacmeister/archive/$CASE
+ UNSET
+ UNSET
+ 8
+ pbs
+ julio -at- ucar.edu
+ 24
+ 24
+
+ aprun
+
+ -j {{ hyperthreading }}
+ -n {{ total_tasks }}
+ -S {{ tasks_per_numa }}
+ -N $MAX_MPITASKS_PER_NODE
+ -d $ENV{OMP_NUM_THREADS}
+
+
+
+ /opt/modules/default/init/perl.pm
+ /opt/modules/default/init/csh
+ /opt/modules/default/init/sh
+ /opt/modules/default/bin/modulecmd perl
+ module
+ module
+
+ PrgEnv-pgi
+ PrgEnv-cray
+ PrgEnv-gnu
+ pgi
+ cray
+
+
+ PrgEnv-pgi
+ pgi pgi/12.5.0
+
+
+ PrgEnv-gnu
+ torque
+
+
+ PrgEnv-cray/4.0.36
+ cce/8.0.2
+
+
+ torque/4.1.3
+ netcdf-hdf5parallel/4.2.0
+ parallel-netcdf/1.2.0
+
+
+
+ 64M
+ 1
+
+
diff --git a/machines/greenplanet-sib29/config_machines.xml b/machines/greenplanet-sib29/config_machines.xml
new file mode 100644
index 00000000..7fadfc50
--- /dev/null
+++ b/machines/greenplanet-sib29/config_machines.xml
@@ -0,0 +1,78 @@
+
+ UCI Linux Cluster; 16 pes/node, batch system is slurm
+ LINUX
+
+
+ intel
+ openmpi
+
+
+
+
+
+ /DFS-L/SCRATCH/moore/$USER/cesm_runs
+ /DFS-L/DATA/moore/cesm/inputdata
+ /DFS-L/DATA/moore/cesm/inputdata
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /DFS-L/DATA/moore/cesm/baselines
+
+ /DFS-L/DATA/moore/cesm/tools/cprnc/cprnc
+ gmake
+ 16
+ slurm
+ mlevy@ucar.edu
+ 16
+ 16
+ FALSE
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/init/csh
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/libexec/lmod perl
+ /usr/share/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+
+
+ intel/2018.3
+ netcdf/4.7.0
+
+
+ openmpi/3.1.6
+ pnetcdf/1.10.0
+
+
+
+
+ 256M
+ 16
+
+
+
+ -1
+
+
diff --git a/machines/greenplanet-sky24/config_machines.xml b/machines/greenplanet-sky24/config_machines.xml
new file mode 100644
index 00000000..9b46fb06
--- /dev/null
+++ b/machines/greenplanet-sky24/config_machines.xml
@@ -0,0 +1,79 @@
+
+ UCI Linux Cluster; 40 pes/node, batch system is slurm
+ LINUX
+
+
+ intel
+ openmpi
+
+
+
+
+
+ /DFS-L/SCRATCH/moore/$USER/cesm_runs
+ /DFS-L/DATA/moore/cesm/inputdata
+ /DFS-L/DATA/moore/cesm/inputdata
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /DFS-L/DATA/moore/cesm/baselines
+
+ /DFS-L/DATA/moore/cesm/tools/cprnc/cprnc
+ gmake
+ 16
+ slurm
+ mlevy@ucar.edu
+ 40
+ 40
+ FALSE
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/init/csh
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/libexec/lmod perl
+ /usr/share/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+
+
+ intel/2018.3
+ netcdf/4.7.0
+
+
+ openmpi/3.1.6
+ pnetcdf/1.10.0
+
+
+
+
+ 256M
+ 16
+
+
+
+ -1
+
+
+
diff --git a/machines/gust/config_machines.xml b/machines/gust/config_machines.xml
new file mode 100644
index 00000000..7998a3d3
--- /dev/null
+++ b/machines/gust/config_machines.xml
@@ -0,0 +1,110 @@
+
+ NCAR AMD EPYC test system, 16 CPU nodes and 2 GPU nodes
+ CNL
+ intel,gnu,cray,nvhpc,intel-oneapi,intel-classic
+ mpich
+ $ENV{SCRATCH}
+ /glade/p/cesmdata/inputdata
+ /glade/p/cesmdata/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /glade/p/cesmdata/ccsm_baselines
+ /glade/p/cesmdata/cprnc/cprnc
+ 16
+ pbs
+ cseg
+ 128
+ 4
+ 128
+ 64
+ none,a100
+ none,openacc,openmp,combined
+ TRUE
+
+ mpiexec
+
+ --label
+ -n {{ total_tasks }}
+
+
+
+ $ENV{LMOD_ROOT}/lmod/init/perl
+ $ENV{LMOD_ROOT}/lmod/init/env_modules_python.py
+ $ENV{LMOD_ROOT}/lmod/init/sh
+ $ENV{LMOD_ROOT}/lmod/init/csh
+ $ENV{LMOD_ROOT}/lmod/libexec/lmod perl
+ $ENV{LMOD_ROOT}/lmod/libexec/lmod python
+ module
+ module
+
+ cesmdev/1.0
+ ncarenv/23.04
+
+ craype
+
+
+ intel/2023.0.0
+ mkl
+
+
+ intel-oneapi/2023.0.0
+ mkl
+
+
+ intel-classic/2023.0.0
+ mkl
+
+
+ cce/15.0.1
+ cray-libsci/23.02.1.1
+
+
+ gcc/12.2.0
+ cray-libsci/23.02.1.1
+
+
+ nvhpc/23.1
+
+
+ ncarcompilers/0.8.0
+ cmake
+
+
+ cray-mpich/8.1.25
+
+
+ mpi-serial/2.3.0
+
+
+
+ netcdf/4.9.1
+
+
+
+ netcdf-mpi/4.9.1
+ parallel-netcdf/1.12.3
+
+
+
+ parallelio/2.5.10-debug
+ esmf/8.5.0b21-debug
+
+
+
+ parallelio/2.5.10
+ esmf/8.5.0b21
+
+
+
+
+ 64M
+ hybrid
+
+
+ *:romio_cb_read=enable:romio_cb_write=enable:striping_factor=2
+
+
+
+ ON
+ SUMMARY
+
+
diff --git a/machines/homebrew/config_machines.xml b/machines/homebrew/config_machines.xml
new file mode 100644
index 00000000..95adb826
--- /dev/null
+++ b/machines/homebrew/config_machines.xml
@@ -0,0 +1,51 @@
+
+
+
+ Customize these fields as appropriate for your system,
+ particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the
+ number of cores on your machine. You may also want to change
+ instances of '$ENV{HOME}/projects' to your desired directory
+ organization. You can use this in either of two ways: (1)
+ Without making any changes, by adding `--machine homebrew` to
+ create_newcase or create_test, or (2) Copying this into a
+ config_machines.xml file in your personal .cime directory and
+ then changing the machine name (MACH="homebrew") to
+ your machine name and the NODENAME_REGEX to something matching
+ your machine's hostname. With (2), you should not need the
+ `--machine` argument, because the machine should be determined
+ automatically. However, with (2), you will also need to copy the
+ homebrew-specific settings in config_compilers.xml into a
+ config_compilers.xml file in your personal .cime directory, again
+ changing the machine name (MACH="homebrew") to your machine name.
+
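As a minimal sketch of option (1) above, assuming the commands are run from CIME's scripts directory (the case path, resolution f19_g17, and compset X below are placeholder values chosen for illustration, not values from this change):

    # create and run a small test case against the homebrew machine definition
    ./create_newcase --case $HOME/projects/scratch/mytest \
        --res f19_g17 --compset X --machine homebrew --run-unsupported
    cd $HOME/projects/scratch/mytest
    ./case.setup && ./case.build && ./case.submit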
+
+
+ Darwin
+ gnu
+ mpich
+ $ENV{HOME}/projects/scratch
+ $ENV{HOME}/projects/cesm-inputdata
+ $ENV{HOME}/projects/ptclm-data
+ $ENV{HOME}/projects/scratch/archive/$CASE
+ $ENV{HOME}/projects/baselines
+ $ENV{HOME}/cesm/tools/cprnc/cprnc
+ make
+ 4
+ none
+ __YOUR_NAME_HERE__
+ 8
+ 4
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -prepend-rank
+
+
+
+
+ /usr/local
+
+
diff --git a/machines/izumi/config_machines.xml b/machines/izumi/config_machines.xml
new file mode 100644
index 00000000..10a0e078
--- /dev/null
+++ b/machines/izumi/config_machines.xml
@@ -0,0 +1,133 @@
+
+ NCAR CGD Linux Cluster, 48 pes/node, batch system is PBS
+ LINUX
+ intel,nag,gnu
+ mvapich2,openmpi
+ /scratch/cluster/$USER
+ /fs/cgd/csm/inputdata
+ /project/tss
+ /scratch/cluster/$USER/archive/$CASE
+ /fs/cgd/csm/ccsm_baselines
+ /fs/cgd/csm/tools/cime/tools/cprnc/cprnc
+ gmake --output-sync
+ 4
+ pbs
+ cseg
+ 48
+ 48
+
+ mpiexec
+
+ --machinefile $ENV{PBS_NODEFILE}
+ -n {{ total_tasks }}
+
+
+
+ mpiexec
+
+ -n {{ total_tasks }}
+ --tag-output
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/sh
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+ lang/python/3.11.5
+ /fs/cgd/data0/modules/modulefiles
+
+
+ compiler/gnu/9.3.0
+ tool/netcdf/4.7.4/gnu/9.3.0
+
+
+ openmpi/4.0.3/gnu/9.3.0
+
+
+ mpi/2.3.3/gnu/9.3.0
+
+
+ compiler/intel/20.0.1
+ tool/netcdf/4.7.4/intel/20.0.1
+
+
+ mpi/2.3.3/intel/20.0.1
+
+
+ compiler/nag/6.2-8.1.0
+ tool/netcdf/c4.6.1-f4.4.4/nag-gnu/6.2-8.1.0
+
+
+ mpi/2.3.3/nag/6.2
+
+
+ mpi
+
+
+ esmfpkgs/gfortran/9.3.0/esmf-8.5.0-ncdfio-mvapich2-O
+ mvapich2/2.3.3/gnu/9.3.0/pio/2_5_10
+
+
+ esmfpkgs/gfortran/9.3.0/esmf-8.5.0-ncdfio-mvapich2-g
+ mvapich2/2.3.3/gnu/9.3.0/pio/2_5_10
+
+
+ esmfpkgs/gfortran/9.3.0/esmf-8.5.0-ncdfio-mpiuni-O
+ mpi-serial/2.3.0/gnu/9.3.0/pio/2_5_10
+
+
+ esmfpkgs/gfortran/9.3.0/esmf-8.5.0-ncdfio-mpiuni-g
+ mpi-serial/2.3.0/gnu/9.3.0/pio/2_5_10
+
+
+
+ esmfpkgs/nag/6.2/esmf-8.5.0-ncdfio-mvapich2-O
+ mvapich2/2.3.3/nag/6.2/pio/2_5_10
+
+
+ esmfpkgs/nag/6.2/esmf-8.5.0-ncdfio-mvapich2-g
+ mvapich2/2.3.3/nag/6.2/pio/2_5_10
+
+
+ esmfpkgs/nag/6.2/esmf-8.5.0-ncdfio-mpiuni-g
+ mpi-serial/2.3.0/nag/6.2/pio/2_5_10
+
+
+ esmfpkgs/nag/6.2/esmf-8.5.0-ncdfio-mpiuni-O
+ mpi-serial/2.3.0/nag/6.2/pio/2_5_10
+
+
+
+ esmfpkgs/intel/20.0.1/esmf-8.5.0-ncdfio-mpiuni-g
+ mpi-serial/2.3.0/intel/20.0.1/pio/2_5_10
+
+
+ esmfpkgs/intel/20.0.1/esmf-8.5.0-ncdfio-mpiuni-O
+ mpi-serial/2.3.0/intel/20.0.1/pio/2_5_10
+
+
+ esmfpkgs/intel/20.0.1/esmf-8.5.0-ncdfio-mvapich2-g
+ mvapich2/2.3.3/intel/20.0.1/pio/2_5_10
+
+
+ esmfpkgs/intel/20.0.1/esmf-8.5.0-ncdfio-mvapich2-O
+ mvapich2/2.3.3/intel/20.0.1/pio/2_5_10
+
+
+
+ 64M
+
+ $ENV{PATH}:/cluster/torque/bin
+
+
+ -1
+
+
+
diff --git a/machines/lawrencium-lr3/config_machines.xml b/machines/lawrencium-lr3/config_machines.xml
new file mode 100644
index 00000000..68af79ed
--- /dev/null
+++ b/machines/lawrencium-lr3/config_machines.xml
@@ -0,0 +1,53 @@
+
+
+ Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM
+ LINUX
+ intel
+ openmpi
+ /global/scratch/$ENV{USER}
+ /global/scratch/$ENV{USER}/cesm_input_datasets/
+ /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7
+ $CIME_OUTPUT_ROOT/cesm_archive/$CASE
+ $CIME_OUTPUT_ROOT/cesm_baselines
+ /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc
+ 4
+ slurm
+ rgknox at lbl dot gov and glemieux at lbl dot gov
+ 16
+ 16
+ TRUE
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_MPITASKS_PER_NODE
+
+
+
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /usr/Modules/init/perl.pm
+ /usr/Modules/python.py
+ module
+ module
+ /usr/Modules/bin/modulecmd perl
+ /usr/Modules/bin/modulecmd python
+
+
+ cmake
+ perl xml-libxml switch python/3.6
+
+
+ intel/2016.4.072
+ mkl
+
+
+ netcdf/4.4.1.1-intel-s
+
+
+ openmpi
+ netcdf/4.4.1.1-intel-p
+
+
+
+
diff --git a/machines/lobata/config_machines.xml b/machines/lobata/config_machines.xml
new file mode 100644
index 00000000..bdca2528
--- /dev/null
+++ b/machines/lobata/config_machines.xml
@@ -0,0 +1,44 @@
+
+ FATES development machine at LBNL, System76 Thelio Massive Workstation Pop!_OS 20.04
+ LINUX
+ gnu
+ openmpi
+ $ENV{HOME}/scratch/
+ /data/cesmdataroot/inputdata
+ /data/cesmdataroot/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{HOME}/scratch/ctsm-baselines
+ /home/glemieux/Repos/cime/tools/cprnc/cprnc
+ make
+ 16
+ none
+ glemieux at lbl dot gov
+ 4
+ 4
+ FALSE
+
+ mpirun
+
+ -np {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_node }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
+
+
+
+ /usr/share/modules/init/python.py
+ /usr/share/modules/init/perl.pm
+ /usr/share/modules/init/sh
+ /usr/share/modules/init/csh
+ /usr/bin/modulecmd python
+ /usr/bin/modulecmd perl
+ module
+ module
+
+
+ hdf5
+ netcdf-c
+ netcdf-fortran
+ esmf
+
+
+
+
diff --git a/machines/lonestar5/config_machines.xml b/machines/lonestar5/config_machines.xml
new file mode 100644
index 00000000..6c9f368f
--- /dev/null
+++ b/machines/lonestar5/config_machines.xml
@@ -0,0 +1,54 @@
+
+ Lonestar5 cluster at TACC, OS is Linux (intel), batch system is SLURM
+ LINUX
+ intel
+ mpich
+ $ENV{SCRATCH}
+ /work/02503/edwardsj/CESM/inputdata
+ /work/02503/edwardsj/CESM/inputdata/lmwg
+ $CIME_OUTPUT_ROOT/cesm_archive/$CASE
+ /work/02503/edwardsj/CESM/cesm_baselines
+ /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
+ 4
+ slurm
+ cseg
+ 48
+ 24
+ FALSE
+
+ srun
+
+ --ntasks={{ total_tasks }}
+
+
+
+
+ /opt/apps/lmod/lmod/init/perl
+ /opt/apps/lmod/lmod/init/env_modules_python.py
+ /opt/apps/lmod/lmod/init/sh
+ /opt/apps/lmod/lmod/init/csh
+ /opt/apps/lmod/lmod/libexec/lmod perl
+ /opt/apps/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+
+ cmake
+
+
+ intel/18.0.2
+
+
+ netcdf/4.6.2
+
+
+ cray_mpich
+
+
+ pnetcdf/1.8.0
+ parallel-netcdf/4.6.2
+
+
+
+
diff --git a/machines/melvin/config_machines.xml b/machines/melvin/config_machines.xml
new file mode 100644
index 00000000..4439d06b
--- /dev/null
+++ b/machines/melvin/config_machines.xml
@@ -0,0 +1,70 @@
+
+ Linux workstation for Jenkins testing
+ LINUX
+ sonproxy.sandia.gov:80
+ gnu
+ openmpi
+ /sems-data-store/ACME/timings
+ $ENV{HOME}/acme/scratch
+ /sems-data-store/ACME/inputdata
+ /sems-data-store/ACME/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /sems-data-store/ACME/baselines
+ /sems-data-store/ACME/cprnc/build/cprnc
+ make
+ 32
+ acme_developer
+ none
+ jgfouca at sandia dot gov
+ 64
+ 64
+
+ mpirun
+
+ -np {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
+
+
+
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/bin/modulecmd python
+ /usr/bin/modulecmd perl
+ module
+ module
+
+
+ sems-env
+ acme-env
+ sems-git
+ sems-python/2.7.9
+ sems-cmake/2.8.12
+
+
+ sems-gcc/5.3.0
+
+
+ sems-intel/16.0.3
+
+
+ sems-netcdf/4.4.1/exo
+ acme-pfunit/3.2.8/base
+
+
+ sems-openmpi/1.8.7
+ sems-netcdf/4.4.1/exo_parallel
+
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ 64M
+ spread
+ threads
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+
+
+
diff --git a/machines/modex/config_machines.xml b/machines/modex/config_machines.xml
new file mode 100644
index 00000000..e3d64699
--- /dev/null
+++ b/machines/modex/config_machines.xml
@@ -0,0 +1,57 @@
+
+ Medium-sized Linux cluster at BNL, torque scheduler.
+ LINUX
+ gnu
+ openmpi,mpi-serial
+ /data/$ENV{USER}
+ /data/Model_Data/cesm_input_datasets/
+ /data/Model_Data/cesm_input_datasets/atm/datm7
+ $CIME_OUTPUT_ROOT/cesm_archive/$CASE
+ $CIME_OUTPUT_ROOT/cesm_baselines
+ /data/software/cesm_tools/cprnc/cprnc
+ 4
+ pbs
+ rgknox at lbl dot gov and sserbin at bnl dot gov
+ 12
+ 12
+ 12
+ FALSE
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_TASKS_PER_NODE
+
+
+
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ module
+ module
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+
+
+ perl/5.22.1
+ libxml2/2.9.2
+ maui/3.3.1
+ python/2.7.13
+
+
+ gcc/5.4.0
+ gfortran/5.4.0
+ hdf5/1.8.19fates
+ netcdf/4.4.1.1-gnu540-fates
+ openmpi/2.1.1-gnu540
+
+
+ openmpi/2.1.1-gnu540
+
+
+
+ /data/software/hdf5/1.8.19fates
+ /data/software/netcdf/4.4.1.1-gnu540-fates
+
+
diff --git a/machines/mpi_run_gpu.casper b/machines/mpi_run_gpu.casper
deleted file mode 100755
index ade12d50..00000000
--- a/machines/mpi_run_gpu.casper
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-unset CUDA_VISIBLE_DEVICES
-let dev_id=$OMPI_COMM_WORLD_LOCAL_RANK%{{ ngpus_per_node }}
-export ACC_DEVICE_NUM=$dev_id
-export CUDA_VISIBLE_DEVICES=$dev_id
-exec $*
diff --git a/machines/olympus/config_machines.xml b/machines/olympus/config_machines.xml
new file mode 100644
index 00000000..db7da51c
--- /dev/null
+++ b/machines/olympus/config_machines.xml
@@ -0,0 +1,45 @@
+
+ PNL cluster, os is Linux (pgi), batch system is SLURM
+ LINUX
+ pgi
+ mpich
+ /pic/scratch/$USER
+ /pic/scratch/tcraig/IRESM/inputdata
+ /pic/scratch/tcraig/IRESM/inputdata/atm/datm7
+ /pic/scratch/$USER/archive/$CASE
+ /pic/scratch/tcraig/IRESM/ccsm_baselines
+ /pic/scratch/tcraig/IRESM/tools/cprnc/cprnc
+ 8
+ slurm
+ tcraig -at- ucar.edu
+ 32
+ 32
+ FALSE
+
+ mpiexec_mpt
+
+ --mpi=none
+ -n={{ total_tasks }}
+ --kill-on-bad-exit
+
+
+
+ /share/apps/modules/Modules/3.2.7/init/perl.pm
+ /share/apps/modules/Modules/3.2.7/init/csh
+ /share/apps/modules/Modules/3.2.7/init/sh
+ /share/apps/modules/Modules/3.2.7/bin/modulecmd perl
+ module
+ module
+
+
+ precision/i4
+ pgi/11.8
+ mvapich2/1.7
+ netcdf/4.1.3
+
+
+
+ 64M
+
+
+
diff --git a/machines/perlmutter/config_machines.xml b/machines/perlmutter/config_machines.xml
new file mode 100644
index 00000000..a89159b8
--- /dev/null
+++ b/machines/perlmutter/config_machines.xml
@@ -0,0 +1,94 @@
+
+ NERSC EX AMD EPYC, os is CNL, 64 pes/node, batch system is Slurm
+ CNL
+ gnu,cray,nvidia,aocc
+ mpich
+ mp9_g
+ $ENV{SCRATCH}
+ /global/cfs/cdirs/ccsm1/inputdata
+ /global/cfs/cdirs/ccsm1/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /global/cfs/cdirs/ccsm1/ccsm_baselines
+ /global/cfs/cdirs/ccsm1/tools/cprnc.perlmutter/cprnc
+ 8
+ slurm
+ cseg
+ 128
+ 4
+ 64
+ TRUE
+
+ srun
+
+ --label
+ -n {{ total_tasks }}
+ -c {{ srun_binding }}
+
+
+
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ /usr/share/lmod/lmod/libexec/lmod perl
+ /usr/share/lmod/lmod/libexec/lmod python
+ module
+ module
+
+ PrgEnv-nvidia
+ PrgEnv-cray
+ PrgEnv-aocc
+ PrgEnv-gnu
+ nvidia
+ cce
+ gnu
+ aocc
+ cray-parallel-netcdf
+ cray-hdf5-parallel
+ cray-libsci
+ cray-mpich
+ cray-hdf5
+ cray-netcdf-hdf5parallel
+ cray-netcdf
+ craype
+
+
+
+ PrgEnv-cray
+ cce cce/12.0.3
+
+
+ PrgEnv-gnu
+ gcc gcc/11.2.0
+
+
+ craype craype/2.7.10
+
+
+ cray-libsci/21.08.1.2
+
+
+ cray-mpich/8.1.9
+
+
+ cray-netcdf-hdf5parallel
+ cray-hdf5-parallel
+ cray-parallel-netcdf
+ cray-hdf5/1.12.0.7
+ cray-netcdf/4.7.4.7
+
+
+ cray-hdf5-parallel/1.12.0.7
+ cray-netcdf-hdf5parallel/4.7.4.7
+ cray-parallel-netcdf/1.12.1.7
+
+
+ cmake/3.20.5
+
+
+
+ 256M
+
+
+
diff --git a/machines/pleiades-bro/config_machines.xml b/machines/pleiades-bro/config_machines.xml
new file mode 100644
index 00000000..333e06d4
--- /dev/null
+++ b/machines/pleiades-bro/config_machines.xml
@@ -0,0 +1,54 @@
+
+ NASA/AMES Linux Cluster, Linux (x86_64), 2.4 GHz Broadwell Intel Xeon E5-2680v4 processors, 28 pes/node (two 14-core processors) and 128 GB of memory/node, batch system is PBS
+ LINUX
+ intel
+ mpt
+ /nobackup/$USER
+ /nobackup/fvitt/csm/inputdata
+ /nobackup/fvitt/csm/inputdata/atm/datm7
+ /nobackup/$USER/archive/$CASE
+ /nobackup/fvitt/cesm_baselines
+ /u/fvitt/bin/cprnc
+ 8
+ pbs
+ fvitt -at- ucar.edu
+ 28
+ 28
+ TRUE
+
+ mpiexec_mpt
+
+ -n {{ total_tasks }}
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/python.py
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+ nas
+ pkgsrc
+ python3
+ comp-intel/2020.4.304
+ mpi-hpe/mpt.2.25
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
+
+
+
+ 1024
+ 100000
+ 16
+ 256M
+ /home6/fvitt/ESMFv8.4.1/lib/libO/Linux.intel.64.mpt.default/esmf.mk
+
+
+
diff --git a/machines/pleiades-has/config_machines.xml b/machines/pleiades-has/config_machines.xml
new file mode 100644
index 00000000..9c274c94
--- /dev/null
+++ b/machines/pleiades-has/config_machines.xml
@@ -0,0 +1,53 @@
+
+ NASA/AMES Linux Cluster, Linux (x86_64), 2.5 GHz Haswell Intel Xeon E5-2680v3 processors, 24 pes/node (two 12-core processors) and 128 GB of memory/node, batch system is PBS
+ LINUX
+ intel
+ mpt
+ /nobackup/$USER
+ /nobackup/fvitt/csm/inputdata
+ /nobackup/fvitt/csm/inputdata/atm/datm7
+ /nobackup/$USER/archive/$CASE
+ /nobackup/fvitt/cesm_baselines
+ /u/fvitt/bin/cprnc
+ 8
+ pbs
+ fvitt -at- ucar.edu
+ 24
+ 24
+ TRUE
+
+ mpiexec_mpt
+
+ -n {{ total_tasks }}
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/python.py
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+ nas
+ pkgsrc
+ python3
+ comp-intel/2020.4.304
+ mpi-hpe/mpt.2.25
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
+
+
+
+ 1024
+ 100000
+ 16
+ 256M
+ /home6/fvitt/ESMFv8.4.1/lib/libO/Linux.intel.64.mpt.default/esmf.mk
+
+
diff --git a/machines/pleiades-ivy/config_machines.xml b/machines/pleiades-ivy/config_machines.xml
new file mode 100644
index 00000000..cf231792
--- /dev/null
+++ b/machines/pleiades-ivy/config_machines.xml
@@ -0,0 +1,53 @@
+
+ NASA/AMES Linux Cluster, Linux (x86_64), Altix ICE, 2.8 GHz Ivy Bridge processors, 20 cores/node and 3.2 GB of memory per core, batch system is PBS
+ LINUX
+ intel
+ mpt
+ /nobackup/$USER
+ /nobackup/fvitt/csm/inputdata
+ /nobackup/fvitt/csm/inputdata/atm/datm7
+ /nobackup/$USER/archive/$CASE
+ /nobackup/fvitt/cesm_baselines
+ /u/fvitt/bin/cprnc
+ 8
+ pbs
+ fvitt -at- ucar.edu
+ 20
+ 20
+ TRUE
+
+ mpiexec_mpt
+
+ -n {{ total_tasks }}
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/python.py
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+ nas
+ pkgsrc
+ python3
+ comp-intel/2020.4.304
+ mpi-hpe/mpt.2.25
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
+
+
+
+ 1024
+ 100000
+ 16
+ 256M
+ /home6/fvitt/ESMFv8.4.1/lib/libO/Linux.intel.64.mpt.default/esmf.mk
+
+
diff --git a/machines/pleiades-san/config_machines.xml b/machines/pleiades-san/config_machines.xml
new file mode 100644
index 00000000..8f484578
--- /dev/null
+++ b/machines/pleiades-san/config_machines.xml
@@ -0,0 +1,54 @@
+
+ NASA/AMES Linux Cluster, Linux (x86_64), Altix ICE, 2.6 GHz Sandy Bridge processors, 16 cores/node and 32 GB of memory, batch system is PBS
+ LINUX
+ intel
+ mpt
+ /nobackup/$USER
+ /nobackup/fvitt/csm/inputdata
+ /nobackup/fvitt/csm/inputdata/atm/datm7
+ /nobackup/$USER/archive/$CASE
+ /nobackup/fvitt/cesm_baselines
+ /u/fvitt/bin/cprnc
+ 8
+ pbs
+ fvitt -at- ucar.edu
+ 16
+ 16
+ TRUE
+
+ mpiexec_mpt
+
+ -n {{ total_tasks }}
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/python.py
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+ nas
+ pkgsrc
+ python3
+ comp-intel/2020.4.304
+ mpi-hpe/mpt.2.25
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
+
+
+
+ 1024
+ 100000
+ 16
+ 256M
+ /home6/fvitt/ESMFv8.4.1/lib/libO/Linux.intel.64.mpt.default/esmf.mk
+
+
+
diff --git a/machines/saga/config_machines.xml b/machines/saga/config_machines.xml
new file mode 100644
index 00000000..fa3592c1
--- /dev/null
+++ b/machines/saga/config_machines.xml
@@ -0,0 +1,46 @@
+
+ Hewlett Packard Enterprise - CentOS Linux release 7.6.1810 (Core)
+ LINUX
+ intel
+ impi
+ /cluster/work/users/$USER/noresm
+ /cluster/shared/noresm/inputdata
+ /cluster/shared/noresm/inputdata/atm/datm7
+ /cluster/work/users/$USER/archive/$CASE
+ UNSET
+ UNSET
+ 8
+ slurm
+ noresmCommunity
+ 40
+ 40
+ TRUE
+
+ mpirun
+
+
+ $ENV{LMOD_PKG}/init/env_modules_python.py
+ $ENV{LMOD_PKG}/init/sh
+ $ENV{LMOD_PKG}/libexec/lmod python
+ module
+
+
+ StdEnv
+ CMake/3.22.1-GCCcore-11.2.0
+ XML-LibXML/2.0207-GCCcore-11.2.0
+ ESMF/8.2.0-intel-2021b
+
+
+
+ 256M
+ lustre
+ on
+ $ENV{EBROOTESMF}/lib/esmf.mk
+ $ENV{EBROOTESMF}/lib
+ ON
+ SUMMARY
+
+
+ -1
+
+
diff --git a/machines/sandia-srn-sems/config_machines.xml b/machines/sandia-srn-sems/config_machines.xml
new file mode 100644
index 00000000..8ead7d8f
--- /dev/null
+++ b/machines/sandia-srn-sems/config_machines.xml
@@ -0,0 +1,52 @@
+
+
+ Linux workstation at Sandia on SRN with SEMS TPL modules
+ LINUX
+ wwwproxy.sandia.gov:80
+ gnu
+ openmpi
+ /sems-data-store/ACME/timings
+ $ENV{HOME}/acme/scratch
+ /sems-data-store/ACME/inputdata
+ /sems-data-store/ACME/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /sems-data-store/ACME/baselines
+ /sems-data-store/ACME/cprnc/build/cprnc
+ make
+ 32
+ acme_developer
+ none
+ jgfouca at sandia dot gov
+ 64
+ 64
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/bin/modulecmd python
+ /usr/bin/modulecmd perl
+ module
+ module
+
+
+ sems-env
+ sems-git
+ sems-python/2.7.9
+ sems-gcc/5.1.0
+ sems-openmpi/1.8.7
+ sems-cmake/2.8.12
+ sems-netcdf/4.3.2/parallel
+
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ $ENV{SEMS_NETCDF_ROOT}
+
+
diff --git a/machines/sandiatoss3/config_machines.xml b/machines/sandiatoss3/config_machines.xml
new file mode 100644
index 00000000..0885838a
--- /dev/null
+++ b/machines/sandiatoss3/config_machines.xml
@@ -0,0 +1,63 @@
+
+ SNL cluster
+ LINUX
+ wwwproxy.sandia.gov:80
+ intel
+ openmpi
+ /projects/ccsm/timings
+ /gscratch/$USER/acme_scratch/$MACH
+ /projects/ccsm/inputdata
+ /projects/ccsm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /projects/ccsm/ccsm_baselines
+ /projects/ccsm/cprnc/build/cprnc_wrap
+ 8
+ acme_integration
+ slurm
+ jgfouca at sandia dot gov
+ 16
+ 16
+ TRUE
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_MPITASKS_PER_NODE
+
+
+
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/bin/modulecmd python
+ /usr/bin/modulecmd perl
+ module
+ module
+
+
+ sems-env
+ sems-git
+ sems-python/2.7.9
+ gnu/4.9.2
+ intel/intel-15.0.3.187
+ libraries/intel-mkl-15.0.2.164
+ libraries/intel-mkl-15.0.2.164
+
+
+ openmpi-intel/1.8
+ sems-hdf5/1.8.12/parallel
+ sems-netcdf/4.3.2/parallel
+ sems-hdf5/1.8.12/base
+ sems-netcdf/4.3.2/base
+
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ 64M
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+
+
diff --git a/machines/stampede2-knl/config_machines.xml b/machines/stampede2-knl/config_machines.xml
new file mode 100644
index 00000000..c72ab44b
--- /dev/null
+++ b/machines/stampede2-knl/config_machines.xml
@@ -0,0 +1,57 @@
+
+ Intel Xeon Phi 7250 ("Knights Landing"), batch system is SLURM
+ LINUX
+ intel
+ impi,mvapich2
+ $ENV{SCRATCH}
+ /work/02503/edwardsj/CESM/inputdata
+ /work/02503/edwardsj/CESM/inputdata/lmwg
+ $ENV{WORK}/archive/$CASE
+ /work/02503/edwardsj/CESM/cesm_baselines
+ /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
+ 4
+ slurm
+ cseg
+ 256
+ 64
+
+ ibrun
+
+
+ ibrun
+
+
+ /opt/apps/lmod/lmod/init/perl
+ /opt/apps/lmod/lmod/init/env_modules_python.py
+ /opt/apps/lmod/lmod/init/sh
+ /opt/apps/lmod/lmod/init/csh
+ /opt/apps/lmod/lmod/libexec/lmod perl
+ /opt/apps/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ TACC
+ python/2.7.13
+ intel/18.0.2
+ cmake/3.16.1
+
+
+ mvapich2/2.3.1
+ pnetcdf/1.11
+ parallel-netcdf/4.6.2
+
+
+ mvapich2
+ impi/18.0.2
+ pnetcdf/1.11
+ parallel-netcdf/4.6.2
+
+
+ netcdf/4.3.3.1
+
+
+
+ 256M
+
+
diff --git a/machines/stampede2-skx/config_machines.xml b/machines/stampede2-skx/config_machines.xml
new file mode 100644
index 00000000..21d5c853
--- /dev/null
+++ b/machines/stampede2-skx/config_machines.xml
@@ -0,0 +1,76 @@
+
+
+ Intel Xeon Platinum 8160 ("Skylake"), 48 cores on two sockets (24 cores/socket), batch system is SLURM
+ LINUX
+ intel
+ impi,mvapich2
+ $ENV{SCRATCH}
+ /work/02503/edwardsj/CESM/inputdata
+ /work/02503/edwardsj/CESM/inputdata/lmwg
+ $ENV{WORK}/archive/$CASE
+ /work/02503/edwardsj/CESM/cesm_baselines
+ /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc
+ 4
+ slurm
+ cseg
+ 96
+ 48
+
+ ibrun
+
+ -n {{ total_tasks }}
+
+
+
+ ibrun
+
+ -n {{ total_tasks }}
+
+
+
+ /opt/apps/lmod/lmod/init/perl
+ /opt/apps/lmod/lmod/init/env_modules_python.py
+ /opt/apps/lmod/lmod/init/sh
+ /opt/apps/lmod/lmod/init/csh
+ /opt/apps/lmod/lmod/libexec/lmod perl
+ /opt/apps/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ TACC
+ python/2.7.13
+ intel/18.0.2
+ cmake/3.16.1
+
+
+ mvapich2/2.3.1
+ pnetcdf/1.11
+ parallel-netcdf/4.6.2
+
+
+ mvapich2
+ impi/18.0.2
+ pnetcdf/1.11
+ parallel-netcdf/4.6.2
+
+
+ netcdf/4.3.3.1
+
+
+
+ 256M
+
+
+ /work/01118/tg803972/stampede2/ESMF-INSTALL/8.0.0bs38/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk
+
+
+ ON
+ SUMMARY
+ /work/06242/tg855414/stampede2/FV3GFS/benchmark-inputs/2012010100/gfs/fcst
+ /work/06242/tg855414/stampede2/FV3GFS/fix_am
+ /work/06242/tg855414/stampede2/FV3GFS/addon
+
+
+
+
diff --git a/machines/theia/config_machines.xml b/machines/theia/config_machines.xml
new file mode 100644
index 00000000..2add8964
--- /dev/null
+++ b/machines/theia/config_machines.xml
@@ -0,0 +1,54 @@
+
+ theia
+ LINUX
+ intel
+ impi
+ nems
+
+ /scratch4/NCEPDEV/nems/noscrub/$USER/cimecases
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/BASELINES
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/tools/cprnc
+ make
+ 8
+ slurm
+ cseg
+ 24
+ 24
+ TRUE
+
+ srun
+
+ -n $TOTALPES
+
+
+
+
+
+
+ /apps/lmod/lmod/init/sh
+ /apps/lmod/lmod/init/csh
+ module
+ module
+ /apps/lmod/lmod/libexec/lmod python
+
+
+ intel/15.1.133
+ impi/5.1.1.109
+ netcdf/4.3.0
+ pnetcdf
+ /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/soft/modulefiles
+ yaml-cpp
+ esmf/8.0.0bs29g
+
+
+
+ ON
+ SUMMARY
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/benchmark-inputs/2012010100/gfs/fcst
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/fix_am
+ /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/addon
+
+
diff --git a/machines/theta/config_machines.xml b/machines/theta/config_machines.xml
new file mode 100644
index 00000000..a245e621
--- /dev/null
+++ b/machines/theta/config_machines.xml
@@ -0,0 +1,87 @@
+
+ ALCF Cray XC40 KNL, os is CNL, 64 pes/node, batch system is cobalt
+ CNL
+ intel,gnu,cray
+ mpt
+ CESM_Highres_Testing
+ /projects/CESM_Highres_Testing/cesm/scratch/$USER
+ /projects/CESM_Highres_Testing/cesm/inputdata
+ /projects/CESM_Highres_Testing/cesm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /projects/CESM_Highres_Testing/cesm/baselines
+ /projects/CESM_Highres_Testing/cesm/tools/cprnc/cprnc
+ 8
+ cobalt_theta
+ cseg
+ 64
+ 64
+ TRUE
+
+ aprun
+
+ -n {{ total_tasks }}
+ -N {{ tasks_per_node }}
+ --cc depth -d $OMP_NUM_THREADS
+ -e OMP_STACKSIZE=64M
+ -e OMP_NUM_THREADS=$OMP_NUM_THREADS
+
+
+
+ /opt/modules/default/init/perl.pm
+ /opt/modules/default/init/python.py
+ /opt/modules/default/init/sh
+ /opt/modules/default/init/csh
+ /opt/modules/default/bin/modulecmd perl
+ /opt/modules/default/bin/modulecmd python
+ module
+ module
+
+ craype-mic-knl
+ PrgEnv-intel
+ PrgEnv-cray
+ PrgEnv-gnu
+ intel
+ cce
+ cray-parallel-netcdf
+ cray-hdf5-parallel
+ pmi
+ cray-libsci
+ cray-mpich
+ cray-netcdf
+ cray-hdf5
+ cray-netcdf-hdf5parallel
+ craype
+ papi
+
+
+
+ PrgEnv-intel/6.0.4
+ intel intel/18.0.0.128
+ cray-libsci
+
+
+
+ PrgEnv-cray/6.0.4
+ cce cce/8.7.0
+
+
+ PrgEnv-gnu/6.0.4
+ gcc gcc/7.3.0
+
+
+ papi/5.6.0.1
+ craype craype/2.5.14
+
+
+ cray-libsci/18.04.1
+
+
+ cray-mpich/7.7.0
+
+
+ cray-netcdf-hdf5parallel/4.4.1.1.6
+ cray-hdf5-parallel/1.10.1.1
+ cray-parallel-netcdf/1.8.1.3
+
+
+
diff --git a/machines/thunder/config_machines.xml b/machines/thunder/config_machines.xml
new file mode 100644
index 00000000..03263adb
--- /dev/null
+++ b/machines/thunder/config_machines.xml
@@ -0,0 +1,73 @@
+
+ NCAR ARM platform, os is Linux, 64/128 pes/node, batch system is SLURM
+ LINUX
+
+ armgcc,gnu,arm
+ openmpi
+ /glade/scratch/$USER
+ $ENV{CESMDATAROOT}/inputdata
+ $DIN_LOC_ROOT/CTSM_datm_forcing_data
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/cesm_baselines
+ $ENV{CESMDATAROOT}/tools/cprnc/cprnc
+ 16
+ slurm
+ cseg
+ 64
+ 128
+
+ mpiexec
+
+ --tag-output
+ -np {{ total_tasks }}
+
+
+
+ /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/perl
+ /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/env_modules_python.py
+ /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/csh
+ /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/sh
+ /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/libexec/lmod perl
+ /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ ncarenv/1.3
+ cmake/3.14.4
+
+
+ arm/19.3
+
+
+ armgcc/8.2.0
+
+
+ gnu/9.1.0
+ openblas/0.3.6
+ esmf_libs/8.0.0
+
+
+
+ ncarcompilers/0.5.0
+
+
+ openmpi/4.0.3
+ netcdf-mpi/4.7.1
+ pnetcdf/1.12.1
+
+
+ netcdf/4.7.1
+
+
+ esmf-8.0.0-ncdfio-uni-g
+
+
+ esmf-8.0.0-ncdfio-uni-O
+
+
+
+ 256M
+ $ENV{NETCDF}
+
+
diff --git a/machines/ubuntu-latest/config_machines.xml b/machines/ubuntu-latest/config_machines.xml
new file mode 100644
index 00000000..61158dd4
--- /dev/null
+++ b/machines/ubuntu-latest/config_machines.xml
@@ -0,0 +1,31 @@
+
+
+ Used for GitHub testing
+
+ LINUX
+
+ gnu
+ openmpi
+ none
+
+ $ENV{HOME}/cesm/scratch
+ $ENV{HOME}/cesm/inputdata
+ $ENV{HOME}/cesm/inputdata/lmwg
+ $ENV{HOME}/cesm/archive/$CASE
+ $ENV{HOME}/cesm/cesm_baselines
+
+ make
+ 8
+ none
+ jedwards
+ 4
+ 4
+ FALSE
+
+ mpiexec
+
+ -n {{ total_tasks }}
+
+
+
+
diff --git a/machines/userdefined_laptop_template/config_machines.xml b/machines/userdefined_laptop_template/config_machines.xml
index 2d1005c5..e8b42c9d 100644
--- a/machines/userdefined_laptop_template/config_machines.xml
+++ b/machines/userdefined_laptop_template/config_machines.xml
@@ -5,7 +5,8 @@
desired layout (change '${HOME}/projects' to your
preferred location). -->
__USEFUL_DESCRIPTION__
- something.matching.your.machine.hostname
+
Darwin
gnu
mpich
diff --git a/machines/zeus/config_machines.xml b/machines/zeus/config_machines.xml
new file mode 100644
index 00000000..914ea9d9
--- /dev/null
+++ b/machines/zeus/config_machines.xml
@@ -0,0 +1,79 @@
+
+
+ CMCC Lenovo ThinkSystem SD530, os is Linux, 36 pes/node, batch system is LSF
+ LINUX
+ intel
+ impi,mpi-serial
+ R000
+ /work/$ENV{DIVISION}/$ENV{USER}/CESM2
+ $ENV{CESMDATAROOT}/inputdata
+ $DIN_LOC_ROOT/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{CESMDATAROOT}/ccsm_baselines
+ $ENV{CESMDATAROOT}/cesm2_tools/cprnc/cprnc
+ /usr/lib64/perl5:/usr/share/perl5
+ 8
+ lsf
+ cmcc
+ 72
+ 36
+ TRUE
+
+ mpirun
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/sh
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+ module
+ module
+
+
+
+
+ intel20.1/20.1.217
+ intel20.1/szip/2.1.1
+ cmake/3.17.3
+ curl/7.70.0
+
+
+ intel20.1/hdf5/1.12.0
+ intel20.1/netcdf/C_4.7.4-F_4.5.3_CXX_4.3.1
+
+
+ impi20.1/19.7.217
+ impi20.1/hdf5/1.12.0
+ impi20.1/netcdf/C_4.7.4-F_4.5.3_CXX_4.3.1
+ impi20.1/parallel-netcdf/1.12.1
+
+
+ impi20.1/esmf/8.1.1-intelmpi-64-g
+
+
+ impi20.1/esmf/8.1.1-intelmpi-64-O
+
+
+ intel20.1/esmf/8.1.1-mpiuni-64-g
+
+
+ intel20.1/esmf/8.1.1-mpiuni-64-O
+
+
+
+ /data/inputs/CESM/xios-2.5
+
+
+ 1
+ gpfs
+ 0
+ 60
+ skx
+ skx_avx512
+ lsf
+ 1
+ {{ num_nodes }}
+
+
diff --git a/maps_nuopc.xml b/maps_nuopc.xml
index 9bb1ccb6..51954eb0 100644
--- a/maps_nuopc.xml
+++ b/maps_nuopc.xml
@@ -78,10 +78,18 @@
+
+
+
+
+
+
+
+
@@ -124,6 +132,10 @@
+
+
+
+