More work on Systems
aaschwanden committed Nov 8, 2023
1 parent b5f2bd7 commit 1e75eb0
Showing 9 changed files with 326 additions and 53 deletions.
5 changes: 5 additions & 0 deletions README.md
@@ -11,3 +11,8 @@
### Synopsis

The stability of the Greenland Ice Sheet in a warming climate is a critical societal concern. Predicting Greenland's contribution to sea level remains a challenge as historical simulations of the past decades show limited agreement with observations. In this project, we develop a data assimilation framework that combines sparse observations and the ice sheet model PISM to produce a reanalysis of the state of the Greenland Ice Sheet from 1980 to 2020 using probabilistic filtering methods.


### Repository contents

This repository contains scripts and functions to generate and analyze hindcasts performed with the [Parallel Ice Sheet Model (PISM)](https://pism.io).
38 changes: 26 additions & 12 deletions hindcasts/hindcast.py
@@ -1,11 +1,23 @@
#!/usr/bin/env python
# Copyright (C) 2019-23 Andy Aschwanden

# Historical simulations for
# "A reanalyis of the Greenland Ice Sheet"
# Copyright (C) 2023 Andy Aschwanden
#
# This file is part of pism-ragis.
#
# PISM-RAGIS is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM-RAGIS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

"""
Perform hindcasts of the Greenland Ice Sheet
Generate scripts to run hindcasts of the Greenland Ice Sheet using the Parallel Ice Sheet Model (PISM)
"""

import inspect
@@ -21,7 +33,7 @@
import xarray as xr


def current_script_directory():
def current_script_directory() -> str:
"""
Return the current directory
"""
@@ -34,6 +46,7 @@ def current_script_directory():

sys.path.append(join(script_directory, "../pism_ragis"))
import computing # pylint: disable=C0413
from systems import Systems # pylint: disable=C0413

grid_choices = [
18000,
@@ -53,6 +66,9 @@ def current_script_directory():
150,
]

available_systems = Systems()
available_systems.default_path = "../hpc-systems"

if __name__ == "__main__":
# set up the option parser
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
@@ -169,7 +185,7 @@ def current_script_directory():
"-s",
"--system",
dest="system",
choices=computing.list_systems(),
choices=available_systems.list_systems(),
help="computer system to use.",
default="debug",
)
@@ -264,7 +280,7 @@ def current_script_directory():
osize = options.osize
queue = options.queue
walltime = options.walltime
system = options.system
system = available_systems[options.system]

spatial_ts = options.spatial_ts
test_climate_models = options.test_climate_models
@@ -277,9 +293,7 @@ def current_script_directory():

stress_balance = options.stress_balance
version = options.version

ensemble_file = options.ensemble_file

domain = options.domain
pism_exec = computing.generate_domain(domain)

@@ -358,7 +372,7 @@ def current_script_directory():
mkdir -p $each
done\n\n
"""
if system != "debug":
if system["machine"] != "debug":
cmd = f"""lfs setstripe -c -1 {dirs["output"]}"""
sub.call(shlex.split(cmd))
cmd = f"""lfs setstripe -c -1 {dirs["spatial_tmp"]}"""
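The `systems` module imported above does not appear among the diffs shown on this page. A minimal sketch of a `Systems` class, assuming only what its use in `hindcast.py` implies (a settable `default_path`, a `list_systems()` method, and dict-style lookup that returns the parsed TOML mapping with its `"machine"` key), might look like this; everything beyond that observed usage is an assumption:

```python
# Hypothetical sketch of a Systems class consistent with its use in
# hindcast.py above; the actual pism_ragis/systems.py may differ.
from pathlib import Path

import tomllib  # Python >= 3.11; the "tomli" package offers the same API earlier


class Systems:
    """Collection of HPC system descriptions read from per-machine TOML files."""

    def __init__(self):
        # Directory scanned for *.toml files; hindcast.py overrides this
        # with "../hpc-systems" before first use.
        self.default_path = "hpc-systems"
        self._systems = None

    def _load(self) -> dict:
        # Load lazily so that default_path can be changed after construction.
        if self._systems is None:
            self._systems = {}
            for toml_file in sorted(Path(self.default_path).glob("*.toml")):
                with open(toml_file, "rb") as f:
                    system = tomllib.load(f)
                self._systems[system["machine"]] = system
        return self._systems

    def list_systems(self) -> list:
        """Return the names of all available systems."""
        return sorted(self._load().keys())

    def __getitem__(self, name: str) -> dict:
        """Return the parsed TOML dictionary for one system."""
        return self._load()[name]
```

Under these assumptions, the three TOML files shown below would make `list_systems()` return at least `["chinook", "debug", "pleiades"]`, and `available_systems["debug"]["machine"]` would be `"debug"`, matching the `system["machine"] != "debug"` check in the diff above.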
60 changes: 60 additions & 0 deletions hpc-systems/chinook.toml
@@ -0,0 +1,60 @@
machine = "chinook"

[MPI]

mpido = "mpirun -np {cores} -machinefile ./nodes_$SLURM_JOBID"

[scheduler]

name = "SLRUM"
submit = "sbatch"
job_id = "SLURM_JOBID"

[filesystem]

work_dir = "SLURM_SUBMIT_DIR"

[partitions]

default = "new"

[partitions.old]

name = "old-chinook"
cores_per_node = 24
queues = ["t1standard", "t1small", "t2standard", "t2small"]

[partitions.new]

name = "new-chinook"
cores_per_node = 40
queues = ["t1standard", "t1small", "t2standard", "t2small"]

[job]

header = """#!/bin/sh
#SBATCH --partition={queue}
#SBATCH --ntasks={cores}
#SBATCH --tasks-per-node={ppn}
#SBATCH --time={walltime}
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
#SBATCH --output=pism.%j
module list
umask 007
cd $SLURM_SUBMIT_DIR
# Generate a list of compute node hostnames reserved for this job,
# this ./nodes file is necessary for slurm to spawn mpi processes
# across multiple compute nodes
srun -l /bin/hostname | sort -n | awk '{{print $2}}' > ./nodes_$SLURM_JOBID
ulimit -l unlimited
ulimit -s unlimited
ulimit
"""
30 changes: 30 additions & 0 deletions hpc-systems/debug.toml
@@ -0,0 +1,30 @@
machine = "debug"

[MPI]

mpido = "mpirun -np {cores}"

[scheduler]

name = "shell"
submit = "sh"
job_id = ""

[filesystem]

work_dir = "PWD"

[partitions]

default = "debug"

[partitions.debug]

name = "debug"

[job]

header = """
"""
59 changes: 59 additions & 0 deletions hpc-systems/pleiades.toml
@@ -0,0 +1,59 @@
machine = "pleiades"

[partitions]

default = "sandy_bridge"

[partitions.broadwell]

name = "bro"
cores_per_node = 28
queues = ["debug", "normal", "long"]

[partitions.haswell]

name = "has"
cores_per_node = 24
queues = ["debug", "normal", "long"]

[partitions.ivy_bridge]

name = "ivy"
cores_per_node = 20
queues = ["debug", "normal", "long"]

[partitions.sandy_bridge]

name = "san"
cores_per_node = 16
queues = ["debug", "normal", "long"]

[MPI]

mpido = "mpiexec -n {cores}"

[scheduler]

name = "QSUB"
submit = "qusb"
job_id = "PBS_JOBID"

[filesystem]

work_dir = "PBS_O_WORKDIR"

[job]

header = """#PBS -S /bin/bash
#PBS -N cfd
#PBS -l walltime={walltime}
#PBS -m e
#PBS -W group_list={gid}
#PBS -q {queue}
#PBS -lselect={nodes}:ncpus={ppn}:mpiprocs={ppn}:model={partition}
#PBS -j oe
module list
cd $PBS_O_WORKDIR
"""
