-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy path: amber-12.0-7.spec
executable file
·313 lines (235 loc) · 9.67 KB
/
amber-12.0-7.spec
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
# Package header for the TACC build of Amber 12 (AmberTools + Amber),
# built per compiler-family / MPI-family combination via the TACC
# *-defines.inc helper includes.
Summary: Amber Toolkit and parallel modules.
Name: amber
Version: 12.0
Release: 7
License: UCSF
Vendor: Amber
Group: Applications/Life Sciences
Source0: AmberTools12.tar.bz2
Source1: Amber12.tar.bz2
Packager: TACC - [email protected]
BuildRoot: /var/tmp/%{name}-%{version}-buildroot
Requires: netcdf-3.6-intel13
%define version_unit 12
%include rpm-dir.inc
%include ../system-defines.inc
%include compiler-defines.inc
%include mpi-defines.inc
%define PNAME amber
%define MODULE_VAR TACC_AMBER
# Install and modulefile paths are keyed on the compiler and MPI stacks.
%define INSTALL_DIR %{APPS}/%{comp_fam_ver}/%{mpi_fam_ver}/%{PNAME}/%{version}
%define MODULE_DIR %{APPS}/%{comp_fam_ver}/%{mpi_fam_ver}/%{MODULES}/%{PNAME}
# Rename rpm to "-n" argument at TACC
%package -n %{name}%{version_unit}-%{comp_fam_ver}-%{mpi_fam_ver}
Summary: The Amber tools and parallel modules
Group: Applications/Life Sciences
# The bare %%description keeps the (unshipped) main package valid; the
# real description text belongs to the renamed subpackage below.
%description
%description -n %{name}%{version_unit}-%{comp_fam_ver}-%{mpi_fam_ver}
Amber serial, parallel, and cuda modules
%prep
# Nothing is unpacked here: the tarballs are extracted inside the
# install stage instead (see below). Only the buildroot is cleaned.
# The :? guard aborts if RPM_BUILD_ROOT is unset/empty, so this can
# never expand to "rm -rf /".
rm -rf "${RPM_BUILD_ROOT:?}"
# %%setup is deliberately disabled; the doubled %% stops rpm from
# expanding the macro even inside this comment.
# %%setup -n %%{PNAME}%%{version_unit}
%build
# No separate build stage; compilation happens in the install stage.
%install
# NOTE: unusually for a spec, this section unpacks, patches, configures
# and compiles everything, then copies the result into the buildroot.
# The statement order matters throughout (module loads -> mount ->
# unpack -> patch -> four build flavors -> copy -> unmount).
%include ../system-load.inc
mkdir -p $RPM_BUILD_ROOT/%{INSTALL_DIR}
# Load the compiler and MPI stack matching this build flavor.
%include compiler-load.inc
%include mpi-load.inc
%if "%{PLATFORM}" != "stampede"
# mkl is loaded explicitly only off Stampede -- presumably provided by
# the default environment there; TODO confirm.
module load mkl
%endif
module load cuda
module load hdf5 netcdf/3.6.3
echo COMPILER LOAD: %{comp_fam_ver_load}
echo MPI LOAD: %{mpi_fam_ver_load}
# this is a test of Doug's new tacctmpfs ---
#mkdir -p %{INSTALL_DIR}
rm -rf %{INSTALL_DIR}
# NOTE(review): tacctmpfs -m presumably mounts a tmpfs at the install
# prefix so the build runs in memory -- confirm; it is unmounted with
# -u at the end of this section.
PATH=/tmp/djbin:$PATH
/tmp/djbin/tacctmpfs -m %{INSTALL_DIR}
cd %{INSTALL_DIR}
pwd
# Unpack AmberTools first, then Amber on top of it, into the same tree.
tar xjf %{_topdir}/SOURCES/AmberTools12.tar.bz2 --strip-components 1
tar xjf %{_topdir}/SOURCES/Amber12.tar.bz2 --strip-components 1
# The Amber build system requires AMBERHOME; MKL_HOME/CUDA_HOME point
# configure at the TACC module locations.
AMBERHOME=`pwd`
MKL_HOME=$TACC_MKL_DIR
CUDA_HOME=$TACC_CUDA_DIR
export AMBERHOME MKL_HOME CUDA_HOME
# LDFLAGS="/usr/lib64/libXext.so.6 -L/usr/lib64" ./configure intel
# Amber now tries to download and install new bugfixes during the configure step if you tell it "y"
# make clean
# it takes two or more rounds of configure to get the updates... Im' just testing this
# Run the updater repeatedly: each pass can pull in further bugfixes.
$AMBERHOME/patch_amber.py --update-tree
$AMBERHOME/patch_amber.py --update-tree
$AMBERHOME/patch_amber.py --update-tree
# Build all four flavors in sequence: serial, MPI, CUDA, CUDA+MPI.
# "yes |" auto-answers configure's interactive prompts.
yes | ./configure --with-netcdf $TACC_NETCDF_DIR intel
make install
make clean
yes | ./configure -mpi --with-netcdf $TACC_NETCDF_DIR intel
make install
make clean
yes | ./configure -cuda --with-netcdf $TACC_NETCDF_DIR intel
# rpath the CUDA libs so users need not load the cuda module at run time.
make LDFLAGS="-Wl,-rpath,$TACC_CUDA_LIB" install
make clean
yes | ./configure -cuda -mpi --with-netcdf $TACC_NETCDF_DIR intel
make LDFLAGS="-Wl,-rpath,$TACC_CUDA_LIB" install
# Copy the finished tree into the buildroot, then drop the AmberTools
# sources from the payload.
cd %{INSTALL_DIR}
cp -rp AmberTools bin dat doc benchmarks \
GNU_LGPL_v2 include \
lib README share \
$RPM_BUILD_ROOT/%{INSTALL_DIR}
rm -r $RPM_BUILD_ROOT/%{INSTALL_DIR}/AmberTools/src
chmod -Rf u+rwX,g+rwX,o=rX $RPM_BUILD_ROOT/%{INSTALL_DIR}
cd $RPM_BUILD_ROOT
# NOTE(review): the mount above used the full /tmp/djbin path; this bare
# name relies on the PATH prepend earlier in this section.
tacctmpfs -u %{INSTALL_DIR}
########################### MODULE FILE ####################################
# Generate the Lmod (Lua) modulefile. RPM macros inside the here-document
# are expanded by rpmbuild at spec-parse time, so quoting EOF stops only
# shell expansion, not macro expansion.
rm -rf $RPM_BUILD_ROOT/%{MODULE_DIR}
mkdir -p $RPM_BUILD_ROOT/%{MODULE_DIR}
%if "%{PLATFORM}" == "stampede"
# Stampede variant: SLURM (#SBATCH) example directives in the help text.
cat > $RPM_BUILD_ROOT/%{MODULE_DIR}/%{version}.lua << 'EOF'
help(
[[
The TACC Amber installation includes the parallel modules with the .MPI suffix:
MMPBSA.MPI pbsa.MPI pmemd.MPI ptraj.MPI sander.LES.MPI sander.MPI
The pmemd binaries for use with GPUs are named:
pmemd.cuda.MPI pmemd.cuda
They were built with "single-precision, double-precision" for the best trade-off
between speed and accuracy. Visit http://ambermd.org/gpus/ for more
information. Also note that when using the CUDA version of pmemd, you can only
use 1 thread per graphics card and must use the "gpu" queue. For example, if
using 1 GPU card on two nodes, your job submission script should include the
following lines (along with all the other usual lines):
#SBATCH -n 2 -N 2
#SBATCH -p gpu
ibrun pmemd.cuda.MPI -O -i mdin -o mdout -p prmtop \
-c inpcrd -r restrt -x mdcrd </dev/null
Your ibrun line will change depending your filenames, etc. Cuda libraries are
hard linked, so loading the cuda module is not required. Again, visit
http://ambermd.org/gpus/ for more information as well as the Stampede guide at
http://www.tacc.utexas.edu/user-services/user-guides/stampede-user-guide
Amber tools examples and benchmarks are included in the AmberTools directory.
Examples, data, docs, includes, info, libs are included in directories with
corresponding names.
The Amber modulefile defines the following environment variables:
TACC_AMBER_DIR, TACC_AMBER_TOOLS, TACC_AMBER_BIN, TACC_AMBER_DAT,
TACC_AMBER_DOC, TACC_AMBER_INC, TACC_AMBER_LIB, and TACC_AMBER_MAN
for the corresponding Amber directories.
Also, AMBERHOME is set to the Amber Home Directory (TACC_AMBER_DIR),
and $AMBERHOME/bin is included in the PATH variable.
Version %{version}
]]
)
whatis("Name: Amber")
whatis("Version: %{version}")
whatis("Version-notes: Compiler:%{comp_fam_ver}, MPI:%{mpi_fam_ver}")
whatis("Category: Application, Chemistry")
whatis("Keywords: Chemistry, Biology, Molecular Dynamics, Cuda, Application")
whatis("URL: http://amber.scripps.edu/")
whatis("Description: Molecular Modeling Package")
--
-- Create environment variables.
--
local amber_dir = "%{INSTALL_DIR}"
local amber_tools = "%{INSTALL_DIR}/AmberTools"
local amber_bin = "%{INSTALL_DIR}/bin"
local amber_dat = "%{INSTALL_DIR}/dat"
local amber_doc = "%{INSTALL_DIR}/doc"
local amber_inc = "%{INSTALL_DIR}/include"
local amber_lib = "%{INSTALL_DIR}/lib"
local amber_man = "%{INSTALL_DIR}/share/man"
setenv("TACC_AMBER_DIR" , amber_dir )
setenv("TACC_AMBER_TOOLS", amber_tools)
setenv("TACC_AMBER_BIN" , amber_bin )
setenv("TACC_AMBER_DAT" , amber_dat )
setenv("TACC_AMBER_DOC" , amber_doc )
setenv("TACC_AMBER_INC" , amber_inc )
setenv("TACC_AMBER_LIB" , amber_lib )
setenv("TACC_AMBER_MAN" , amber_man )
setenv("AMBERHOME" , amber_dir )
append_path("PATH" ,amber_bin )
append_path("MANPATH" ,amber_man )
EOF
%endif
%if "%{PLATFORM}" == "lonestar"
# Lonestar variant of the modulefile: same environment-variable contract
# as the Stampede one, but the help text shows SGE (#$) directives
# instead of SLURM ones. The here-document body is emitted verbatim into
# the generated Lua file; RPM macros in it are expanded by rpmbuild
# before the shell runs.
cat > $RPM_BUILD_ROOT/%{MODULE_DIR}/%{version}.lua << 'EOF'
help(
[[
The TACC Amber installation includes the parallel modules with the .MPI suffix:
MMPBSA.MPI pbsa.MPI pmemd.MPI ptraj.MPI sander.LES.MPI sander.MPI
The pmemd binaries for use with GPUs are named:
pmemd.cuda.MPI pmemd.cuda
They were built with "single-precision, double-precision" for the best trade-off
between speed and accuracy. Visit http://ambermd.org/gpus/ for more
information. Also note that when using the CUDA version of pmemd, you can only
use 1 thread per graphics card and must use the "gpu" queue. For example, if
using 2 GPU cards on one node, your job submission script should include the
following lines (along with all the other usual lines):
#$ -pe 2way 12
#$ -q gpu
ibrun pmemd.cuda.MPI -O -i mdin -o mdout -p prmtop \
-c inpcrd -r restrt -x mdcrd </dev/null
Your ibrun line will change depending your filenames, etc. Cuda libraries are
hard linked, so loading the cuda module is not required. Again, visit
http://ambermd.org/gpus/ for more information as well as the Lonestar guide at
http://www.tacc.utexas.edu/user-services/user-guides/lonestar-user-guide
Amber tools examples and benchmarks are included in the AmberTools directory.
Examples, data, docs, includes, info, libs are included in directories with
corresponding names.
The Amber modulefile defines the following environment variables:
TACC_AMBER_DIR, TACC_AMBER_TOOLS, TACC_AMBER_BIN, TACC_AMBER_DAT,
TACC_AMBER_DOC, TACC_AMBER_INC, TACC_AMBER_LIB, and TACC_AMBER_MAN
for the corresponding Amber directories.
Also, AMBERHOME is set to the Amber Home Directory (TACC_AMBER_DIR),
and $AMBERHOME/bin is included in the PATH variable.
Version %{version}
]]
)
whatis("Name: AMBER")
whatis("Version: %{version}")
whatis("Version-notes: Compiler:%{comp_fam_ver}, MPI:%{mpi_fam_ver}")
whatis("Category: Application, Chemistry")
whatis("Keywords: Chemistry, Biology, Molecular Dynamics, Cuda, Application")
whatis("URL: http://amber.scripps.edu/")
whatis("Description: Molecular Modeling Package")
--
-- Create environment variables.
--
local amber_dir = "%{INSTALL_DIR}"
local amber_tools = "%{INSTALL_DIR}/AmberTools"
local amber_bin = "%{INSTALL_DIR}/bin"
local amber_dat = "%{INSTALL_DIR}/dat"
local amber_doc = "%{INSTALL_DIR}/doc"
local amber_inc = "%{INSTALL_DIR}/include"
local amber_lib = "%{INSTALL_DIR}/lib"
local amber_man = "%{INSTALL_DIR}/share/man"
setenv("TACC_AMBER_DIR" , amber_dir )
setenv("TACC_AMBER_TOOLS", amber_tools)
setenv("TACC_AMBER_BIN" , amber_bin )
setenv("TACC_AMBER_DAT" , amber_dat )
setenv("TACC_AMBER_DOC" , amber_doc )
setenv("TACC_AMBER_INC" , amber_inc )
setenv("TACC_AMBER_LIB" , amber_lib )
setenv("TACC_AMBER_MAN" , amber_man )
setenv("AMBERHOME" , amber_dir )
append_path("PATH" ,amber_bin )
append_path("MANPATH" ,amber_man )
EOF
%endif
# Write a Tcl-format .version file recording this version string for the
# module system (legacy environment-modules convention).
cat > $RPM_BUILD_ROOT/%{MODULE_DIR}/.version.%{version} << 'EOF'
#%Module1.0#################################################
##
## Version file for AMBER %version
## Compiler: %{comp_fam_ver} and MPI: %{mpi_fam_ver}
##
set ModulesVersion "%{version}"
EOF
# Sanity-check the generated Lua modulefile before it is packaged.
%{SPEC_DIR}/checkModuleSyntax $RPM_BUILD_ROOT/%{MODULE_DIR}/%{version}.lua
############################# MODULES ######################################
# Package payload: the whole install tree plus the modulefile directory,
# owned by root with group "install".
%files -n %{name}%{version_unit}-%{comp_fam_ver}-%{mpi_fam_ver}
%defattr(-,root,install)
%{INSTALL_DIR}
%{MODULE_DIR}
%post -n %{name}%{version_unit}-%{comp_fam_ver}-%{mpi_fam_ver}
# (no post-install actions)
%clean
# NOTE(review): buildroot cleanup is disabled -- presumably intentional,
# e.g. to allow post-build inspection; confirm before re-enabling.
#rm -rf $RPM_BUILD_ROOT