tresources.py - pism - [fork] customized build of PISM, the parallel ice sheet model (tillflux branch)
git clone git://src.adamsgaard.dk/pism
---
tresources.py (21191B)
---
"""
resources
=========

Provides:
- general resources such as grid constructors, calving, hydrology, etc.
  for simulations of Storglaciären

"""

from collections import OrderedDict
import os
import math
import sys


def generate_prefix_str(pism_exec):
    '''
    Generate a prefix string: prepend $PISM_PREFIX (if set) to the
    executable name.

    Returns: string
    '''

    return os.path.join(os.environ.get("PISM_PREFIX", ""), pism_exec)


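# Example use of generate_prefix_str (doctest-style; /opt/pism/bin is a
# placeholder install path, not from this repository):
#
#   >>> generate_prefix_str('pismr')
#   'pismr'
#   >>> os.environ['PISM_PREFIX'] = '/opt/pism/bin'
#   >>> generate_prefix_str('pismr')
#   '/opt/pism/bin/pismr'

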
def generate_domain(domain):
    '''
    Generate domain-specific options.

    Returns: string
    '''

    if domain.lower() == 'storglaciaren':
        pism_exec = 'pismr -regional -no_model_strip 0'
    else:
        print('Domain {} not recognized, exiting'.format(domain))
        sys.exit(1)

    return pism_exec


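# Example use of generate_domain (doctest-style):
#
#   >>> generate_domain('storglaciaren')
#   'pismr -regional -no_model_strip 0'

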
def default_spatial_ts_vars():
    '''
    Returns a list of commonly-used extra vars
    '''

    exvars = ['beta',
              'bmelt',
              'climatic_mass_balance',
              'dHdt',
              'ice_mass',
              'mass_fluxes',
              'mask',
              'taub_mag',
              'tauc',
              'taud_mag',
              'tempicethk_basal',
              'temppabase',
              'tempsurf',
              'thk',
              'tillwat',
              'topg',
              'usurf',
              'velbase_mag',
              'velsurf_mag']
    return exvars


def ch_spatial_ts_vars():
    '''
    Returns the default list of extra vars plus 'ch_heat_flux'
    '''

    exvars = ['beta',
              'bmelt',
              'climatic_mass_balance',
              'ch_heat_flux',
              'dHdt',
              'ice_mass',
              'mass_fluxes',
              'mask',
              'taub_mag',
              'tauc',
              'taud_mag',
              'tempicethk_basal',
              'temppabase',
              'tempsurf',
              'thk',
              'tillwat',
              'topg',
              'usurf',
              'velbase_mag',
              'velsurf_mag']
    return exvars


def generate_spatial_ts(outfile, exvars, step, start=None, end=None, split=None, odir=None):
    '''
    Return dict to generate spatial time series

    Returns: OrderedDict
    '''

    # accept either a list of variable names or a comma-separated string
    if not isinstance(exvars, str):
        exvars = ','.join(exvars)

    params_dict = OrderedDict()
    if split is True:
        outfile, ext = os.path.splitext(outfile)
        params_dict['extra_split'] = ''
    if odir is None:
        params_dict['extra_file'] = 'ex_' + outfile
    else:
        params_dict['extra_file'] = os.path.join(odir, 'ex_' + outfile)
    params_dict['extra_vars'] = exvars

    if step is None:
        step = 'yearly'

    if start is not None and end is not None:
        times = '{start}:{step}:{end}'.format(start=start, step=step, end=end)
    else:
        times = step

    params_dict['extra_times'] = times

    return params_dict


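# Example use of generate_spatial_ts (doctest-style): a variable list is
# joined into a comma-separated string and start/step/end become a PISM
# time specification:
#
#   >>> generate_spatial_ts('run.nc', ['thk', 'usurf'], 10, start=0, end=100)
#   OrderedDict([('extra_file', 'ex_run.nc'),
#                ('extra_vars', 'thk,usurf'),
#                ('extra_times', '0:10:100')])

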
def generate_scalar_ts(outfile, step, start=None, end=None, odir=None):
    '''
    Return dict to create scalar time series

    Returns: OrderedDict
    '''

    params_dict = OrderedDict()
    if odir is None:
        params_dict['ts_file'] = 'ts_' + outfile
    else:
        params_dict['ts_file'] = os.path.join(odir, 'ts_' + outfile)

    if step is None:
        step = 'yearly'

    if start is not None and end is not None:
        times = '{start}:{step}:{end}'.format(start=start, step=step, end=end)
    else:
        times = step
    params_dict['ts_times'] = times

    return params_dict


def generate_snap_shots(outfile, times, odir=None):
    '''
    Return dict to generate snap shots

    Returns: OrderedDict
    '''

    params_dict = OrderedDict()
    if odir is None:
        params_dict['save_file'] = 'save_' + outfile.split('.nc')[0]
    else:
        params_dict['save_file'] = os.path.join(odir, 'save_' + outfile.split('.nc')[0])

    params_dict['save_times'] = ','.join(str(e) for e in times)
    params_dict['save_split'] = ''
    params_dict['save_force_output_times'] = ''

    return params_dict


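# Example use of generate_snap_shots (doctest-style):
#
#   >>> generate_snap_shots('run.nc', [100, 200, 500])['save_times']
#   '100,200,500'

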
def generate_grid_description(grid_resolution, domain, restart=False):
    '''
    Generate grid description dict

    Returns: OrderedDict
    '''

    if domain.lower() != 'storglaciaren':
        print('Domain {} not recognized, exiting'.format(domain))
        sys.exit(1)

    mx_max = 400
    my_max = 200

    resolution_max = 10

    accepted_resolutions = (10, 20, 40)

    if grid_resolution not in accepted_resolutions:
        print('grid resolution {}m not recognized'.format(grid_resolution))
        sys.exit(1)

    if grid_resolution < 20:
        skip_max = 500
        mz = 201
        mzb = 1
    elif (grid_resolution >= 20) and (grid_resolution < 40):
        skip_max = 200
        mz = 201
        mzb = 1
    else:
        skip_max = 200
        mz = 101
        mzb = 1

    # use integer division so that Mx and My stay integers
    grid_div = grid_resolution // resolution_max

    mx = mx_max // grid_div
    my = my_max // grid_div

    horizontal_grid = OrderedDict()
    horizontal_grid['Mx'] = mx
    horizontal_grid['My'] = my

    vertical_grid = OrderedDict()
    vertical_grid['Lz'] = 500
    vertical_grid['z_spacing'] = 'equal'
    vertical_grid['Mz'] = mz
    vertical_grid['Mbz'] = mzb

    grid_options = {}
    grid_options['skip'] = ''
    grid_options['skip_max'] = skip_max

    grid_dict = merge_dicts(horizontal_grid, vertical_grid, grid_options)

    if restart is True:
        return grid_options
    else:
        return grid_dict


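# Example use of generate_grid_description (doctest-style): at 20 m the
# 10 m reference grid (400 x 200) is coarsened by a factor of two:
#
#   >>> generate_grid_description(20, 'storglaciaren')
#   OrderedDict([('Mx', 200), ('My', 100), ('Lz', 500),
#                ('z_spacing', 'equal'), ('Mz', 201), ('Mbz', 1),
#                ('skip', ''), ('skip_max', 200)])

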
def merge_dicts(*dict_args):
    '''
    Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key-value pairs in latter dicts.

    Returns: OrderedDict
    '''
    result = OrderedDict()
    for dictionary in dict_args:
        result.update(dictionary)
    return result


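# Example use of merge_dicts (doctest-style): later dicts win on key clashes:
#
#   >>> merge_dicts({'a': 1}, {'a': 2, 'b': 3})
#   OrderedDict([('a', 2), ('b', 3)])

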
def uniquify_list(seq, idfun=None):
    '''
    Remove duplicates from a list, order preserving.
    From http://www.peterbe.com/plog/uniqifiers-benchmark
    '''

    if idfun is None:
        def idfun(x):
            return x
    seen = {}
    result = []
    for item in seq:
        marker = idfun(item)
        if marker in seen:
            continue
        seen[marker] = 1
        result.append(item)
    return result


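# Example use of uniquify_list (doctest-style):
#
#   >>> uniquify_list([1, 2, 1, 3, 2])
#   [1, 2, 3]

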
def generate_stress_balance(stress_balance, additional_params_dict):
    '''
    Generate stress balance params

    Returns: OrderedDict
    '''

    accepted_stress_balances = ('sia', 'ssa+sia')

    if stress_balance not in accepted_stress_balances:
        print('{} not in {}'.format(stress_balance, accepted_stress_balances))
        print('available stress balance solvers are {}'.format(accepted_stress_balances))
        sys.exit(1)

    params_dict = OrderedDict()
    params_dict['stress_balance'] = stress_balance
    if stress_balance == 'ssa+sia':
        params_dict['options_left'] = ''
        params_dict['cfbc'] = ''
        params_dict['sia_flow_law'] = 'gpbld'
        params_dict['pseudo_plastic'] = ''

    return merge_dicts(additional_params_dict, params_dict)


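# Example use of generate_stress_balance (doctest-style; 'sia_e', the SIA
# enhancement factor, stands in for any extra option dict):
#
#   >>> generate_stress_balance('ssa+sia', {'sia_e': 3.0})
#   OrderedDict([('sia_e', 3.0), ('stress_balance', 'ssa+sia'),
#                ('options_left', ''), ('cfbc', ''),
#                ('sia_flow_law', 'gpbld'), ('pseudo_plastic', '')])

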
def generate_hydrology(hydro, **kwargs):
    '''
    Generate hydrology params

    Returns: OrderedDict
    '''

    params_dict = OrderedDict()
    if hydro == 'null':
        params_dict['hydrology'] = 'null'
    elif hydro == 'diffuse':
        params_dict['hydrology'] = 'null'
        params_dict['hydrology_null_diffuse_till_water'] = ''
    elif hydro == 'routing':
        params_dict['hydrology'] = 'routing'
    elif hydro == 'distributed':
        params_dict['hydrology'] = 'distributed'
    else:
        print('hydrology {} not recognized, exiting'.format(hydro))
        sys.exit(1)

    return merge_dicts(params_dict, kwargs)


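# Example use of generate_hydrology (doctest-style; hydrology_tillwat_max
# is assumed here as a representative pass-through PISM option):
#
#   >>> generate_hydrology('diffuse', hydrology_tillwat_max=2.0)
#   OrderedDict([('hydrology', 'null'),
#                ('hydrology_null_diffuse_till_water', ''),
#                ('hydrology_tillwat_max', 2.0)])

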
def generate_calving(calving, **kwargs):
    '''
    Generate calving params

    Returns: OrderedDict
    '''

    params_dict = OrderedDict()
    if calving == 'ocean_kill':
        params_dict['calving'] = calving
    elif calving == 'thickness_calving':
        params_dict['calving'] = calving
    elif calving in ('eigen_calving', 'vonmises_calving'):
        params_dict['calving'] = '{},thickness_calving'.format(calving)
    elif calving == 'hybrid_calving':
        params_dict['calving'] = 'eigen_calving,vonmises_calving,thickness_calving'
    elif calving in ('float_kill', 'float_kill,ocean_kill', 'vonmises_calving,ocean_kill', 'eigen_calving,ocean_kill'):
        params_dict['calving'] = calving
    else:
        print('calving {} not recognized, exiting'.format(calving))
        sys.exit(1)
    if 'frontal_melt' in kwargs and kwargs['frontal_melt'] is True:
        params_dict['calving'] += ',frontal_melt'
        # delete the entry so it is not passed on as a separate option
        del kwargs['frontal_melt']
    return merge_dicts(params_dict, kwargs)


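# Example use of generate_calving (doctest-style): eigen_calving implies
# thickness_calving, and frontal_melt=True is folded into the option string:
#
#   >>> generate_calving('eigen_calving', frontal_melt=True)
#   OrderedDict([('calving', 'eigen_calving,thickness_calving,frontal_melt')])

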
def generate_climate(climate, **kwargs):
    '''
    Generate climate params

    Returns: OrderedDict
    '''

    params_dict = OrderedDict()
    if climate == 'init':
        params_dict['surface'] = 'elevation,forcing'
        if 'surface_given_file' not in kwargs:
            params_dict['surface_given_file'] = 'pism_storglaciaren_3d.nc'
        if 'force_to_thickness_file' not in kwargs:
            params_dict['force_to_thickness_file'] = 'pism_storglaciaren_3d.nc'
        if 'ice_surface_temp' not in kwargs:
            params_dict['ice_surface_temp'] = '-8,-8,0,2500'
        if 'climatic_mass_balance' not in kwargs:
            params_dict['climatic_mass_balance'] = '-3,2.5,1200,1450,1615'
    elif climate == 'pdd':
        params_dict['atmosphere'] = 'given,lapse_rate'
        if 'atmosphere_given_file' not in kwargs:
            params_dict['atmosphere_given_file'] = 'foo.nc'
        if 'temp_lapse_rate' not in kwargs:
            params_dict['temp_lapse_rate'] = 6.5
        params_dict['surface'] = 'pdd'
    elif climate == 'warming':
        params_dict['surface'] = 'elevation,forcing,delta_T'
        if 'surface_given_file' not in kwargs:
            params_dict['surface_given_file'] = 'pism_storglaciaren_3d.nc'
        if 'surface_delta_T_file' not in kwargs:
            params_dict['surface_delta_T_file'] = 'sg_warming_1K.nc'
        if 'force_to_thickness_file' not in kwargs:
            params_dict['force_to_thickness_file'] = 'pism_storglaciaren_3d.nc'
        if 'ice_surface_temp' not in kwargs:
            params_dict['ice_surface_temp'] = '-8,-8,0,2500'
        if 'climatic_mass_balance' not in kwargs:
            params_dict['climatic_mass_balance'] = '-3,2.5,1200,1450,1615'
    elif climate == 'elev+ftt':
        params_dict['surface'] = 'elevation,forcing'
        if 'surface_given_file' not in kwargs:
            params_dict['surface_given_file'] = 'pism_storglaciaren_3d.nc'
        if 'force_to_thickness_file' not in kwargs:
            params_dict['force_to_thickness_file'] = 'pism_storglaciaren_mask.nc'
        if 'ice_surface_temp' not in kwargs:
            params_dict['ice_surface_temp'] = '-8,-8,0,2500'
        if 'climatic_mass_balance' not in kwargs:
            params_dict['climatic_mass_balance'] = '-3,2.5,1200,1450,1615'
    else:
        print('climate {} not recognized, exiting'.format(climate))
        sys.exit(1)

    return merge_dicts(params_dict, kwargs)


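# Example use of generate_climate (doctest-style): a keyword supplied by the
# caller suppresses the built-in default and wins in the merge:
#
#   >>> generate_climate('init', ice_surface_temp='-6,-6,0,2500')['ice_surface_temp']
#   '-6,-6,0,2500'

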
def generate_ocean(ocean, **kwargs):
    '''
    Generate ocean params

    Returns: OrderedDict
    '''

    params_dict = OrderedDict()
    if ocean == 'paleo':
        params_dict['ocean'] = 'given,delta_SL,frac_SMB'
        if 'ocean_delta_SL_file' not in kwargs:
            params_dict['ocean_delta_SL_file'] = 'pism_dSL.nc'
    elif ocean == 'abrupt_glacial':
        params_dict['ocean'] = 'given,delta_SL,frac_SMB'
        if 'ocean_delta_SL_file' not in kwargs:
            params_dict['ocean_delta_SL_file'] = 'pism_abrupt_glacial_climate_forcing.nc'
    elif ocean == 'abrupt_glacial_mbp':
        params_dict['ocean'] = 'given,delta_SL,frac_SMB,delta_MBP'
        if 'ocean_delta_SL_file' not in kwargs:
            params_dict['ocean_delta_SL_file'] = 'pism_abrupt_glacial_climate_forcing.nc'
    elif ocean == 'paleo_mbp':
        params_dict['ocean'] = 'given,delta_SL,frac_SMB,delta_MBP'
        if 'ocean_delta_SL_file' not in kwargs:
            params_dict['ocean_delta_SL_file'] = 'pism_dSL.nc'
    elif ocean == 'warming':
        params_dict['ocean'] = 'given,runoff_SMB'
    elif ocean == 'warming_3eqn':
        params_dict['ocean'] = 'th,delta_T'
    elif ocean == 'paleo_const':
        params_dict['ocean'] = 'given,delta_SL'
    elif ocean == 'paleo_const_mbp':
        params_dict['ocean'] = 'given,delta_SL,delta_MBP'
    elif ocean in ('given', 'relax'):
        params_dict['ocean'] = 'given'
    elif ocean == 'given_mbp':
        params_dict['ocean'] = 'given,delta_MBP'
    elif ocean == 'const':
        params_dict['ocean'] = 'constant'
    else:
        print('ocean {} not recognized, exiting'.format(ocean))
        sys.exit(1)

    return merge_dicts(params_dict, kwargs)


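# Example use of generate_ocean (doctest-style):
#
#   >>> generate_ocean('paleo')
#   OrderedDict([('ocean', 'given,delta_SL,frac_SMB'),
#                ('ocean_delta_SL_file', 'pism_dSL.nc')])

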
def list_systems():
    '''
    Return a list of supported systems.
    '''
    return sorted(systems.keys())


def list_queues():
    '''
    Return a list of supported queues.
    '''
    result = set()
    for s in systems.values():
        for q in s["queue"].keys():
            result.add(q)

    return sorted(result)


def list_bed_types():
    '''
    Return a list of supported bed types.
    '''

    bed_types = ['ctrl',
                 'cresis',
                 'cresisp',
                 'minus',
                 'plus',
                 'ba01_bed',
                 '970mW_hs',
                 'jak_1985',
                 'no_bath']

    return bed_types


# information about systems
systems = {}

systems['debug'] = {'mpido': 'mpiexec -n {cores}',
                    'submit': 'echo',
                    'job_id': 'PBS_JOBID',
                    'queue': {}}

systems['chinook'] = {'mpido': 'mpirun -np {cores} -machinefile ./nodes_$SLURM_JOBID',
                      'submit': 'sbatch',
                      'work_dir': 'SLURM_SUBMIT_DIR',
                      'job_id': 'SLURM_JOBID',
                      'queue': {
                          't1standard': 24,
                          't1small': 24,
                          't2standard': 24,
                          't2small': 24,
                          'debug': 24}}

systems['pleiades'] = {'mpido': 'mpiexec -n {cores}',
                       'submit': 'qsub',
                       'work_dir': 'PBS_O_WORKDIR',
                       'job_id': 'PBS_JOBID',
                       'queue': {'long': 20, 'normal': 20}}

systems['pleiades_haswell'] = systems['pleiades'].copy()
systems['pleiades_haswell']['queue'] = {'long': 24, 'normal': 24}

systems['pleiades_ivy'] = systems['pleiades'].copy()
systems['pleiades_ivy']['queue'] = {'long': 20, 'normal': 20}

systems['pleiades_broadwell'] = systems['pleiades'].copy()
systems['pleiades_broadwell']['queue'] = {'long': 28, 'normal': 28}

systems['electra_broadwell'] = systems['pleiades_broadwell'].copy()


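# Example lookups into the systems table (doctest-style):
#
#   >>> systems['chinook']['queue']['t1standard']
#   24
#   >>> systems['debug']['mpido'].format(cores=4)
#   'mpiexec -n 4'

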
# headers for batch jobs
#
# Available keywords:
#
# cores    - number of cores (MPI tasks)
# queue    - queue (partition) name
# nodes    - number of nodes
# ppn      - number of tasks per node
# walltime - wall time limit

systems['debug']['header'] = ""

systems['chinook']['header'] = """#!/bin/sh
#SBATCH --partition={queue}
#SBATCH --ntasks={cores}
#SBATCH --tasks-per-node={ppn}
#SBATCH --time={walltime}
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
#SBATCH --output=pism.%j

module list

cd $SLURM_SUBMIT_DIR

# Generate a list of compute node hostnames reserved for this job;
# this ./nodes file is necessary for Slurm to spawn MPI processes
# across multiple compute nodes
srun -l /bin/hostname | sort -n | awk '{{print $2}}' > ./nodes_$SLURM_JOBID

ulimit -l unlimited
ulimit -s unlimited
ulimit

"""

systems['chinook']['footer'] = """
# clean up the list of hostnames
rm -rf ./nodes_$SLURM_JOBID
"""

systems['electra_broadwell']['header'] = """#PBS -S /bin/bash
#PBS -N cfd
#PBS -l walltime={walltime}
#PBS -m e
#PBS -q {queue}
#PBS -lselect={nodes}:ncpus={ppn}:mpiprocs={ppn}:model=bro_ele
#PBS -j oe

module list

cd $PBS_O_WORKDIR

"""

systems['pleiades']['header'] = """#PBS -S /bin/bash
#PBS -N cfd
#PBS -l walltime={walltime}
#PBS -m e
#PBS -W group_list=s1878
#PBS -q {queue}
#PBS -lselect={nodes}:ncpus={ppn}:mpiprocs={ppn}:model=ivy
#PBS -j oe

module list

cd $PBS_O_WORKDIR

"""

systems['pleiades_broadwell']['header'] = """#PBS -S /bin/bash
#PBS -N cfd
#PBS -l walltime={walltime}
#PBS -m e
#PBS -W group_list=s1878
#PBS -q {queue}
#PBS -lselect={nodes}:ncpus={ppn}:mpiprocs={ppn}:model=bro
#PBS -j oe

module list

cd $PBS_O_WORKDIR

"""

systems['pleiades_haswell']['header'] = """#PBS -S /bin/bash
#PBS -N cfd
#PBS -l walltime={walltime}
#PBS -m e
#PBS -W group_list=s1878
#PBS -q {queue}
#PBS -lselect={nodes}:ncpus={ppn}:mpiprocs={ppn}:model=has
#PBS -j oe

module list

cd $PBS_O_WORKDIR

"""

systems['pleiades_ivy']['header'] = """#PBS -S /bin/bash
#PBS -N cfd
#PBS -l walltime={walltime}
#PBS -m e
#PBS -W group_list=s1878
#PBS -q {queue}
#PBS -lselect={nodes}:ncpus={ppn}:mpiprocs={ppn}:model=ivy
#PBS -j oe

module list

cd $PBS_O_WORKDIR

"""

# headers for post-processing jobs

post_headers = {}
post_headers['default'] = """#!/bin/bash

"""

post_headers['pbs'] = """#PBS -S /bin/bash
#PBS -l select=1:mem=94GB
#PBS -l walltime=8:00:00
#PBS -q ldan

cd $PBS_O_WORKDIR

"""

post_headers['slurm'] = """#!/bin/bash
#SBATCH --partition=analysis
#SBATCH --ntasks=1
#SBATCH --tasks-per-node=1
#SBATCH --time=48:00:00
#SBATCH --output=pism.%j
#SBATCH --mem=214G

cd $SLURM_SUBMIT_DIR

ulimit -l unlimited
ulimit -s unlimited
ulimit

"""


def make_batch_header(system_name, n_cores, walltime, queue):
    '''
    Generate a batch job header for the given HPC system.

    Returns: (string, dict)
    '''

    # get system info; use "debug" if the requested name was not found
    system = systems.get(system_name, systems["debug"]).copy()

    assert n_cores > 0

    if system_name == 'debug':
        # when debugging, assume that all we need is one node
        ppn = n_cores
        nodes = 1
    else:
        try:
            ppn = system['queue'][queue]
        except KeyError:
            raise ValueError("There is no queue {} on {}. Pick one of {}.".format(
                queue, system_name, list(system['queue'].keys())))
        # round up when computing the number of nodes needed to run on 'n_cores' cores
        nodes = int(math.ceil(float(n_cores) / ppn))

    if nodes * ppn != n_cores:
        print("Warning! Running {n_cores} tasks on {nodes} {ppn}-processor nodes, "
              "wasting {N} processors!".format(nodes=nodes,
                                               ppn=ppn,
                                               n_cores=n_cores,
                                               N=ppn * nodes - n_cores))

    system['mpido'] = system['mpido'].format(cores=n_cores)
    system["header"] = system["header"].format(queue=queue,
                                               walltime=walltime,
                                               nodes=nodes,
                                               ppn=ppn,
                                               cores=n_cores)
    system["header"] += version_header()

    return system["header"], system


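# Example use of make_batch_header (doctest-style): 48 cores on chinook's
# 24-core t1standard nodes gives 2 full nodes; the returned header string
# also embeds version info via version_header():
#
#   >>> header, system = make_batch_header('chinook', 48, '12:00:00', 't1standard')
#   >>> system['mpido']
#   'mpirun -np 48 -machinefile ./nodes_$SLURM_JOBID'

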
def make_batch_post_header(system):
    '''
    Return a header string for a post-processing job on the given system.
    '''

    v = version_header()

    if system in ('electra_broadwell', 'pleiades', 'pleiades_ivy', 'pleiades_broadwell', 'pleiades_haswell'):
        return post_headers['pbs'] + v
    elif system == 'chinook':
        return post_headers['slurm'] + v
    else:
        return post_headers['default'] + v


def make_batch_header_test():
    "print headers of all supported systems and queues (for testing)"
    for s in systems.keys():
        for q in systems[s]['queue'].keys():
            print("# system: {system}, queue: {queue}".format(system=s, queue=q))
            print(make_batch_header(s, 100, "1:00:00", q)[0])


def version():
    """Return the path to the top directory of the Git repository
    containing this script, the URL of the "origin" remote and the version."""
    import inspect
    import shlex
    import subprocess

    def output(command):
        # run 'command' in the directory containing this script and return
        # its output decoded to a string (check_output returns bytes)
        path = os.path.realpath(os.path.dirname(inspect.stack(0)[0][1]))
        return subprocess.check_output(shlex.split(command), cwd=path).decode("utf-8").strip()

    return (output("git rev-parse --show-toplevel"),
            output("git remote get-url origin"),
            output("git describe --always"))


def version_header():
    "Return shell comments containing version info."
    version_info = version()
    return """
# Generated by {script}
# Command: {command}
# Git top level: {path}
# URL: {url}
# Version: {version}

""".format(script=os.path.realpath(sys.argv[0]),
           command=" ".join(sys.argv),
           path=version_info[0],
           url=version_info[1],
           version=version_info[2])