From 420abf4b790f4ddbf01ef882f3e25224828a4150 Mon Sep 17 00:00:00 2001 From: Thomas-Ulrich Date: Mon, 5 Oct 2020 14:47:07 +0200 Subject: [PATCH 1/7] fix Northridge --- Northridge/generating_the_nrf.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Northridge/generating_the_nrf.sh b/Northridge/generating_the_nrf.sh index aa93a18..280d5aa 100755 --- a/Northridge/generating_the_nrf.sh +++ b/Northridge/generating_the_nrf.sh @@ -4,7 +4,10 @@ prefix=northridge #Download the srf file: -wget http://hypocenter.usc.edu/research/SRF/nr6.70-s0000-h0000.txt -o $prefix.srf +wget http://hypocenter.usc.edu/research/SRF/nr6.70-s0000-h0000.txt + +#merge line 3 and 4 +sed '3{N;s/\n//;}' nr6.70-s0000-h0000.txt > ${prefix}.srf #To find the projected coordinates of the fault center, we apply cs2cs (from proj.4): echo -118.5150 34.3440 0.0 | cs2cs +proj=lonlat +axis=enu +units=m +to +proj=merc +lon_0=-118 +axis=enu +units=m From 7a50b01fd2777bd9e89ccc2539e699a02bd90d0a Mon Sep 17 00:00:00 2001 From: Sebastian Wolf Date: Fri, 16 Oct 2020 16:38:53 +0200 Subject: [PATCH 2/7] Include seissol-benchmarks convergence script from gitlab --- .../convergence_anisotropic/generateCubes.sh | 8 + .../convergence_anisotropic/material.yaml | 24 +++ .../convergence_anisotropic/parameters.par | 55 ++++++ .../convergence_anisotropic}/recordPoints.dat | 0 .../convergence_elastic}/generateCubes.sh | 0 .../convergence_elastic}/material.yaml | 0 .../convergence_elastic}/parameters.par | 0 .../convergence_elastic/recordPoints.dat | 2 + .../convergence_viscoelastic/generateCubes.sh | 8 + .../convergence_viscoelastic/material.yaml | 8 + .../convergence_viscoelastic/parameters.par | 57 ++++++ .../convergence_viscoelastic/recordPoints.dat | 3 + .../compile_and_run.py | 171 ++++++++++++++++++ .../do_all_the_convergence_tests/job.template | 47 +++++ .../material.yaml | 8 + .../parameters.template | 55 ++++++ 16 files changed, 446 insertions(+) create mode 100755 convergence/convergence_anisotropic/generateCubes.sh create mode 100644 convergence/convergence_anisotropic/material.yaml create mode 100644 convergence/convergence_anisotropic/parameters.par rename {convergence_elastic => convergence/convergence_anisotropic}/recordPoints.dat (100%) rename {convergence_elastic => convergence/convergence_elastic}/generateCubes.sh (100%) rename {convergence_elastic => convergence/convergence_elastic}/material.yaml (100%) rename {convergence_elastic => convergence/convergence_elastic}/parameters.par (100%) create mode 100644 convergence/convergence_elastic/recordPoints.dat create mode 100755 convergence/convergence_viscoelastic/generateCubes.sh create mode 100644 convergence/convergence_viscoelastic/material.yaml create mode 100644 convergence/convergence_viscoelastic/parameters.par create mode 100644 convergence/convergence_viscoelastic/recordPoints.dat create mode 100755 convergence/do_all_the_convergence_tests/compile_and_run.py create mode 100755 convergence/do_all_the_convergence_tests/job.template create mode 100644 convergence/do_all_the_convergence_tests/material.yaml create mode 100644 convergence/do_all_the_convergence_tests/parameters.template diff --git a/convergence/convergence_anisotropic/generateCubes.sh b/convergence/convergence_anisotropic/generateCubes.sh new file mode 100755 index 0000000..88f19de --- /dev/null +++ b/convergence/convergence_anisotropic/generateCubes.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# PATH must contain cubeGenerator from SeisSol/preprocessing/meshing/cube_c/ +S=2 +cubeGenerator -b 6 -x 4 -y 4 -z 
4 --px 1 --py 1 --pz 1 -o cube_4.nc -s $S +cubeGenerator -b 6 -x 8 -y 8 -z 8 --px 1 --py 1 --pz 1 -o cube_8.nc -s $S +cubeGenerator -b 6 -x 16 -y 16 -z 16 --px 1 --py 1 --pz 1 -o cube_16.nc -s $S +cubeGenerator -b 6 -x 32 -y 32 -z 32 --px 1 --py 1 --pz 1 -o cube_32.nc -s $S + diff --git a/convergence/convergence_anisotropic/material.yaml b/convergence/convergence_anisotropic/material.yaml new file mode 100644 index 0000000..5f33f2b --- /dev/null +++ b/convergence/convergence_anisotropic/material.yaml @@ -0,0 +1,24 @@ +!ConstantMap +map: + rho: 1.0 + c11: 192.0 + c12: 66.0 + c13: 60.0 + c14: 0.0 + c15: 0.0 + c16: 0.0 + c22: 160.0 + c23: 56.0 + c24: 0.0 + c25: 0.0 + c26: 0.0 + c33: 272.0 + c34: 0.0 + c35: 0.0 + c36: 0.0 + c44: 60.0 + c45: 0.0 + c46: 0.0 + c55: 62.0 + c56: 0.0 + c66: 49.0 diff --git a/convergence/convergence_anisotropic/parameters.par b/convergence/convergence_anisotropic/parameters.par new file mode 100644 index 0000000..4cd32fc --- /dev/null +++ b/convergence/convergence_anisotropic/parameters.par @@ -0,0 +1,55 @@ +&Equations +MaterialFileName = 'material.yaml' +/ + +&IniCondition ! no initial condition +cICType = 'Planarwave' +!cICType = 'Zero' +/ + +&Boundaries ! activate boundary conditions: +BC_pe = 1 ! Periodic boundaries +/ + +&SourceType +/ + +&SpongeLayer +/ + +&MeshNml +MeshFile = 'cube_4' ! Name of mesh file +meshgenerator = 'Netcdf' ! Name of meshgenerator (format) +/ + +&Discretization +Order = 6 ! Order of accuracy in space and times +Material = 1 ! Material order +CFL = 0.5 ! CFL number (<=1.0) +FixTimeStep = 5 ! Manualy chosen minimum time +ClusteredLts = 2 +/ + +&Output +OutputFile = 'output/conv' +iOutputMask = 1 1 1 1 1 1 1 1 1 ! Variables ouptut +iOutputMaskMaterial = 1 1 1 ! Material output +Format = 10 ! Format (0=IDL, 1=TECPLOT, 2=IBM DX, 4=GiD)) +Interval = 200 ! Index of printed info at timesteps +TimeInterval = 0.01 ! Index of printed info at time +printIntervalCriterion = 2 ! Criterion for index of printed info: 1=timesteps,2=time,3=timesteps+time +pickdt = 0.01 ! Pickpoint Sampling +pickDtType = 1 ! Pickpoint Type +nRecordPoints = 2 ! number of Record points which are read from file +RFileName = 'recordPoints.dat' ! 
Record Points in extra file +/ + +&Postprocessing +/ + +&AbortCriteria +EndTime = 0.02 +/ + +&Debugging +/ diff --git a/convergence_elastic/recordPoints.dat b/convergence/convergence_anisotropic/recordPoints.dat similarity index 100% rename from convergence_elastic/recordPoints.dat rename to convergence/convergence_anisotropic/recordPoints.dat diff --git a/convergence_elastic/generateCubes.sh b/convergence/convergence_elastic/generateCubes.sh similarity index 100% rename from convergence_elastic/generateCubes.sh rename to convergence/convergence_elastic/generateCubes.sh diff --git a/convergence_elastic/material.yaml b/convergence/convergence_elastic/material.yaml similarity index 100% rename from convergence_elastic/material.yaml rename to convergence/convergence_elastic/material.yaml diff --git a/convergence_elastic/parameters.par b/convergence/convergence_elastic/parameters.par similarity index 100% rename from convergence_elastic/parameters.par rename to convergence/convergence_elastic/parameters.par diff --git a/convergence/convergence_elastic/recordPoints.dat b/convergence/convergence_elastic/recordPoints.dat new file mode 100644 index 0000000..4c2865b --- /dev/null +++ b/convergence/convergence_elastic/recordPoints.dat @@ -0,0 +1,2 @@ +0.0 0.0 0.0 +-0.750000000000000 -0.750000000000000 -0.250000000000000 diff --git a/convergence/convergence_viscoelastic/generateCubes.sh b/convergence/convergence_viscoelastic/generateCubes.sh new file mode 100755 index 0000000..88f19de --- /dev/null +++ b/convergence/convergence_viscoelastic/generateCubes.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# PATH must contain cubeGenerator from SeisSol/preprocessing/meshing/cube_c/ +S=2 +cubeGenerator -b 6 -x 4 -y 4 -z 4 --px 1 --py 1 --pz 1 -o cube_4.nc -s $S +cubeGenerator -b 6 -x 8 -y 8 -z 8 --px 1 --py 1 --pz 1 -o cube_8.nc -s $S +cubeGenerator -b 6 -x 16 -y 16 -z 16 --px 1 --py 1 --pz 1 -o cube_16.nc -s $S +cubeGenerator -b 6 -x 32 -y 32 -z 32 --px 1 --py 1 --pz 1 -o cube_32.nc -s $S + diff --git a/convergence/convergence_viscoelastic/material.yaml b/convergence/convergence_viscoelastic/material.yaml new file mode 100644 index 0000000..54e2db3 --- /dev/null +++ b/convergence/convergence_viscoelastic/material.yaml @@ -0,0 +1,8 @@ +!ConstantMap +map: + rho: 1. + mu: 1. + lambda: 2. + Qp: 20. + Qs: 10. + diff --git a/convergence/convergence_viscoelastic/parameters.par b/convergence/convergence_viscoelastic/parameters.par new file mode 100644 index 0000000..a9fe806 --- /dev/null +++ b/convergence/convergence_viscoelastic/parameters.par @@ -0,0 +1,57 @@ +&Equations +MaterialFileName = material.yaml +FreqCentral = 1. +FreqRatio = 100. +/ + +&IniCondition ! no initial condition +cICType = 'Planarwave' +/ + +&Boundaries ! activate boundary conditions: +BC_pe = 1 ! Periodic boundaries +/ + +&SourceType +/ + +&SpongeLayer +/ + +&MeshNml +MeshFile = 'cube_8' ! Name of mesh file +meshgenerator = 'Netcdf' ! Name of meshgenerator (format) +/ + +&Discretization +Order = 6 ! Order of accuracy in space and times +Material = 1 ! Material order +CFL = 0.5 ! CFL number (<=1.0) +FixTimeStep = 5 ! Manualy chosen minimum time +ClusteredLts = 2 +/ + +&Output +OutputFile = 'output/conv' +iOutputMask = 1 1 1 1 1 1 1 1 1 ! Variables ouptut +iOutputMaskMaterial = 1 1 1 ! Material output +Format = 10 ! Format (0=IDL, 1=TECPLOT, 2=IBM DX, 4=GiD)) +Interval = 200 ! Index of printed info at timesteps +TimeInterval = 0.01 ! Index of printed info at time +printIntervalCriterion = 2 ! 
Criterion for index of printed info: 1=timesteps,2=time,3=timesteps+time +pickdt = 0.001 ! Pickpoint Sampling +pickDtType = 1 ! Pickpoint Type +nRecordPoints = 3 ! number of Record points which are read from file +RFileName = 'recordPoints.dat' ! Record Points in extra file +/ + +&Postprocessing +/ + +&AbortCriteria +!EndTime = 0.5 +EndTime = 10.0 +/ + +&Debugging +/ diff --git a/convergence/convergence_viscoelastic/recordPoints.dat b/convergence/convergence_viscoelastic/recordPoints.dat new file mode 100644 index 0000000..e8960b9 --- /dev/null +++ b/convergence/convergence_viscoelastic/recordPoints.dat @@ -0,0 +1,3 @@ +0.0 0.0 0.0 +-0.750000000000000 -0.750000000000000 -0.250000000000000 +-0.375000000000000 -0.875000000000000 0.625000000000000 diff --git a/convergence/do_all_the_convergence_tests/compile_and_run.py b/convergence/do_all_the_convergence_tests/compile_and_run.py new file mode 100755 index 0000000..f8bb133 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/compile_and_run.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 + +import os +import subprocess +import errno +import re +from argparse import ArgumentParser +from shutil import copy +from pathlib import Path +import math + +def str2bool(value): + return value.lower() == 'true' or value.lower() == 'yes' + +cmd_line_parser = ArgumentParser() +cmd_line_parser.add_argument('seissol_dir', type=str) +cmd_line_parser.add_argument('--steps', type=str, choices=['build', 'prepare', 'run', 'jobs', 'analyse', 'all'], default='all') +cmd_line_parser.add_argument('--mpiexec', type=str, default='mpiexec') +args = cmd_line_parser.parse_args() + +dirs = ('mesh', 'par', 'logs', 'jobs') +mesh_dir, par_dir, log_dir, job_dir = dirs +for d in dirs: + try: + os.mkdir(d) + except OSError as ex: + if ex.errno != errno.EEXIST: + raise +convergence_file = 'convergence.csv' + +cwd = os.getcwd() + +os.chdir(args.seissol_dir) + +host_arch = 'skx' +precision = ['float', 'double'] +arch = [arch_name(p, host_arc) for p in precision] +#TODO(SW): add anisotropic and poroelastic +equations = ['elastic', 'viscoelastic', 'viscoelastic2'] +orders = range(2,8) +resolutions = range(2,7) + + +parts = {2: 1, 3: 1, 4: 1, 5: 4, 6: 4} +scales = [2, 100] +scale_map = {'elastic': 2, 'viscoelastic': 2, 'viscoelastic2': 2} +end_time = {'elastic': '0.1', 'viscoelastic': '0.1', 'viscoelastic2': '0.1'} + +def partition(nodes): + if nodes <= 16: + return 'micro' + elif nodes <= 768: + return 'general' + return 'large' + +def num_mechs(eq): + return 3 if eq.startswith('viscoelastic') else 0 + +def arch_name(precision, host_arch): + return prec[0] + host_arch + +def seissol_name(arch, eq, o, build_type='Release'): + return 'SeisSol_{}_{}_{}_{}'.format(build_type, arch, eq, o) + +def cube_name(n, scale): + return '{}/cube_{}_{}'.format(mesh_dir, 2**n, scale) + +def par_name(eq, n): + return '{}/parameters_{}_{}.par'.format(par_dir, eq, 2**n) + +def resolution(eq, n): + return scale_map[eq] / 2**n * math.sqrt(3) + +def log_name(arch, eq, o, n): + return '{}_{}_{}_{}.log'.format(arch, eq, o, 2**n) + +def job_name(arch, eq, o, n): + return '{}_{}_{}_{}.sh'.format(arch, eq, o, 2**n) + +if args.steps in ['build', 'all']: + if not os.path.exists('build_convergence'): + try: + os.mkdir('build_convergence') + except OSError as ex: + if ex.errno != errno.EEXIST: + raise + os.chdir('build_convergence') + for prec in precision: + for eq in equations: + for o in orders: + compile_cmd = 'CC=mpicc CXX=mpiicpc FC=mpiifort cmake .. 
\ + -DCMAKE_BUILD_TYPE=Release \ + -DCOMMTHREAD=ON \ + -DEQUATIONS={} \ + -DGEMM_TOOL_LIST=LIBXSMM,PSpaMM, \ + -DHOST_ARCH={} \ + -DNUMBER_OF_MECHANISMS={} \ + -DORDER={} \ + -DPRECISION={} \ + -DTESTING=OFF && make -j48'.format( + eq, + host_arch, + num_mechs(eq), + o, + prec + ) + print(compile_cmd) + subprocess.check_output(compile_cmd, shell=True) + num_quantities = 9+6*num_mechs(eq) + sn = seissol_name(arch(precision, host_arch), eq, o, 'Release')) + copy_source = sn + copy_target = os.path.join(cwd, sn) + copy(copy_source, copy_target) + +os.chdir(cwd) + +if args.steps in ['prepare', 'all']: + with open('DGPATH', 'w') as f: + f.write(os.path.join(args.seissol_dir, 'Maple', '\n')) + for n in resolutions: + for scale in scales: + generate_cmd = 'cubeGenerator -b 6 -x {0} -y {0} -z {0} --px {1} --py {1} --pz {1} -o {2}.nc -s {3}'.format( + 2**n, + parts[n], + cube_name(n, scale), + scale + ) + os.system(generate_cmd) + for eq in equations: + for n in resolutions: + parameters = Path('parameters.template').read_text() + parameters = parameters.replace('MESH_FILE', cube_name(n, scale_map[eq])) + parameters = parameters.replace('END_TIME', end_time[eq]) + with open(par_name(eq, n), 'w') as f: + f.write(parameters) + +if args.steps in ['run', 'jobs', 'all']: + for arch in archs: + for eq in equations: + for o in orders: + for n in resolutions: + log_file = os.path.join(log_dir, log_name(arch, eq, o, n)) + nodes = parts[n]**3 + run_cmd = '{} -n {} ./{} {}'.format(args.mpiexec, nodes, seissol_name(arch,eq,o), par_name(eq, n)) + if nodes == 1 and args.steps in ['run', 'all']: + run_cmd += ' > ' + log_file + print(run_cmd) + os.system(run_cmd) + elif nodes > 1 and args.steps in ['jobs', 'all']: + job = Path('job.template').read_text() + job = job.replace('WORK_DIR', cwd) + job = job.replace('LOG_FILE', log_file) + job = job.replace('NODES', str(nodes)) + job = job.replace('PARTITION', partition(nodes)) + with open(os.path.join(job_dir, job_name(arch, eq, o, n)), 'w') as f: + f.write(job) + f.write(run_cmd + '\n') + +if args.steps in ['analyse', 'all']: + with open(convergence_file, 'w') as result_file: + result_file.write('arch,equations,order,h,norm,var,error\n') + for arch in archs: + for eq in equations: + for o in orders: + for n in resolutions: + log_file = os.path.join(log_dir, log_name(arch,eq,o,n)) + result = Path(log_file).read_text() + for line in result.split('\n'): + err = re.search('(LInf|L2|L1)\s*,\s+var\[\s*(\d+)\s*\]\s*=\s*([0-9\.eE+-]+)', line) + if err: + result_file.write('{},{},{},{},{}\n'.format(arch,eq,o,resolution(eq,n), ','.join([str(g) for g in err.groups()]))) diff --git a/convergence/do_all_the_convergence_tests/job.template b/convergence/do_all_the_convergence_tests/job.template new file mode 100755 index 0000000..5c61969 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/job.template @@ -0,0 +1,47 @@ +#!/bin/bash +#SBATCH -J SeisSol_LOG_NAME +#Output and error (also --output, --error): +#SBATCH -o WORKDIR/LOG_FILE +#SBATCH -e WORKDIR/LOG_FILE + +#Initial working directory (also --chdir): +#SBATCH --workdir=WORK_DIR + +#Notification and type +#SBATCH --mail-type=END +#SBATCH --mail-user=TODO + +# Wall clock limit: +#SBATCH --time=10:00 +#SBATCH --no-requeue + +#Setup of execution environment +#SBATCH --export=ALL +#SBATCH --account=TODO +#constraints are optional +#--constraint="scratch&work" +#SBATCH --partition=PARTITION + +#Number of nodes and MPI tasks per node: +#SBATCH --nodes=NODES +#SBATCH --ntasks-per-node=1 + +module load slurm_setup +#Run the 
program: +export MP_SINGLE_THREAD=no +unset KMP_AFFINITY +export OMP_NUM_THREADS=94 +export OMP_PLACES="cores(47)" + +export XDMFWRITER_ALIGNMENT=8388608 +export XDMFWRITER_BLOCK_SIZE=8388608 +export SC_CHECKPOINT_ALIGNMENT=8388608 + +export SEISSOL_CHECKPOINT_ALIGNMENT=8388608 +export SEISSOL_CHECKPOINT_DIRECT=1 +export ASYNC_MODE=THREAD +export ASYNC_BUFFER_ALIGNMENT=8388608 +source /etc/profile.d/modules.sh + +echo $SLURM_NTASKS +ulimit -Ss unlimited diff --git a/convergence/do_all_the_convergence_tests/material.yaml b/convergence/do_all_the_convergence_tests/material.yaml new file mode 100644 index 0000000..54e2db3 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/material.yaml @@ -0,0 +1,8 @@ +!ConstantMap +map: + rho: 1. + mu: 1. + lambda: 2. + Qp: 20. + Qs: 10. + diff --git a/convergence/do_all_the_convergence_tests/parameters.template b/convergence/do_all_the_convergence_tests/parameters.template new file mode 100644 index 0000000..11220e4 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/parameters.template @@ -0,0 +1,55 @@ +&Equations +MaterialFileName = 'material.yaml' +FreqCentral = 1. +FreqRatio = 100. +/ + +&IniCondition ! no initial condition +cICType = 'Planarwave' +/ + +&Boundaries ! activate boundary conditions: +BC_pe = 1 ! Periodic boundaries +/ + +&SourceType +/ + +&SpongeLayer +/ + +&MeshNml +MeshFile = 'MESH_FILE' ! Name of mesh file +meshgenerator = 'Netcdf' ! Name of meshgenerator (format) +/ + +&Discretization +Order = 6 ! Order of accuracy in space and times +Material = 1 ! Material order +CFL = 0.5 ! CFL number (<=1.0) +FixTimeStep = 5 ! Manualy chosen minimum time +ClusteredLts = 1 +/ + +&Output +OutputFile = 'output/conv' +iOutputMask = 1 1 1 1 1 1 1 1 1 ! Variables ouptut +iOutputMaskMaterial = 1 1 1 ! Material output +Format = 10 ! Format (0=IDL, 1=TECPLOT, 2=IBM DX, 4=GiD)) +Interval = 200 ! Index of printed info at timesteps +TimeInterval = 0.01 ! Index of printed info at time +printIntervalCriterion = 2 ! Criterion for index of printed info: 1=timesteps,2=time,3=timesteps+time +pickdt = 0.05 ! Pickpoint Sampling +pickDtType = 1 ! Pickpoint Type +nRecordPoints = 0 ! 
number of Record points which are read from file +/ + +&Postprocessing +/ + +&AbortCriteria +EndTime = END_TIME +/ + +&Debugging +/ From 2c45a3a017262c5d486a6ab324ffbff0f39f0dfc Mon Sep 17 00:00:00 2001 From: Sebastian Wolf Date: Wed, 23 Dec 2020 14:47:25 +0100 Subject: [PATCH 3/7] Polish do_all_convergence_tests --- .gitignore | 10 + .../compile_and_run.py | 337 +++++++++++------- .../error_plots.py | 119 +++++++ .../error_table.py | 45 +++ .../do_all_the_convergence_tests/job.template | 4 +- .../material_anisotropic.yaml | 26 ++ ...terial.yaml => material_viscoelastic.yaml} | 0 .../parameters.template | 4 +- 8 files changed, 412 insertions(+), 133 deletions(-) create mode 100644 .gitignore create mode 100644 convergence/do_all_the_convergence_tests/error_plots.py create mode 100644 convergence/do_all_the_convergence_tests/error_table.py mode change 100755 => 100644 convergence/do_all_the_convergence_tests/job.template create mode 100644 convergence/do_all_the_convergence_tests/material_anisotropic.yaml rename convergence/do_all_the_convergence_tests/{material.yaml => material_viscoelastic.yaml} (100%) diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4232a22 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +*.pyc + +convergence/do_all_the_convergence_tests/jobs +convergence/do_all_the_convergence_tests/logs +convergence/do_all_the_convergence_tests/mesh +convergence/do_all_the_convergence_tests/par +convergence/do_all_the_convergence_tests/plots +convergence/do_all_the_convergence_tests/SeisSol* +convergence/do_all_the_convergence_tests/convergence.csv + diff --git a/convergence/do_all_the_convergence_tests/compile_and_run.py b/convergence/do_all_the_convergence_tests/compile_and_run.py index f8bb133..de48cf3 100755 --- a/convergence/do_all_the_convergence_tests/compile_and_run.py +++ b/convergence/do_all_the_convergence_tests/compile_and_run.py @@ -9,163 +9,242 @@ from pathlib import Path import math -def str2bool(value): - return value.lower() == 'true' or value.lower() == 'yes' +### Settings for SuperMUC-NG +# on_a_cluster = True +# host_arch = 'skx' +# orders = range(2,8) +# resolutions = range(2,7) +# compilers = ['mpiicc', 'mpiicpc', 'mpiifor'] + +### Settings for Sebastians workstation +on_a_cluster = False +host_arch = "hsw" +orders = range(3, 7) +resolutions = range(2, 5) +compilers = ["mpicc", "mpicxx", "mpif90"] cmd_line_parser = ArgumentParser() -cmd_line_parser.add_argument('seissol_dir', type=str) -cmd_line_parser.add_argument('--steps', type=str, choices=['build', 'prepare', 'run', 'jobs', 'analyse', 'all'], default='all') -cmd_line_parser.add_argument('--mpiexec', type=str, default='mpiexec') +cmd_line_parser.add_argument("seissol_dir", type=str) +cmd_line_parser.add_argument( + "--steps", + type=str, + choices=["build", "prepare", "run", "analyse", "all"], + default="all", +) +cmd_line_parser.add_argument( + "--equations", + type=str, + choices=["elastic", "viscoelastic", "viscoelastic2", "anisotropic"], + default="elastic", +) +cmd_line_parser.add_argument("--mpiexec", type=str, default="mpiexec") args = cmd_line_parser.parse_args() -dirs = ('mesh', 'par', 'logs', 'jobs') -mesh_dir, par_dir, log_dir, job_dir = dirs -for d in dirs: - try: - os.mkdir(d) - except OSError as ex: - if ex.errno != errno.EEXIST: - raise -convergence_file = 'convergence.csv' -cwd = os.getcwd() - -os.chdir(args.seissol_dir) +def partition(nodes): + if nodes <= 16: + return "micro" + elif nodes <= 768: + return "general" + return "large" -host_arch = 'skx' -precision = 
['float', 'double'] -arch = [arch_name(p, host_arc) for p in precision] -#TODO(SW): add anisotropic and poroelastic -equations = ['elastic', 'viscoelastic', 'viscoelastic2'] -orders = range(2,8) -resolutions = range(2,7) +def num_mechs(eq): + return 3 if eq.startswith("viscoelastic") else 0 -parts = {2: 1, 3: 1, 4: 1, 5: 4, 6: 4} -scales = [2, 100] -scale_map = {'elastic': 2, 'viscoelastic': 2, 'viscoelastic2': 2} -end_time = {'elastic': '0.1', 'viscoelastic': '0.1', 'viscoelastic2': '0.1'} -def partition(nodes): - if nodes <= 16: - return 'micro' - elif nodes <= 768: - return 'general' - return 'large' +def arch_name(precision, host_arch): + return precision[0] + host_arch -def num_mechs(eq): - return 3 if eq.startswith('viscoelastic') else 0 -def arch_name(precision, host_arch): - return prec[0] + host_arch +def seissol_name(arch, eq, o, build_type="Release"): + return "SeisSol_{}_{}_{}_{}".format(build_type, arch, o, eq) -def seissol_name(arch, eq, o, build_type='Release'): - return 'SeisSol_{}_{}_{}_{}'.format(build_type, arch, eq, o) def cube_name(n, scale): - return '{}/cube_{}_{}'.format(mesh_dir, 2**n, scale) + return "{}/cube_{}_{}".format(mesh_dir, 2 ** n, scale) + def par_name(eq, n): - return '{}/parameters_{}_{}.par'.format(par_dir, eq, 2**n) + return "{}/parameters_{}_{}.par".format(par_dir, eq, 2 ** n) + def resolution(eq, n): - return scale_map[eq] / 2**n * math.sqrt(3) + return scale_map[eq] / 2 ** n * math.sqrt(3) + def log_name(arch, eq, o, n): - return '{}_{}_{}_{}.log'.format(arch, eq, o, 2**n) + return "{}_{}_{}_{}.log".format(arch, eq, o, 2 ** n) + def job_name(arch, eq, o, n): - return '{}_{}_{}_{}.sh'.format(arch, eq, o, 2**n) + return "{}_{}_{}_{}.sh".format(arch, eq, o, 2 ** n) -if args.steps in ['build', 'all']: - if not os.path.exists('build_convergence'): + +def on_off(boolean): + return "ON" if boolean else "OFF" + + +dirs = ("mesh", "par", "logs", "jobs") +mesh_dir, par_dir, log_dir, job_dir = dirs +for d in dirs: try: - os.mkdir('build_convergence') + os.mkdir(d) except OSError as ex: - if ex.errno != errno.EEXIST: - raise - os.chdir('build_convergence') - for prec in precision: - for eq in equations: - for o in orders: - compile_cmd = 'CC=mpicc CXX=mpiicpc FC=mpiifort cmake .. 
\ - -DCMAKE_BUILD_TYPE=Release \ - -DCOMMTHREAD=ON \ - -DEQUATIONS={} \ - -DGEMM_TOOL_LIST=LIBXSMM,PSpaMM, \ - -DHOST_ARCH={} \ - -DNUMBER_OF_MECHANISMS={} \ - -DORDER={} \ - -DPRECISION={} \ - -DTESTING=OFF && make -j48'.format( - eq, - host_arch, - num_mechs(eq), - o, - prec - ) - print(compile_cmd) - subprocess.check_output(compile_cmd, shell=True) - num_quantities = 9+6*num_mechs(eq) - sn = seissol_name(arch(precision, host_arch), eq, o, 'Release')) - copy_source = sn - copy_target = os.path.join(cwd, sn) - copy(copy_source, copy_target) + if ex.errno != errno.EEXIST: + raise +convergence_file = "convergence.csv" + +cwd = os.getcwd() + +os.chdir(args.seissol_dir) + +precision = ["single", "double"] +parts = {2: 1, 3: 1, 4: 1, 5: 4, 6: 4} +scales = [2, 100] +scale_map = {"elastic": 2, "viscoelastic": 2, "viscoelastic2": 2, "anisotropic": 2} +end_time = { + "elastic": "0.1", + "viscoelastic": "0.1", + "viscoelastic2": "0.1", + "anisotropic": "0.1", +} +material_file = { + "elastic": "material_viscoelastic.yaml", + "viscoelastic": "material_viscoelastic.yaml", + "viscoelastic2": "material_viscoelastic.yaml", + "anisotropic": "material_anisotropic.yaml", +} +initial_condition = { + "elastic": "PlanarWave", + "viscoelastic": "PlanarWave", + "viscoelastic2": "PlanarWave", + "anisotropic": "SuperimposedPlanarwave", +} + +archs = [arch_name(p, host_arch) for p in precision] +equations = args.equations + +if args.steps in ["build", "all"]: + if not os.path.exists("build_convergence"): + try: + os.mkdir("build_convergence") + except OSError as ex: + if ex.errno != errno.EEXIST: + raise + os.chdir("build_convergence") + for prec in precision: + for o in orders: + compile_cmd = ( + "CC={} CXX={} FC={} cmake .. " + "-DCMAKE_BUILD_TYPE=Release " + "-DCOMMTHREAD={} " + "-DEQUATIONS={} " + "-DGEMM_TOOL_LIST=LIBXSMM,PSpaMM, " + "-DHOST_ARCH={} " + "-DNUMBER_OF_MECHANISMS={} " + "-DORDER={} " + "-DPRECISION={} " + "-DTESTING=OFF && make -j8".format( + compilers[0], + compilers[1], + compilers[2], + on_off(on_a_cluster), + equations, + host_arch, + num_mechs(equations), + o, + prec, + ) + ) + print(compile_cmd) + try: + subprocess.check_output(compile_cmd, shell=True) + except subprocess.CalledProcessError as cpe: + print("Build command exited with : {}".format(cpe.returncode)) + print(cpe.output) + quit() + num_quantities = 9 + 6 * num_mechs(equations) + sn = seissol_name(arch_name(prec, host_arch), equations, o, "Release") + copy_source = sn + copy_target = os.path.join(cwd, sn) + copy(copy_source, copy_target) os.chdir(cwd) -if args.steps in ['prepare', 'all']: - with open('DGPATH', 'w') as f: - f.write(os.path.join(args.seissol_dir, 'Maple', '\n')) - for n in resolutions: - for scale in scales: - generate_cmd = 'cubeGenerator -b 6 -x {0} -y {0} -z {0} --px {1} --py {1} --pz {1} -o {2}.nc -s {3}'.format( - 2**n, - parts[n], - cube_name(n, scale), - scale - ) - os.system(generate_cmd) - for eq in equations: +if args.steps in ["prepare", "all"]: for n in resolutions: - parameters = Path('parameters.template').read_text() - parameters = parameters.replace('MESH_FILE', cube_name(n, scale_map[eq])) - parameters = parameters.replace('END_TIME', end_time[eq]) - with open(par_name(eq, n), 'w') as f: - f.write(parameters) - -if args.steps in ['run', 'jobs', 'all']: - for arch in archs: - for eq in equations: - for o in orders: - for n in resolutions: - log_file = os.path.join(log_dir, log_name(arch, eq, o, n)) - nodes = parts[n]**3 - run_cmd = '{} -n {} ./{} {}'.format(args.mpiexec, nodes, 
seissol_name(arch,eq,o), par_name(eq, n)) - if nodes == 1 and args.steps in ['run', 'all']: - run_cmd += ' > ' + log_file - print(run_cmd) - os.system(run_cmd) - elif nodes > 1 and args.steps in ['jobs', 'all']: - job = Path('job.template').read_text() - job = job.replace('WORK_DIR', cwd) - job = job.replace('LOG_FILE', log_file) - job = job.replace('NODES', str(nodes)) - job = job.replace('PARTITION', partition(nodes)) - with open(os.path.join(job_dir, job_name(arch, eq, o, n)), 'w') as f: - f.write(job) - f.write(run_cmd + '\n') - -if args.steps in ['analyse', 'all']: - with open(convergence_file, 'w') as result_file: - result_file.write('arch,equations,order,h,norm,var,error\n') + for scale in scales: + generate_cmd = ( + "cubeGenerator " + "-b 6 -x {0} -y {0} -z {0} " + "--px {1} --py {1} --pz {1} " + "-o {2}.nc -s {3}".format(2 ** n, parts[n], cube_name(n, scale), scale) + ) + os.system(generate_cmd) + parameters = Path("parameters.template").read_text() + parameters = parameters.replace("MESH_FILE", cube_name(n, scale_map[equations])) + parameters = parameters.replace("END_TIME", end_time[equations]) + parameters = parameters.replace("MATERIAL_FILE", material_file[equations]) + parameters = parameters.replace( + "INITIAL_CONDITION", initial_condition[equations] + ) + with open(par_name(equations, n), "w") as f: + f.write(parameters) + +if args.steps in ["run", "all"]: for arch in archs: - for eq in equations: for o in orders: - for n in resolutions: - log_file = os.path.join(log_dir, log_name(arch,eq,o,n)) - result = Path(log_file).read_text() - for line in result.split('\n'): - err = re.search('(LInf|L2|L1)\s*,\s+var\[\s*(\d+)\s*\]\s*=\s*([0-9\.eE+-]+)', line) - if err: - result_file.write('{},{},{},{},{}\n'.format(arch,eq,o,resolution(eq,n), ','.join([str(g) for g in err.groups()]))) + for n in resolutions: + log_file = os.path.join(log_dir, log_name(arch, equations, o, n)) + nodes = parts[n] ** 3 + if not on_a_cluster: + run_cmd = "OMP_NUM_THREADS=8 ./{} {}".format( + seissol_name(arch, equations, o), par_name(equations, n) + ) + run_cmd += " > " + log_file + print(run_cmd) + os.system(run_cmd) + else: + run_cmd = "{} -n {} ./{} {}".format( + args.mpiexec, + nodes, + seissol_name(arch, equations, o), + par_name(equations, n), + ) + job = Path("job.template").read_text() + job = job.replace("WORK_DIR", cwd) + job = job.replace("LOG_FILE", log_file) + job = job.replace("NODES", str(nodes)) + job = job.replace("PARTITION", partition(nodes)) + file_name = os.path.join(job_dir, job_name(arch, equations, o, n)) + with open(file_name, "w") as f: + f.write(job) + f.write(run_cmd + "\n") + print( + "Created SLURM job file {}. 
Run with sbatch.".format(file_name) + ) + +if args.steps in ["analyse", "all"]: + with open(convergence_file, "w") as result_file: + result_file.write("arch,equations,order,h,norm,var,error\n") + for arch in archs: + for o in orders: + for n in resolutions: + log_file = os.path.join(log_dir, log_name(arch, equations, o, n)) + result = Path(log_file).read_text() + for line in result.split("\n"): + err = re.search( + "(LInf|L2|L1)\s*,\s+var\[\s*(\d+)\s*\]\s*=\s*([0-9\.eE+-]+)", + line, + ) + if err: + result_file.write( + "{},{},{},{},{}\n".format( + arch, + equations, + o, + resolution(equations, n), + ",".join([str(g) for g in err.groups()]), + ) + ) diff --git a/convergence/do_all_the_convergence_tests/error_plots.py b/convergence/do_all_the_convergence_tests/error_plots.py new file mode 100644 index 0000000..8eac0c8 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/error_plots.py @@ -0,0 +1,119 @@ +from argparse import ArgumentParser +import numpy as np +import pandas as pd +import os +import errno +import matplotlib.pyplot as plt + + +def plot_errors(errors_df, var, norm, output_prefix, interactive=False): + markers = ["o-", "v-", "^-", "s-", "P-", "X-"] + variable_names = [ + "\sigma_{xx}", + "\sigma_{yy}", + "\sigma_{zz}", + "\sigma_{xy}", + "\sigma_{yz}", + "\sigma_{xz}", + "u", + "v", + "w", + ] + variable_names_output = [ + "s_xx", + "s_yy", + "s_zz", + "s_xy", + "s_yz", + "s_xz", + "u", + "v", + "w", + ] + norm_to_latex = {"L1": "L^1", "L2": "L^2", "LInf": "L^\infty"} + + # use LaTeX fonts in the plot + plt.rcParams.update({"font.size": 32}) + + plt.rc("text", usetex=True) + plt.rc("font", family="serif") + plt.rc("figure", figsize=(20, 10)) + + fig, ax = plt.subplots() + for i, o in enumerate(pd.unique(errors_df["order"])): + err_df = errors_df[ + (errors_df["norm"] == norm) + & (errors_df["order"] == o) + & (errors_df["var"] == var) + ] + ax.plot( + err_df["h"].values, + err_df["error"].values, + markers[i], + label="order {}".format(o), + ) + + title = "${}$ norm of ${}$".format(norm_to_latex[norm], variable_names[var]) + ax.set_title(title) + + ax.set_xlabel("h") + ax.set_ylabel("error") + ax.set_yscale("log") + ax.set_xscale("log") + + tick_values = np.round(np.unique(errors_df["h"].values), 3) + ax.set_xticks(tick_values) + ax.set_xticklabels(tick_values) + plt.minorticks_off() + plt.legend() + + if interactive: + plt.show() + else: + plt.savefig( + output_prefix + "_{}_{}.pdf".format(variable_names_output[var], norm), + bbox_inches="tight", + ) + + +cmd_line_parser = ArgumentParser() +cmd_line_parser.add_argument("convergence_file", type=str, default="convergence.csv") +cmd_line_parser.add_argument("--output_dir", type=str, default="plots") +cmd_line_parser.add_argument( + "--var", type=int, default=-1, help="Specify the variable to plot, -1 for all" +) +cmd_line_parser.add_argument( + "--norm", type=str, default="LInf", help="Norm, can be LInf, L1, L2" +) +cmd_line_parser.add_argument("--arch", type=str, default="dhsw") +cmd_line_parser.add_argument("--equations", type=str, default="elastic") +args = cmd_line_parser.parse_args() + +try: + os.mkdir(args.output_dir) +except OSError as ex: + if ex.errno != errno.EEXIST: + raise + +convergence_df = pd.read_csv(args.convergence_file) +convergence_df = convergence_df[ + (convergence_df["arch"] == args.arch) + & (convergence_df["equations"] == args.equations) + & (convergence_df["norm"] == args.norm) +] + +if args.var == -1: + for v in pd.unique(convergence_df["var"]): + plot_errors( + convergence_df, + v, + args.norm, 
+ os.path.join(args.output_dir, "{}_{}".format(args.arch, args.equations)), + ) +else: + plot_errors( + convergence_df, + args.var, + args.norm, + os.path.join(args.output_dir, "{}_{}".format(args.arch, args.equations)), + ) diff --git a/convergence/do_all_the_convergence_tests/error_table.py b/convergence/do_all_the_convergence_tests/error_table.py new file mode 100644 index 0000000..9339705 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/error_table.py @@ -0,0 +1,45 @@ +from argparse import ArgumentParser +import numpy as np +import pandas as pd + + +def calculate_error_rates(errors_df): + rate_dicts = [] + for n in pd.unique(errors_df["norm"]): + for v in pd.unique(errors_df["var"]): + conv_df = errors_df[(errors_df["norm"] == n) & (errors_df["var"] == v)] + d = {"norm": n, "var": v} + resolutions = pd.unique(conv_df["h"]) + + for r in range(len(resolutions) - 1): + error_decay = ( + conv_df.loc[conv_df["h"] == resolutions[r], "error"].values[0] + / conv_df.loc[conv_df["h"] == resolutions[r + 1], "error"].values[0] + ) + rate = np.log(error_decay) / np.log(resolutions[r] / resolutions[r + 1]) + resolution_decay = "{}->{}".format( + np.round(resolutions[r], 3), np.round(resolutions[r + 1], 3) + ) + d.update({resolution_decay: rate}) + rate_dicts.append(d) + + rate_df = pd.DataFrame(rate_dicts) + return rate_df + + +cmd_line_parser = ArgumentParser() +cmd_line_parser.add_argument("convergence_file", type=str, default="convergence.csv") +cmd_line_parser.add_argument("--order", type=int, default=3) +cmd_line_parser.add_argument("--arch", type=str, default="dhsw") +cmd_line_parser.add_argument("--equations", type=str, default="elastic") +args = cmd_line_parser.parse_args() + +convergence_df = pd.read_csv(args.convergence_file) +convergence_df = convergence_df[ + (convergence_df["order"] == args.order) + & (convergence_df["arch"] == args.arch) + & (convergence_df["equations"] == args.equations) +] + +rate_df = calculate_error_rates(convergence_df) +print(rate_df) diff --git a/convergence/do_all_the_convergence_tests/job.template b/convergence/do_all_the_convergence_tests/job.template old mode 100755 new mode 100644 index 5c61969..3c36666 --- a/convergence/do_all_the_convergence_tests/job.template +++ b/convergence/do_all_the_convergence_tests/job.template @@ -9,7 +9,7 @@ #Notification and type #SBATCH --mail-type=END -#SBATCH --mail-user=TODO +#SBATCH --mail-user=USER_TODO # Wall clock limit: #SBATCH --time=10:00 @@ -17,7 +17,7 @@ #Setup of execution environment #SBATCH --export=ALL -#SBATCH --account=TODO +#SBATCH --account=USER_TODO #constraints are optional #--constraint="scratch&work" #SBATCH --partition=PARTITION diff --git a/convergence/do_all_the_convergence_tests/material_anisotropic.yaml b/convergence/do_all_the_convergence_tests/material_anisotropic.yaml new file mode 100644 index 0000000..e06cdc9 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/material_anisotropic.yaml @@ -0,0 +1,26 @@ +!ConstantMap +map: + rho: 1. 
+ c11: 192.0 + c12: 66.0 + c13: 60.0 + c14: 0.0 + c15: 0.0 + c16: 0.0 + c22: 160.0 + c23: 56.0 + c24: 0.0 + c25: 0.0 + c26: 0.0 + c33: 272.0 + c34: 0.0 + c35: 0.0 + c36: 0.0 + c44: 60.0 + c45: 0.0 + c46: 0.0 + c55: 62.0 + c56: 0.0 + c66: 49.6 + + diff --git a/convergence/do_all_the_convergence_tests/material.yaml b/convergence/do_all_the_convergence_tests/material_viscoelastic.yaml similarity index 100% rename from convergence/do_all_the_convergence_tests/material.yaml rename to convergence/do_all_the_convergence_tests/material_viscoelastic.yaml diff --git a/convergence/do_all_the_convergence_tests/parameters.template b/convergence/do_all_the_convergence_tests/parameters.template index 11220e4..7dcab9c 100644 --- a/convergence/do_all_the_convergence_tests/parameters.template +++ b/convergence/do_all_the_convergence_tests/parameters.template @@ -1,11 +1,11 @@ &Equations -MaterialFileName = 'material.yaml' +MaterialFileName = 'MATERIAL_FILE' FreqCentral = 1. FreqRatio = 100. / &IniCondition ! no initial condition -cICType = 'Planarwave' +cICType = 'INITIAL_CONDITION' / &Boundaries ! activate boundary conditions: From 42a1bd73b6ef95cd25110bb094511cf3ec4b95d4 Mon Sep 17 00:00:00 2001 From: Sebastian Wolf Date: Wed, 23 Dec 2020 14:56:56 +0100 Subject: [PATCH 4/7] remove viscoelastic and anisotropic convergence directories, only duplication --- .../convergence_anisotropic/generateCubes.sh | 8 --- .../convergence_anisotropic/material.yaml | 24 -------- .../convergence_anisotropic/parameters.par | 55 ------------------ .../convergence_anisotropic/recordPoints.dat | 2 - .../convergence_viscoelastic/generateCubes.sh | 8 --- .../convergence_viscoelastic/material.yaml | 8 --- .../convergence_viscoelastic/parameters.par | 57 ------------------- .../convergence_viscoelastic/recordPoints.dat | 3 - 8 files changed, 165 deletions(-) delete mode 100755 convergence/convergence_anisotropic/generateCubes.sh delete mode 100644 convergence/convergence_anisotropic/material.yaml delete mode 100644 convergence/convergence_anisotropic/parameters.par delete mode 100644 convergence/convergence_anisotropic/recordPoints.dat delete mode 100755 convergence/convergence_viscoelastic/generateCubes.sh delete mode 100644 convergence/convergence_viscoelastic/material.yaml delete mode 100644 convergence/convergence_viscoelastic/parameters.par delete mode 100644 convergence/convergence_viscoelastic/recordPoints.dat diff --git a/convergence/convergence_anisotropic/generateCubes.sh b/convergence/convergence_anisotropic/generateCubes.sh deleted file mode 100755 index 88f19de..0000000 --- a/convergence/convergence_anisotropic/generateCubes.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# PATH must contain cubeGenerator from SeisSol/preprocessing/meshing/cube_c/ -S=2 -cubeGenerator -b 6 -x 4 -y 4 -z 4 --px 1 --py 1 --pz 1 -o cube_4.nc -s $S -cubeGenerator -b 6 -x 8 -y 8 -z 8 --px 1 --py 1 --pz 1 -o cube_8.nc -s $S -cubeGenerator -b 6 -x 16 -y 16 -z 16 --px 1 --py 1 --pz 1 -o cube_16.nc -s $S -cubeGenerator -b 6 -x 32 -y 32 -z 32 --px 1 --py 1 --pz 1 -o cube_32.nc -s $S - diff --git a/convergence/convergence_anisotropic/material.yaml b/convergence/convergence_anisotropic/material.yaml deleted file mode 100644 index 5f33f2b..0000000 --- a/convergence/convergence_anisotropic/material.yaml +++ /dev/null @@ -1,24 +0,0 @@ -!ConstantMap -map: - rho: 1.0 - c11: 192.0 - c12: 66.0 - c13: 60.0 - c14: 0.0 - c15: 0.0 - c16: 0.0 - c22: 160.0 - c23: 56.0 - c24: 0.0 - c25: 0.0 - c26: 0.0 - c33: 272.0 - c34: 0.0 - c35: 0.0 - c36: 0.0 - c44: 60.0 - 
c45: 0.0 - c46: 0.0 - c55: 62.0 - c56: 0.0 - c66: 49.0 diff --git a/convergence/convergence_anisotropic/parameters.par b/convergence/convergence_anisotropic/parameters.par deleted file mode 100644 index 4cd32fc..0000000 --- a/convergence/convergence_anisotropic/parameters.par +++ /dev/null @@ -1,55 +0,0 @@ -&Equations -MaterialFileName = 'material.yaml' -/ - -&IniCondition ! no initial condition -cICType = 'Planarwave' -!cICType = 'Zero' -/ - -&Boundaries ! activate boundary conditions: -BC_pe = 1 ! Periodic boundaries -/ - -&SourceType -/ - -&SpongeLayer -/ - -&MeshNml -MeshFile = 'cube_4' ! Name of mesh file -meshgenerator = 'Netcdf' ! Name of meshgenerator (format) -/ - -&Discretization -Order = 6 ! Order of accuracy in space and times -Material = 1 ! Material order -CFL = 0.5 ! CFL number (<=1.0) -FixTimeStep = 5 ! Manualy chosen minimum time -ClusteredLts = 2 -/ - -&Output -OutputFile = 'output/conv' -iOutputMask = 1 1 1 1 1 1 1 1 1 ! Variables ouptut -iOutputMaskMaterial = 1 1 1 ! Material output -Format = 10 ! Format (0=IDL, 1=TECPLOT, 2=IBM DX, 4=GiD)) -Interval = 200 ! Index of printed info at timesteps -TimeInterval = 0.01 ! Index of printed info at time -printIntervalCriterion = 2 ! Criterion for index of printed info: 1=timesteps,2=time,3=timesteps+time -pickdt = 0.01 ! Pickpoint Sampling -pickDtType = 1 ! Pickpoint Type -nRecordPoints = 2 ! number of Record points which are read from file -RFileName = 'recordPoints.dat' ! Record Points in extra file -/ - -&Postprocessing -/ - -&AbortCriteria -EndTime = 0.02 -/ - -&Debugging -/ diff --git a/convergence/convergence_anisotropic/recordPoints.dat b/convergence/convergence_anisotropic/recordPoints.dat deleted file mode 100644 index 4c2865b..0000000 --- a/convergence/convergence_anisotropic/recordPoints.dat +++ /dev/null @@ -1,2 +0,0 @@ -0.0 0.0 0.0 --0.750000000000000 -0.750000000000000 -0.250000000000000 diff --git a/convergence/convergence_viscoelastic/generateCubes.sh b/convergence/convergence_viscoelastic/generateCubes.sh deleted file mode 100755 index 88f19de..0000000 --- a/convergence/convergence_viscoelastic/generateCubes.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# PATH must contain cubeGenerator from SeisSol/preprocessing/meshing/cube_c/ -S=2 -cubeGenerator -b 6 -x 4 -y 4 -z 4 --px 1 --py 1 --pz 1 -o cube_4.nc -s $S -cubeGenerator -b 6 -x 8 -y 8 -z 8 --px 1 --py 1 --pz 1 -o cube_8.nc -s $S -cubeGenerator -b 6 -x 16 -y 16 -z 16 --px 1 --py 1 --pz 1 -o cube_16.nc -s $S -cubeGenerator -b 6 -x 32 -y 32 -z 32 --px 1 --py 1 --pz 1 -o cube_32.nc -s $S - diff --git a/convergence/convergence_viscoelastic/material.yaml b/convergence/convergence_viscoelastic/material.yaml deleted file mode 100644 index 54e2db3..0000000 --- a/convergence/convergence_viscoelastic/material.yaml +++ /dev/null @@ -1,8 +0,0 @@ -!ConstantMap -map: - rho: 1. - mu: 1. - lambda: 2. - Qp: 20. - Qs: 10. - diff --git a/convergence/convergence_viscoelastic/parameters.par b/convergence/convergence_viscoelastic/parameters.par deleted file mode 100644 index a9fe806..0000000 --- a/convergence/convergence_viscoelastic/parameters.par +++ /dev/null @@ -1,57 +0,0 @@ -&Equations -MaterialFileName = material.yaml -FreqCentral = 1. -FreqRatio = 100. -/ - -&IniCondition ! no initial condition -cICType = 'Planarwave' -/ - -&Boundaries ! activate boundary conditions: -BC_pe = 1 ! Periodic boundaries -/ - -&SourceType -/ - -&SpongeLayer -/ - -&MeshNml -MeshFile = 'cube_8' ! Name of mesh file -meshgenerator = 'Netcdf' ! 
Name of meshgenerator (format) -/ - -&Discretization -Order = 6 ! Order of accuracy in space and times -Material = 1 ! Material order -CFL = 0.5 ! CFL number (<=1.0) -FixTimeStep = 5 ! Manualy chosen minimum time -ClusteredLts = 2 -/ - -&Output -OutputFile = 'output/conv' -iOutputMask = 1 1 1 1 1 1 1 1 1 ! Variables ouptut -iOutputMaskMaterial = 1 1 1 ! Material output -Format = 10 ! Format (0=IDL, 1=TECPLOT, 2=IBM DX, 4=GiD)) -Interval = 200 ! Index of printed info at timesteps -TimeInterval = 0.01 ! Index of printed info at time -printIntervalCriterion = 2 ! Criterion for index of printed info: 1=timesteps,2=time,3=timesteps+time -pickdt = 0.001 ! Pickpoint Sampling -pickDtType = 1 ! Pickpoint Type -nRecordPoints = 3 ! number of Record points which are read from file -RFileName = 'recordPoints.dat' ! Record Points in extra file -/ - -&Postprocessing -/ - -&AbortCriteria -!EndTime = 0.5 -EndTime = 10.0 -/ - -&Debugging -/ diff --git a/convergence/convergence_viscoelastic/recordPoints.dat b/convergence/convergence_viscoelastic/recordPoints.dat deleted file mode 100644 index e8960b9..0000000 --- a/convergence/convergence_viscoelastic/recordPoints.dat +++ /dev/null @@ -1,3 +0,0 @@ -0.0 0.0 0.0 --0.750000000000000 -0.750000000000000 -0.250000000000000 --0.375000000000000 -0.875000000000000 0.625000000000000 From d564d1ecd8ba01807b4a30c6653f6e838d173ba6 Mon Sep 17 00:00:00 2001 From: Sebastian Wolf Date: Mon, 4 Jan 2021 11:02:27 +0100 Subject: [PATCH 5/7] Fix typo --- convergence/do_all_the_convergence_tests/compile_and_run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convergence/do_all_the_convergence_tests/compile_and_run.py b/convergence/do_all_the_convergence_tests/compile_and_run.py index de48cf3..427cfde 100755 --- a/convergence/do_all_the_convergence_tests/compile_and_run.py +++ b/convergence/do_all_the_convergence_tests/compile_and_run.py @@ -14,7 +14,7 @@ # host_arch = 'skx' # orders = range(2,8) # resolutions = range(2,7) -# compilers = ['mpiicc', 'mpiicpc', 'mpiifor'] +# compilers = ['mpiicc', 'mpiicpc', 'mpiifort'] ### Settings for Sebastians workstation on_a_cluster = False From 8933ffe7496d8bc6b4cc728c5f8c56d09b8c4f99 Mon Sep 17 00:00:00 2001 From: Sebastian Wolf Date: Mon, 4 Jan 2021 13:49:10 +0100 Subject: [PATCH 6/7] Unify workflow between cluster and local --- .../compile_and_run.py | 102 +++++++++--------- .../local.template | 18 ++++ .../{job.template => supermuc.template} | 0 3 files changed, 72 insertions(+), 48 deletions(-) create mode 100644 convergence/do_all_the_convergence_tests/local.template rename convergence/do_all_the_convergence_tests/{job.template => supermuc.template} (100%) diff --git a/convergence/do_all_the_convergence_tests/compile_and_run.py b/convergence/do_all_the_convergence_tests/compile_and_run.py index 427cfde..320d389 100755 --- a/convergence/do_all_the_convergence_tests/compile_and_run.py +++ b/convergence/do_all_the_convergence_tests/compile_and_run.py @@ -9,19 +9,19 @@ from pathlib import Path import math -### Settings for SuperMUC-NG -# on_a_cluster = True -# host_arch = 'skx' -# orders = range(2,8) -# resolutions = range(2,7) -# compilers = ['mpiicc', 'mpiicpc', 'mpiifort'] - -### Settings for Sebastians workstation -on_a_cluster = False -host_arch = "hsw" -orders = range(3, 7) -resolutions = range(2, 5) -compilers = ["mpicc", "mpicxx", "mpif90"] +### Machine dependend settings +# does the machine support distributed memory parallelism? 
+on_a_cluster = {"supermuc": True, "local": False} +# cpu architecture of the cluster +host_arch = {"supermuc": "skx", "local": "hsw"} +# convergence orders to test for +orders = {"supermuc": range(2,8), "local": range(3,7)} +# mesh resolutions to test for +resolutions = {"supermuc": range(2,7), "local": range(2,5)} +# list of compilers in the order C Compiler, C++ compiler, Fortran compiler +compilers = {"supermuc": ['mpiicc', 'mpiicpc', 'mpiifort'], "local": ["mpicc", "mpicxx", "mpif90"]} +# cluster specific mpiexec command +mpiexec = {"supermuc": "mpiexec", "local": None} cmd_line_parser = ArgumentParser() cmd_line_parser.add_argument("seissol_dir", type=str) @@ -37,16 +37,24 @@ choices=["elastic", "viscoelastic", "viscoelastic2", "anisotropic"], default="elastic", ) -cmd_line_parser.add_argument("--mpiexec", type=str, default="mpiexec") +cmd_line_parser.add_argument( + "--cluster", + type=str, + choices=["supermuc", "local"], + default="supermuc", +) args = cmd_line_parser.parse_args() -def partition(nodes): - if nodes <= 16: - return "micro" - elif nodes <= 768: - return "general" - return "large" +def partition(nodes, cluster="supermuc"): + if cluster == "supermuc": + if nodes <= 16: + return "micro" + elif nodes <= 768: + return "general" + return "large" + else: + raise NotImplementedError def num_mechs(eq): @@ -122,7 +130,7 @@ def on_off(boolean): "anisotropic": "SuperimposedPlanarwave", } -archs = [arch_name(p, host_arch) for p in precision] +archs = [arch_name(p, host_arch[args.cluster]) for p in precision] equations = args.equations if args.steps in ["build", "all"]: @@ -134,7 +142,7 @@ def on_off(boolean): raise os.chdir("build_convergence") for prec in precision: - for o in orders: + for o in orders[args.cluster]: compile_cmd = ( "CC={} CXX={} FC={} cmake .. 
" "-DCMAKE_BUILD_TYPE=Release " @@ -146,12 +154,12 @@ def on_off(boolean): "-DORDER={} " "-DPRECISION={} " "-DTESTING=OFF && make -j8".format( - compilers[0], - compilers[1], - compilers[2], - on_off(on_a_cluster), + compilers[args.cluster][0], + compilers[args.cluster][1], + compilers[args.cluster][2], + on_off(on_a_cluster[args.cluster]), equations, - host_arch, + host_arch[args.cluster], num_mechs(equations), o, prec, @@ -165,7 +173,7 @@ def on_off(boolean): print(cpe.output) quit() num_quantities = 9 + 6 * num_mechs(equations) - sn = seissol_name(arch_name(prec, host_arch), equations, o, "Release") + sn = seissol_name(arch_name(prec, host_arch[args.cluster]), equations, o, "Release") copy_source = sn copy_target = os.path.join(cwd, sn) copy(copy_source, copy_target) @@ -194,43 +202,41 @@ def on_off(boolean): if args.steps in ["run", "all"]: for arch in archs: - for o in orders: - for n in resolutions: + for o in orders[args.cluster]: + for n in resolutions[args.cluster]: log_file = os.path.join(log_dir, log_name(arch, equations, o, n)) nodes = parts[n] ** 3 - if not on_a_cluster: - run_cmd = "OMP_NUM_THREADS=8 ./{} {}".format( + job = Path("{}.template".format(args.cluster)).read_text() + if not on_a_cluster[args.cluster]: + run_cmd = "./{} {}".format( seissol_name(arch, equations, o), par_name(equations, n) ) run_cmd += " > " + log_file - print(run_cmd) - os.system(run_cmd) else: run_cmd = "{} -n {} ./{} {}".format( - args.mpiexec, + mpiexec[args.cluster], nodes, seissol_name(arch, equations, o), par_name(equations, n), ) - job = Path("job.template").read_text() - job = job.replace("WORK_DIR", cwd) - job = job.replace("LOG_FILE", log_file) + job = job.replace("PARTITION", partition(nodes, args.cluster)) job = job.replace("NODES", str(nodes)) - job = job.replace("PARTITION", partition(nodes)) - file_name = os.path.join(job_dir, job_name(arch, equations, o, n)) - with open(file_name, "w") as f: - f.write(job) - f.write(run_cmd + "\n") - print( - "Created SLURM job file {}. 
Run with sbatch.".format(file_name) - ) + job = job.replace("LOG_FILE", log_file) + job = job.replace("WORK_DIR", cwd) + file_name = os.path.join(job_dir, job_name(arch, equations, o, n)) + with open(file_name, "w") as f: + f.write(job) + f.write(run_cmd + "\n") + print( + "Created job file {}.".format(file_name) + ) if args.steps in ["analyse", "all"]: with open(convergence_file, "w") as result_file: result_file.write("arch,equations,order,h,norm,var,error\n") for arch in archs: - for o in orders: - for n in resolutions: + for o in orders[args.cluster]: + for n in resolutions[args.cluster]: log_file = os.path.join(log_dir, log_name(arch, equations, o, n)) result = Path(log_file).read_text() for line in result.split("\n"): diff --git a/convergence/do_all_the_convergence_tests/local.template b/convergence/do_all_the_convergence_tests/local.template new file mode 100644 index 0000000..c73f230 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/local.template @@ -0,0 +1,18 @@ +#!/bin/bash +cd WORK_DIR + +export MP_SINGLE_THREAD=no +unset KMP_AFFINITY +export OMP_NUM_THREADS=8 +export OMP_PLACES="cores(8)" + +export XDMFWRITER_ALIGNMENT=8388608 +export XDMFWRITER_BLOCK_SIZE=8388608 +export SC_CHECKPOINT_ALIGNMENT=8388608 + +export SEISSOL_CHECKPOINT_ALIGNMENT=8388608 +export SEISSOL_CHECKPOINT_DIRECT=1 +export ASYNC_MODE=THREAD +export ASYNC_BUFFER_ALIGNMENT=8388608 + +ulimit -Ss unlimited diff --git a/convergence/do_all_the_convergence_tests/job.template b/convergence/do_all_the_convergence_tests/supermuc.template similarity index 100% rename from convergence/do_all_the_convergence_tests/job.template rename to convergence/do_all_the_convergence_tests/supermuc.template From a350194e0778ed628f6d4fba29b6109c3f56409b Mon Sep 17 00:00:00 2001 From: Sebastian Wolf Date: Mon, 4 Jan 2021 14:08:29 +0100 Subject: [PATCH 7/7] Add regression column --- convergence/do_all_the_convergence_tests/error_table.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/convergence/do_all_the_convergence_tests/error_table.py b/convergence/do_all_the_convergence_tests/error_table.py index 9339705..16938fd 100644 --- a/convergence/do_all_the_convergence_tests/error_table.py +++ b/convergence/do_all_the_convergence_tests/error_table.py @@ -1,5 +1,6 @@ from argparse import ArgumentParser import numpy as np +import scipy.stats as sp_stat import pandas as pd @@ -21,12 +22,15 @@ def calculate_error_rates(errors_df): np.round(resolutions[r], 3), np.round(resolutions[r + 1], 3) ) d.update({resolution_decay: rate}) + + regression = sp_stat.linregress(np.log(conv_df[["h", "error"]].values)) + d.update({"regression": regression.slope}) + rate_dicts.append(d) rate_df = pd.DataFrame(rate_dicts) return rate_df - cmd_line_parser = ArgumentParser() cmd_line_parser.add_argument("convergence_file", type=str, default="convergence.csv") cmd_line_parser.add_argument("--order", type=int, default=3)
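
For reference, the observed convergence order reported by error_table.py can be checked by hand: for two mesh widths h1 > h2 with errors e1 and e2, the pairwise rate is log(e1/e2)/log(h1/h2), and the regression column introduced in PATCH 7/7 is the slope of a least-squares fit of log(error) against log(h) over all resolutions. Below is a minimal standalone sketch of that computation; the error values are made-up illustration data, not benchmark output, and the h values correspond to scale 2 at resolutions n = 2..5 as produced by resolution() in compile_and_run.py.

import numpy as np
import scipy.stats as sp_stat

# hypothetical (h, error) pairs for one variable and one norm
h = np.array([0.866, 0.433, 0.217, 0.108])
err = np.array([2.1e-2, 1.4e-3, 9.1e-5, 5.8e-6])

# pairwise observed order between consecutive resolutions,
# mirroring calculate_error_rates() in error_table.py
pairwise = np.log(err[:-1] / err[1:]) / np.log(h[:-1] / h[1:])
print("pairwise rates:", pairwise)

# overall order: slope of log(error) vs. log(h), as in the
# regression column added by PATCH 7/7
regression = sp_stat.linregress(np.log(h), np.log(err))
print("regression slope:", regression.slope)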