diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4232a22 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +*.pyc + +convergence/do_all_the_convergence_tests/jobs +convergence/do_all_the_convergence_tests/logs +convergence/do_all_the_convergence_tests/mesh +convergence/do_all_the_convergence_tests/par +convergence/do_all_the_convergence_tests/plots +convergence/do_all_the_convergence_tests/SeisSol* +convergence/do_all_the_convergence_tests/convergence.csv + diff --git a/Northridge/generating_the_nrf.sh b/Northridge/generating_the_nrf.sh index aa93a18..280d5aa 100755 --- a/Northridge/generating_the_nrf.sh +++ b/Northridge/generating_the_nrf.sh @@ -4,7 +4,10 @@ prefix=northridge #Download the srf file: -wget http://hypocenter.usc.edu/research/SRF/nr6.70-s0000-h0000.txt -o $prefix.srf +wget http://hypocenter.usc.edu/research/SRF/nr6.70-s0000-h0000.txt + +#merge line 3 and 4 +sed '3{N;s/\n//;}' nr6.70-s0000-h0000.txt > ${prefix}.srf #To find the projected coordinates of the fault center, we apply cs2cs (from proj.4): echo -118.5150 34.3440 0.0 | cs2cs +proj=lonlat +axis=enu +units=m +to +proj=merc +lon_0=-118 +axis=enu +units=m diff --git a/convergence_elastic/generateCubes.sh b/convergence/convergence_elastic/generateCubes.sh similarity index 100% rename from convergence_elastic/generateCubes.sh rename to convergence/convergence_elastic/generateCubes.sh diff --git a/convergence_elastic/material.yaml b/convergence/convergence_elastic/material.yaml similarity index 100% rename from convergence_elastic/material.yaml rename to convergence/convergence_elastic/material.yaml diff --git a/convergence_elastic/parameters.par b/convergence/convergence_elastic/parameters.par similarity index 100% rename from convergence_elastic/parameters.par rename to convergence/convergence_elastic/parameters.par diff --git a/convergence_elastic/recordPoints.dat b/convergence/convergence_elastic/recordPoints.dat similarity index 100% rename from 
convergence_elastic/recordPoints.dat rename to convergence/convergence_elastic/recordPoints.dat diff --git a/convergence/do_all_the_convergence_tests/compile_and_run.py b/convergence/do_all_the_convergence_tests/compile_and_run.py new file mode 100755 index 0000000..320d389 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/compile_and_run.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python3 + +import os +import subprocess +import errno +import re +from argparse import ArgumentParser +from shutil import copy +from pathlib import Path +import math + +### Machine dependend settings +# does the machine support distributed memory parallelism? +on_a_cluster = {"supermuc": True, "local": False} +# cpu architecture of the cluster +host_arch = {"supermuc": "skx", "local": "hsw"} +# convergence orders to test for +orders = {"supermuc": range(2,8), "local": range(3,7)} +# mesh resolutions to test for +resolutions = {"supermuc": range(2,7), "local": range(2,5)} +# list of compilers in the order C Compiler, C++ compiler, Fortran compiler +compilers = {"supermuc": ['mpiicc', 'mpiicpc', 'mpiifort'], "local": ["mpicc", "mpicxx", "mpif90"]} +# cluster specific mpiexec command +mpiexec = {"supermuc": "mpiexec", "local": None} + +cmd_line_parser = ArgumentParser() +cmd_line_parser.add_argument("seissol_dir", type=str) +cmd_line_parser.add_argument( + "--steps", + type=str, + choices=["build", "prepare", "run", "analyse", "all"], + default="all", +) +cmd_line_parser.add_argument( + "--equations", + type=str, + choices=["elastic", "viscoelastic", "viscoelastic2", "anisotropic"], + default="elastic", +) +cmd_line_parser.add_argument( + "--cluster", + type=str, + choices=["supermuc", "local"], + default="supermuc", +) +args = cmd_line_parser.parse_args() + + +def partition(nodes, cluster="supermuc"): + if cluster == "supermuc": + if nodes <= 16: + return "micro" + elif nodes <= 768: + return "general" + return "large" + else: + raise NotImplementedError + + +def num_mechs(eq): + 
return 3 if eq.startswith("viscoelastic") else 0 + + +def arch_name(precision, host_arch): + return precision[0] + host_arch + + +def seissol_name(arch, eq, o, build_type="Release"): + return "SeisSol_{}_{}_{}_{}".format(build_type, arch, o, eq) + + +def cube_name(n, scale): + return "{}/cube_{}_{}".format(mesh_dir, 2 ** n, scale) + + +def par_name(eq, n): + return "{}/parameters_{}_{}.par".format(par_dir, eq, 2 ** n) + + +def resolution(eq, n): + return scale_map[eq] / 2 ** n * math.sqrt(3) + + +def log_name(arch, eq, o, n): + return "{}_{}_{}_{}.log".format(arch, eq, o, 2 ** n) + + +def job_name(arch, eq, o, n): + return "{}_{}_{}_{}.sh".format(arch, eq, o, 2 ** n) + + +def on_off(boolean): + return "ON" if boolean else "OFF" + + +dirs = ("mesh", "par", "logs", "jobs") +mesh_dir, par_dir, log_dir, job_dir = dirs +for d in dirs: + try: + os.mkdir(d) + except OSError as ex: + if ex.errno != errno.EEXIST: + raise +convergence_file = "convergence.csv" + +cwd = os.getcwd() + +os.chdir(args.seissol_dir) + +precision = ["single", "double"] +parts = {2: 1, 3: 1, 4: 1, 5: 4, 6: 4} +scales = [2, 100] +scale_map = {"elastic": 2, "viscoelastic": 2, "viscoelastic2": 2, "anisotropic": 2} +end_time = { + "elastic": "0.1", + "viscoelastic": "0.1", + "viscoelastic2": "0.1", + "anisotropic": "0.1", +} +material_file = { + "elastic": "material_viscoelastic.yaml", + "viscoelastic": "material_viscoelastic.yaml", + "viscoelastic2": "material_viscoelastic.yaml", + "anisotropic": "material_anisotropic.yaml", +} +initial_condition = { + "elastic": "PlanarWave", + "viscoelastic": "PlanarWave", + "viscoelastic2": "PlanarWave", + "anisotropic": "SuperimposedPlanarwave", +} + +archs = [arch_name(p, host_arch[args.cluster]) for p in precision] +equations = args.equations + +if args.steps in ["build", "all"]: + if not os.path.exists("build_convergence"): + try: + os.mkdir("build_convergence") + except OSError as ex: + if ex.errno != errno.EEXIST: + raise + os.chdir("build_convergence") + for 
prec in precision: + for o in orders[args.cluster]: + compile_cmd = ( + "CC={} CXX={} FC={} cmake .. " + "-DCMAKE_BUILD_TYPE=Release " + "-DCOMMTHREAD={} " + "-DEQUATIONS={} " + "-DGEMM_TOOL_LIST=LIBXSMM,PSpaMM, " + "-DHOST_ARCH={} " + "-DNUMBER_OF_MECHANISMS={} " + "-DORDER={} " + "-DPRECISION={} " + "-DTESTING=OFF && make -j8".format( + compilers[args.cluster][0], + compilers[args.cluster][1], + compilers[args.cluster][2], + on_off(on_a_cluster[args.cluster]), + equations, + host_arch[args.cluster], + num_mechs(equations), + o, + prec, + ) + ) + print(compile_cmd) + try: + subprocess.check_output(compile_cmd, shell=True) + except subprocess.CalledProcessError as cpe: + print("Build command exited with : {}".format(cpe.returncode)) + print(cpe.output) + quit() + num_quantities = 9 + 6 * num_mechs(equations) + sn = seissol_name(arch_name(prec, host_arch[args.cluster]), equations, o, "Release") + copy_source = sn + copy_target = os.path.join(cwd, sn) + copy(copy_source, copy_target) + +os.chdir(cwd) + +if args.steps in ["prepare", "all"]: + for n in resolutions[args.cluster]: + for scale in scales: + generate_cmd = ( + "cubeGenerator " + "-b 6 -x {0} -y {0} -z {0} " + "--px {1} --py {1} --pz {1} " + "-o {2}.nc -s {3}".format(2 ** n, parts[n], cube_name(n, scale), scale) + ) + os.system(generate_cmd) + parameters = Path("parameters.template").read_text() + parameters = parameters.replace("MESH_FILE", cube_name(n, scale_map[equations])) + parameters = parameters.replace("END_TIME", end_time[equations]) + parameters = parameters.replace("MATERIAL_FILE", material_file[equations]) + parameters = parameters.replace( + "INITIAL_CONDITION", initial_condition[equations] + ) + with open(par_name(equations, n), "w") as f: + f.write(parameters) + +if args.steps in ["run", "all"]: + for arch in archs: + for o in orders[args.cluster]: + for n in resolutions[args.cluster]: + log_file = os.path.join(log_dir, log_name(arch, equations, o, n)) + nodes = parts[n] ** 3 + job = 
Path("{}.template".format(args.cluster)).read_text() + if not on_a_cluster[args.cluster]: + run_cmd = "./{} {}".format( + seissol_name(arch, equations, o), par_name(equations, n) + ) + run_cmd += " > " + log_file + else: + run_cmd = "{} -n {} ./{} {}".format( + mpiexec[args.cluster], + nodes, + seissol_name(arch, equations, o), + par_name(equations, n), + ) + job = job.replace("PARTITION", partition(nodes, args.cluster)) + job = job.replace("NODES", str(nodes)) + job = job.replace("LOG_FILE", log_file) + job = job.replace("WORK_DIR", cwd) + file_name = os.path.join(job_dir, job_name(arch, equations, o, n)) + with open(file_name, "w") as f: + f.write(job) + f.write(run_cmd + "\n") + print( + "Created job file {}.".format(file_name) + ) + +if args.steps in ["analyse", "all"]: + with open(convergence_file, "w") as result_file: + result_file.write("arch,equations,order,h,norm,var,error\n") + for arch in archs: + for o in orders[args.cluster]: + for n in resolutions[args.cluster]: + log_file = os.path.join(log_dir, log_name(arch, equations, o, n)) + result = Path(log_file).read_text() + for line in result.split("\n"): + err = re.search( + "(LInf|L2|L1)\s*,\s+var\[\s*(\d+)\s*\]\s*=\s*([0-9\.eE+-]+)", + line, + ) + if err: + result_file.write( + "{},{},{},{},{}\n".format( + arch, + equations, + o, + resolution(equations, n), + ",".join([str(g) for g in err.groups()]), + ) + ) diff --git a/convergence/do_all_the_convergence_tests/error_plots.py b/convergence/do_all_the_convergence_tests/error_plots.py new file mode 100644 index 0000000..8eac0c8 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/error_plots.py @@ -0,0 +1,119 @@ +from argparse import ArgumentParser +import numpy as np +import pandas as pd +import os +import errno +import matplotlib.pyplot as plt + + +def plot_errors(errors_df, var, norm, output_prefix, interactive=False): + markers = ["o-", "v-", "^-", "s-", "P-", "X-"] + variable_names = [ + "\sigma_{xx}", + "\sigma_{yy}", + "\sigma_{zz}", + 
"\sigma_{xy}", + "\sigma_{yz}", + "\sigma_{xz}", + "u", + "v", + "w", + ] + variable_names_output = [ + "s_xx", + "s_yy", + "s_zz", + "s_xy", + "s_yz", + "s_xz", + "u", + "v", + "w", + ] + norm_to_latex = {"L1": "L^1", "L2": "L^2", "LInf": "L^\infty"} + + # use LaTeX fonts in the plot + plt.rcParams.update({"font.size": 32}) + + plt.rc("text", usetex=True) + plt.rc("font", family="serif") + plt.rc("figure", figsize=(20, 10)) + + fig, ax = plt.subplots() + for i, o in enumerate(pd.unique(errors_df["order"])): + err_df = errors_df[ + (errors_df["norm"] == norm) + & (errors_df["order"] == o) + & (errors_df["var"] == var) + ] + ax.plot( + err_df["h"].values, + err_df["error"].values, + markers[i], + label="order {}".format(o), + ) + + title = "${}$ norm of ${}$".format(norm_to_latex[norm], variable_names[var]) + ax.set_title(title) + + ax.set_xlabel("h") + ax.set_ylabel("error") + ax.set_yscale("log") + ax.set_xscale("log") + + tick_values = np.round(np.unique(errors_df["h"].values), 3) + ax.set_xticks(tick_values) + ax.set_xticklabels(tick_values) + plt.minorticks_off() + plt.legend() + + if interactive: + plt.show() + else: + plt.savefig( + output_prefix + "_{}_{}.pdf".format(variable_names_output[var], norm), + bbox_inches="tight", + ) + + +cmd_line_parser = ArgumentParser() +cmd_line_parser.add_argument("convergence_file", type=str, default="convergence.csv") +cmd_line_parser.add_argument("--output_dir", type=str, default="plots") +cmd_line_parser.add_argument( + "--var", type=int, default=-1, help="Specify the variable to plot, -1 for all" +) +cmd_line_parser.add_argument( + "--norm", type=str, default="LInf", help="Norm, can be LInf, L1, L2" +) +cmd_line_parser.add_argument("--arch", type=str, default="dhsw") +cmd_line_parser.add_argument("--equations", type=str, default="elastic") +args = cmd_line_parser.parse_args() + +try: + os.mkdir(args.output_dir) +except OSError as ex: + if ex.errno != errno.EEXIST: + raise + +convergence_df = 
pd.read_csv(args.convergence_file) +convergence_df = convergence_df[ + (convergence_df["arch"] == args.arch) + & (convergence_df["equations"] == args.equations) + & (convergence_df["norm"] == args.norm) +] + +if args.var == -1: + for v in pd.unique(convergence_df["var"]): + plot_errors( + convergence_df, + v, + args.norm, + os.path.join(args.output_dir, "{}_{}".format(args.arch, args.equations)), + ) +else: + plot_errors( + convergence_df, + args.var, + args.norm, + os.path.join(args.output_dir, "{}_{}".format(args.arch, args.equations)), + ) diff --git a/convergence/do_all_the_convergence_tests/error_table.py b/convergence/do_all_the_convergence_tests/error_table.py new file mode 100644 index 0000000..16938fd --- /dev/null +++ b/convergence/do_all_the_convergence_tests/error_table.py @@ -0,0 +1,49 @@ +from argparse import ArgumentParser +import numpy as np +import scipy.stats as sp_stat +import pandas as pd + + +def calculate_error_rates(errors_df): + rate_dicts = [] + for n in pd.unique(errors_df["norm"]): + for v in pd.unique(errors_df["var"]): + conv_df = errors_df[(errors_df["norm"] == n) & (errors_df["var"] == v)] + d = {"norm": n, "var": v} + resolutions = pd.unique(conv_df["h"]) + + for r in range(len(resolutions) - 1): + error_decay = ( + conv_df.loc[conv_df["h"] == resolutions[r], "error"].values[0] + / conv_df.loc[conv_df["h"] == resolutions[r + 1], "error"].values[0] + ) + rate = np.log(error_decay) / np.log(resolutions[r] / resolutions[r + 1]) + resolution_decay = "{}->{}".format( + np.round(resolutions[r], 3), np.round(resolutions[r + 1], 3) + ) + d.update({resolution_decay: rate}) + + regression = sp_stat.linregress(np.log(conv_df[["h", "error"]].values)) + d.update({"regression": regression.slope}) + + rate_dicts.append(d) + + rate_df = pd.DataFrame(rate_dicts) + return rate_df + +cmd_line_parser = ArgumentParser() +cmd_line_parser.add_argument("convergence_file", type=str, default="convergence.csv") +cmd_line_parser.add_argument("--order", 
type=int, default=3) +cmd_line_parser.add_argument("--arch", type=str, default="dhsw") +cmd_line_parser.add_argument("--equations", type=str, default="elastic") +args = cmd_line_parser.parse_args() + +convergence_df = pd.read_csv(args.convergence_file) +convergence_df = convergence_df[ + (convergence_df["order"] == args.order) + & (convergence_df["arch"] == args.arch) + & (convergence_df["equations"] == args.equations) +] + +rate_df = calculate_error_rates(convergence_df) +print(rate_df) diff --git a/convergence/do_all_the_convergence_tests/local.template b/convergence/do_all_the_convergence_tests/local.template new file mode 100644 index 0000000..c73f230 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/local.template @@ -0,0 +1,18 @@ +#!/bin/bash +cd WORK_DIR + +export MP_SINGLE_THREAD=no +unset KMP_AFFINITY +export OMP_NUM_THREADS=8 +export OMP_PLACES="cores(8)" + +export XDMFWRITER_ALIGNMENT=8388608 +export XDMFWRITER_BLOCK_SIZE=8388608 +export SC_CHECKPOINT_ALIGNMENT=8388608 + +export SEISSOL_CHECKPOINT_ALIGNMENT=8388608 +export SEISSOL_CHECKPOINT_DIRECT=1 +export ASYNC_MODE=THREAD +export ASYNC_BUFFER_ALIGNMENT=8388608 + +ulimit -Ss unlimited diff --git a/convergence/do_all_the_convergence_tests/material_anisotropic.yaml b/convergence/do_all_the_convergence_tests/material_anisotropic.yaml new file mode 100644 index 0000000..e06cdc9 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/material_anisotropic.yaml @@ -0,0 +1,26 @@ +!ConstantMap +map: + rho: 1. 
+ c11: 192.0 + c12: 66.0 + c13: 60.0 + c14: 0.0 + c15: 0.0 + c16: 0.0 + c22: 160.0 + c23: 56.0 + c24: 0.0 + c25: 0.0 + c26: 0.0 + c33: 272.0 + c34: 0.0 + c35: 0.0 + c36: 0.0 + c44: 60.0 + c45: 0.0 + c46: 0.0 + c55: 62.0 + c56: 0.0 + c66: 49.6 + + diff --git a/convergence/do_all_the_convergence_tests/material_viscoelastic.yaml b/convergence/do_all_the_convergence_tests/material_viscoelastic.yaml new file mode 100644 index 0000000..54e2db3 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/material_viscoelastic.yaml @@ -0,0 +1,8 @@ +!ConstantMap +map: + rho: 1. + mu: 1. + lambda: 2. + Qp: 20. + Qs: 10. + diff --git a/convergence/do_all_the_convergence_tests/parameters.template b/convergence/do_all_the_convergence_tests/parameters.template new file mode 100644 index 0000000..7dcab9c --- /dev/null +++ b/convergence/do_all_the_convergence_tests/parameters.template @@ -0,0 +1,55 @@ +&Equations +MaterialFileName = 'MATERIAL_FILE' +FreqCentral = 1. +FreqRatio = 100. +/ + +&IniCondition ! no initial condition +cICType = 'INITIAL_CONDITION' +/ + +&Boundaries ! activate boundary conditions: +BC_pe = 1 ! Periodic boundaries +/ + +&SourceType +/ + +&SpongeLayer +/ + +&MeshNml +MeshFile = 'MESH_FILE' ! Name of mesh file +meshgenerator = 'Netcdf' ! Name of meshgenerator (format) +/ + +&Discretization +Order = 6 ! Order of accuracy in space and times +Material = 1 ! Material order +CFL = 0.5 ! CFL number (<=1.0) +FixTimeStep = 5 ! Manualy chosen minimum time +ClusteredLts = 1 +/ + +&Output +OutputFile = 'output/conv' +iOutputMask = 1 1 1 1 1 1 1 1 1 ! Variables ouptut +iOutputMaskMaterial = 1 1 1 ! Material output +Format = 10 ! Format (0=IDL, 1=TECPLOT, 2=IBM DX, 4=GiD)) +Interval = 200 ! Index of printed info at timesteps +TimeInterval = 0.01 ! Index of printed info at time +printIntervalCriterion = 2 ! Criterion for index of printed info: 1=timesteps,2=time,3=timesteps+time +pickdt = 0.05 ! Pickpoint Sampling +pickDtType = 1 ! Pickpoint Type +nRecordPoints = 0 ! 
number of Record points which are read from file +/ + +&Postprocessing +/ + +&AbortCriteria +EndTime = END_TIME +/ + +&Debugging +/ diff --git a/convergence/do_all_the_convergence_tests/supermuc.template b/convergence/do_all_the_convergence_tests/supermuc.template new file mode 100644 index 0000000..3c36666 --- /dev/null +++ b/convergence/do_all_the_convergence_tests/supermuc.template @@ -0,0 +1,47 @@ +#!/bin/bash +#SBATCH -J SeisSol_LOG_NAME +#Output and error (also --output, --error): +#SBATCH -o WORK_DIR/LOG_FILE +#SBATCH -e WORK_DIR/LOG_FILE + +#Initial working directory (also --chdir): +#SBATCH --workdir=WORK_DIR + +#Notification and type +#SBATCH --mail-type=END +#SBATCH --mail-user=USER_TODO + +# Wall clock limit: +#SBATCH --time=10:00 +#SBATCH --no-requeue + +#Setup of execution environment +#SBATCH --export=ALL +#SBATCH --account=USER_TODO +#constraints are optional +#--constraint="scratch&work" +#SBATCH --partition=PARTITION + +#Number of nodes and MPI tasks per node: +#SBATCH --nodes=NODES +#SBATCH --ntasks-per-node=1 + +module load slurm_setup +#Run the program: +export MP_SINGLE_THREAD=no +unset KMP_AFFINITY +export OMP_NUM_THREADS=94 +export OMP_PLACES="cores(47)" + +export XDMFWRITER_ALIGNMENT=8388608 +export XDMFWRITER_BLOCK_SIZE=8388608 +export SC_CHECKPOINT_ALIGNMENT=8388608 + +export SEISSOL_CHECKPOINT_ALIGNMENT=8388608 +export SEISSOL_CHECKPOINT_DIRECT=1 +export ASYNC_MODE=THREAD +export ASYNC_BUFFER_ALIGNMENT=8388608 +source /etc/profile.d/modules.sh + +echo $SLURM_NTASKS +ulimit -Ss unlimited