From 782c50bcb87a8bb5203d06785e0994f40cd07f36 Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Mon, 23 Jun 2025 13:02:24 -0600 Subject: [PATCH 01/12] tool to convert native sampling to structured ascii output. one text file per sampler per directory. --- ...ert_native_sampling_to_structured_ascii.py | 93 +++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 tools/convert_native_sampling_to_structured_ascii.py diff --git a/tools/convert_native_sampling_to_structured_ascii.py b/tools/convert_native_sampling_to_structured_ascii.py new file mode 100644 index 0000000000..05cd695ff1 --- /dev/null +++ b/tools/convert_native_sampling_to_structured_ascii.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 + +import numpy as np +import matplotlib.pyplot as plt +import sys +import glob +import os +AMR_WIND_PATH = 'amr-wind' +loc_dir = "." +sampling_label = "sampling" +pp_dir = "post_processing" + +########## Input arguments from command line ############## +narg = len(sys.argv) +print("Total input arguments passed:", narg-1) + +print("\nArguments passed:", end = " ") +for i in range(1, narg): + print(sys.argv[i], end = " ") + +if (narg > 1): + loc_dir = sys.argv[1] +if (narg > 2): + AMR_WIND_PATH = sys.argv[2] +if (narg > 3): + pp_dir = sys.argv[3] +if (narg > 4): + sampling_label = sys.argv[4] +if (narg > 5): + print("WARNING: routine only takes 4 arguments maximum. Ignoring additional arguments") + +print("\nParameters to be used:") +print(" run directory path: ", os.path.realpath(loc_dir)) +print(" AMR-Wind path: ", os.path.realpath(AMR_WIND_PATH)) +print(" post-processing directory name: ", pp_dir) +print(" top-level sampling label name: ", sampling_label) +########################################################### + +sys.path.append(AMR_WIND_PATH+'/tools/') +import amrex_particle +from amrex_particle import AmrexParticleFile + +pp_dir = loc_dir + "/" + pp_dir +pfile = AmrexParticleFile(loc_dir) + +os.chdir(pp_dir) +contents = glob.glob(sampling_label + "*") +directories = [path for path in contents if os.path.isdir(path)] +sdirs = sorted(directories) + +max_dir = len(sdirs)-1 +max_step = int(sdirs[max_dir][len(sampling_label):len(sdirs[max_dir])]) + +for n in range(len(sdirs)): + step = int(sdirs[n][len(sampling_label):len(sdirs[n])]) + print(str(step) + " out of " + str(max_step)) + pt = pfile.load(step, label=sampling_label, root_dir = ".") + pt.parse_header() + pt.parse_info() + pt.load_binary_data() + data = pt.df + samplers = pt.info["samplers"] + + t_str = str(pt.info["time"]) + v_str = "x y z" + + for rv in range(pt.num_reals): + v_str += " " + pt.real_var_names[rv] + for iv in range(pt.num_ints - 3): + v_str += " " + pt.int_var_names[iv + 3] + + header = "t = " + t_str + "\n" + v_str + + # Have to go with max possible size, subsampler size not stored + output = np.zeros((pt.num_particles, 3 + pt.num_reals + pt.num_ints - 3)) + + for s in range(len(samplers)): + np_s = 0 + for p in range(pt.num_particles): + if data.set_id[p] == s: + np_s += 1 + pid = data.probe_id[p] + output[pid, 0] = data.xco[p] + output[pid, 1] = data.yco[p] + output[pid, 2] = data.zco[p] + for rv in range(pt.num_reals): + output[pid, 3+rv] = data[pt.real_var_names[rv]][p] + for iv in range(pt.num_ints - 3): + output[pid, 3+pt.num_reals+iv] = data[pt.int_var_names[iv+3]][p] + + + fname = sdirs[n] + "_" + samplers[s]["label"]+ ".txt" + np.savetxt(fname,output[0:np_s,:],header=header) From 6f612a7eeb457f4b190ab88815be1af988a780ad Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Mon, 23 Jun 2025 14:57:46 
-0600 Subject: [PATCH 02/12] tool to convert single particle from native to ascii time series --- tools/convert_native_sample_to_time_series.py | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 tools/convert_native_sample_to_time_series.py diff --git a/tools/convert_native_sample_to_time_series.py b/tools/convert_native_sample_to_time_series.py new file mode 100644 index 0000000000..1a196817a3 --- /dev/null +++ b/tools/convert_native_sample_to_time_series.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 + +import numpy as np +import matplotlib.pyplot as plt +import sys +import glob +import os +AMR_WIND_PATH = 'amr-wind' +loc_dir = "." +sampling_label = "sampling" +pp_dir = "post_processing" +sp_index = 0 +s_name = "* will use first *" + +########## Input arguments from command line ############## +narg = len(sys.argv) +print("Total input arguments passed:", narg-1) + +print("\nArguments passed:", end = " ") +for i in range(1, narg): + print(sys.argv[i], end = " ") + +if (narg > 1): + loc_dir = sys.argv[1] +if (narg > 2): + AMR_WIND_PATH = sys.argv[2] +if (narg > 3): + pp_dir = sys.argv[3] +if (narg > 4): + sampling_label = sys.argv[4] +if (narg > 5): + s_name = sys.argv[5] +if (narg > 6): + sp_index = sys.argv[6] +if (narg > 7): + print("WARNING: routine only takes 6 arguments maximum. Ignoring additional arguments") + +print("\nParameters to be used:") +print(" run directory path: ", os.path.realpath(loc_dir)) +print(" AMR-Wind path: ", os.path.realpath(AMR_WIND_PATH)) +print(" post-processing directory name: ", pp_dir) +print(" top-level sampling label name: ", sampling_label) +print(" bottom-level sampling label name: ", s_name) +print(" particle index within sampler: ", sp_index) +########################################################### + +sys.path.append(AMR_WIND_PATH+'/tools/') +import amrex_particle +from amrex_particle import AmrexParticleFile + +pp_dir = loc_dir + "/" + pp_dir +pfile = AmrexParticleFile(loc_dir) + +os.chdir(pp_dir) +contents = glob.glob(sampling_label + "*") +directories = [path for path in contents if os.path.isdir(path)] +sdirs = sorted(directories) + +# Separate by length, then order again (because 10000 would be next to 100000) +max_len = len(sdirs[0]) +min_len = max_len +for n in range(len(sdirs)): + max_len = max(max_len,len(sdirs[n])) + min_len = min(min_len,len(sdirs[n])) + +sdirs_init = list(sdirs) +if (max_len != min_len): + cur_len = min_len + idx = 0 + for inc_len in range(max_len - min_len + 1): + cur_len += inc_len + for n in range(len(sdirs_init)): + if (len(sdirs_init[n]) == cur_len): + sdirs[idx] = sdirs_init[n] + idx += 1 + +n_dir = len(sdirs) +max_step = int(sdirs[n_dir-1][len(sampling_label):len(sdirs[n_dir-1])]) + +# Get stuff for header and array sizes using first available directory +n0 = 0 +step = int(sdirs[n0][len(sampling_label):len(sdirs[n0])]) +pt = pfile.load(step, label=sampling_label, root_dir = ".") +pt.parse_header() +pt.parse_info() +output = np.zeros((n_dir, 1 + 3 + pt.num_reals + pt.num_ints - 3)) +v_str = "x y z" +for rv in range(pt.num_reals): + v_str += " " + pt.real_var_names[rv] +for iv in range(pt.num_ints - 3): + v_str += " " + pt.int_var_names[iv + 3] +header = "t " + v_str +samplers = pt.info["samplers"] +s_index = 0 +if (s_name[0] != "*"): + for s in range(len(samplers)): + if (s_name == samplers[s]["label"]): + s_index = s +else: + s_name = samplers[0]["label"] + +fname = sampling_label + "_" + s_name + "_" + str(sp_index) + "_time_series.txt" +print("Output file name: ", fname) + +# Loop 
through files to get time series data +for n in range(n_dir): + step = int(sdirs[n][len(sampling_label):len(sdirs[n])]) + print(str(step) + " out of " + str(max_step)) + pt = pfile.load(step, label=sampling_label, root_dir = ".") + pt.parse_header() + pt.parse_info() + pt.load_binary_data() + data = pt.df + + output[n, 0] = pt.info["time"] + + for p in range(pt.num_particles): + if (data.set_id[p] == s_index and data.probe_id[p] == sp_index): + output[n, 1] = data.xco[p] + output[n, 2] = data.yco[p] + output[n, 3] = data.zco[p] + for rv in range(pt.num_reals): + output[n, 4+rv] = data[pt.real_var_names[rv]][p] + for iv in range(pt.num_ints - 3): + output[n, 4+pt.num_reals+iv] = data[pt.int_var_names[iv+3]][p] + +np.savetxt(fname, output[0:len(sdirs)],header=header) \ No newline at end of file From 6f681d8ac6f9d15d0dee41079c08ad89204ff370 Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Mon, 23 Jun 2025 15:32:17 -0600 Subject: [PATCH 03/12] upgrade one of the scripts - much better parsing --- tools/convert_native_sample_to_time_series.py | 246 ++++++++++-------- 1 file changed, 137 insertions(+), 109 deletions(-) diff --git a/tools/convert_native_sample_to_time_series.py b/tools/convert_native_sample_to_time_series.py index 1a196817a3..e6917f5859 100644 --- a/tools/convert_native_sample_to_time_series.py +++ b/tools/convert_native_sample_to_time_series.py @@ -1,127 +1,155 @@ -#!/usr/bin/env python3 - import numpy as np import matplotlib.pyplot as plt import sys import glob import os -AMR_WIND_PATH = 'amr-wind' -loc_dir = "." -sampling_label = "sampling" -pp_dir = "post_processing" -sp_index = 0 -s_name = "* will use first *" +import argparse -########## Input arguments from command line ############## -narg = len(sys.argv) -print("Total input arguments passed:", narg-1) +def main(): + """Convert native data for a single sampler point to a time series in ASCII.""" + parser = argparse.ArgumentParser( + description="Convert native data for a single sampler point to a time series in ASCII" + ) + parser.add_argument( + "-aw", + "--amr_wind_path", + help="path to amr-wind directory", + required=True, + type=str, + ) + parser.add_argument( + "-d", + "--run_dir", + help="Directory where run took place, where post-processing directory is", + required=True, + type=str, + ) + parser.add_argument( + "-p", + "--post_proc_dir", + help="Name of post-processing directory", + default="post_processing", + type=str, + ) + parser.add_argument( + "-l", + "--label", + help="Top-level sampling label name", + default="sampling", + type=str, + ) + parser.add_argument( + "-s", + "--sampler", + help="Bottom-level sampling label name", + default="* use first *", + type=str, + ) + parser.add_argument( + "-i", + "--index", + help="Particle index within specified sampler", + default=0, + type=int, + ) + args = parser.parse_args() -print("\nArguments passed:", end = " ") -for i in range(1, narg): - print(sys.argv[i], end = " ") + ########## Input arguments from command line ############## + print("\nParameters to be used:") + print(" run directory path: ", os.path.realpath(args.run_dir)) + print(" AMR-Wind path: ", os.path.realpath(args.amr_wind_path)) + print(" post-processing directory name: ", args.post_proc_dir) + print(" top-level sampling label name: ", args.label) + ########################################################### -if (narg > 1): - loc_dir = sys.argv[1] -if (narg > 2): - AMR_WIND_PATH = sys.argv[2] -if (narg > 3): - pp_dir = sys.argv[3] -if (narg > 4): - sampling_label = sys.argv[4] -if (narg > 5): - 
s_name = sys.argv[5] -if (narg > 6): - sp_index = sys.argv[6] -if (narg > 7): - print("WARNING: routine only takes 6 arguments maximum. Ignoring additional arguments") + sys.path.append(args.amr_wind_path+'/tools/') + import amrex_particle + from amrex_particle import AmrexParticleFile -print("\nParameters to be used:") -print(" run directory path: ", os.path.realpath(loc_dir)) -print(" AMR-Wind path: ", os.path.realpath(AMR_WIND_PATH)) -print(" post-processing directory name: ", pp_dir) -print(" top-level sampling label name: ", sampling_label) -print(" bottom-level sampling label name: ", s_name) -print(" particle index within sampler: ", sp_index) -########################################################### + post_proc_dir = args.run_dir + "/" + args.post_proc_dir + pfile = AmrexParticleFile(args.run_dir) -sys.path.append(AMR_WIND_PATH+'/tools/') -import amrex_particle -from amrex_particle import AmrexParticleFile + os.chdir(post_proc_dir) + contents = glob.glob(args.label + "*") + directories = [path for path in contents if os.path.isdir(path)] + sdirs = sorted(directories) -pp_dir = loc_dir + "/" + pp_dir -pfile = AmrexParticleFile(loc_dir) + # Separate by length, then order again (because 10000 would be next to 100000) + max_len = len(sdirs[0]) + min_len = max_len + for n in range(len(sdirs)): + max_len = max(max_len,len(sdirs[n])) + min_len = min(min_len,len(sdirs[n])) -os.chdir(pp_dir) -contents = glob.glob(sampling_label + "*") -directories = [path for path in contents if os.path.isdir(path)] -sdirs = sorted(directories) + sdirs_init = list(sdirs) + if (max_len != min_len): + cur_len = min_len + idx = 0 + for inc_len in range(max_len - min_len + 1): + cur_len += inc_len + for n in range(len(sdirs_init)): + if (len(sdirs_init[n]) == cur_len): + sdirs[idx] = sdirs_init[n] + idx += 1 -# Separate by length, then order again (because 10000 would be next to 100000) -max_len = len(sdirs[0]) -min_len = max_len -for n in range(len(sdirs)): - max_len = max(max_len,len(sdirs[n])) - min_len = min(min_len,len(sdirs[n])) + n_dir = len(sdirs) + max_step = int(sdirs[n_dir-1][len(args.label):len(sdirs[n_dir-1])]) -sdirs_init = list(sdirs) -if (max_len != min_len): - cur_len = min_len - idx = 0 - for inc_len in range(max_len - min_len + 1): - cur_len += inc_len - for n in range(len(sdirs_init)): - if (len(sdirs_init[n]) == cur_len): - sdirs[idx] = sdirs_init[n] - idx += 1 + # Get stuff for header and array sizes using first available directory + n0 = 0 + step = int(sdirs[n0][len(args.label):len(sdirs[n0])]) + pt = pfile.load(step, label=args.label, root_dir = ".") + pt.parse_header() + pt.parse_info() + output = np.zeros((n_dir, 1 + 3 + pt.num_reals + pt.num_ints - 3)) + v_str = "x y z" + for rv in range(pt.num_reals): + v_str += " " + pt.real_var_names[rv] + for iv in range(pt.num_ints - 3): + v_str += " " + pt.int_var_names[iv + 3] + header = "t " + v_str + samplers = pt.info["samplers"] + s_index = 0 + if (args.sampler[0] != "*"): + for s in range(len(samplers)): + if (args.sampler == samplers[s]["label"]): + s_index = s + else: + args.sampler = samplers[0]["label"] -n_dir = len(sdirs) -max_step = int(sdirs[n_dir-1][len(sampling_label):len(sdirs[n_dir-1])]) + ########## Input arguments from command line ############## + print(" bottom-level sampling label name: ", args.sampler) + print(" particle index within sampler: ", args.index) + ########################################################### -# Get stuff for header and array sizes using first available directory -n0 = 0 -step = 
int(sdirs[n0][len(sampling_label):len(sdirs[n0])]) -pt = pfile.load(step, label=sampling_label, root_dir = ".") -pt.parse_header() -pt.parse_info() -output = np.zeros((n_dir, 1 + 3 + pt.num_reals + pt.num_ints - 3)) -v_str = "x y z" -for rv in range(pt.num_reals): - v_str += " " + pt.real_var_names[rv] -for iv in range(pt.num_ints - 3): - v_str += " " + pt.int_var_names[iv + 3] -header = "t " + v_str -samplers = pt.info["samplers"] -s_index = 0 -if (s_name[0] != "*"): - for s in range(len(samplers)): - if (s_name == samplers[s]["label"]): - s_index = s -else: - s_name = samplers[0]["label"] + fname = args.label + "_" + args.sampler + "_" + str(args.index) + "_time_series.txt" + print("Output file name: ", fname) + print("Output file name with path: ", os.path.realpath(fname)) + print() -fname = sampling_label + "_" + s_name + "_" + str(sp_index) + "_time_series.txt" -print("Output file name: ", fname) + # Loop through files to get time series data + for n in range(n_dir): + step = int(sdirs[n][len(args.label):len(sdirs[n])]) + print(str(step) + " out of " + str(max_step)) + pt = pfile.load(step, label=args.label, root_dir = ".") + pt.parse_header() + pt.parse_info() + pt.load_binary_data() + data = pt.df + + output[n, 0] = pt.info["time"] + + for p in range(pt.num_particles): + if (data.set_id[p] == s_index and data.probe_id[p] == args.index): + output[n, 1] = data.xco[p] + output[n, 2] = data.yco[p] + output[n, 3] = data.zco[p] + for rv in range(pt.num_reals): + output[n, 4+rv] = data[pt.real_var_names[rv]][p] + for iv in range(pt.num_ints - 3): + output[n, 4+pt.num_reals+iv] = data[pt.int_var_names[iv+3]][p] + + np.savetxt(fname, output[0:len(sdirs)],header=header) -# Loop through files to get time series data -for n in range(n_dir): - step = int(sdirs[n][len(sampling_label):len(sdirs[n])]) - print(str(step) + " out of " + str(max_step)) - pt = pfile.load(step, label=sampling_label, root_dir = ".") - pt.parse_header() - pt.parse_info() - pt.load_binary_data() - data = pt.df - - output[n, 0] = pt.info["time"] - - for p in range(pt.num_particles): - if (data.set_id[p] == s_index and data.probe_id[p] == sp_index): - output[n, 1] = data.xco[p] - output[n, 2] = data.yco[p] - output[n, 3] = data.zco[p] - for rv in range(pt.num_reals): - output[n, 4+rv] = data[pt.real_var_names[rv]][p] - for iv in range(pt.num_ints - 3): - output[n, 4+pt.num_reals+iv] = data[pt.int_var_names[iv+3]][p] - -np.savetxt(fname, output[0:len(sdirs)],header=header) \ No newline at end of file +if __name__ == "__main__": + main() \ No newline at end of file From c570c171419fb4f31057ef855960f36b792408ef Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Mon, 23 Jun 2025 15:39:13 -0600 Subject: [PATCH 04/12] same for the other script --- ...ert_native_sampling_to_structured_ascii.py | 171 ++++++++++-------- 1 file changed, 93 insertions(+), 78 deletions(-) diff --git a/tools/convert_native_sampling_to_structured_ascii.py b/tools/convert_native_sampling_to_structured_ascii.py index 05cd695ff1..7d9b18105f 100644 --- a/tools/convert_native_sampling_to_structured_ascii.py +++ b/tools/convert_native_sampling_to_structured_ascii.py @@ -1,93 +1,108 @@ -#!/usr/bin/env python3 - import numpy as np import matplotlib.pyplot as plt import sys import glob import os -AMR_WIND_PATH = 'amr-wind' -loc_dir = "." 
-sampling_label = "sampling" -pp_dir = "post_processing" - -########## Input arguments from command line ############## -narg = len(sys.argv) -print("Total input arguments passed:", narg-1) - -print("\nArguments passed:", end = " ") -for i in range(1, narg): - print(sys.argv[i], end = " ") +import argparse -if (narg > 1): - loc_dir = sys.argv[1] -if (narg > 2): - AMR_WIND_PATH = sys.argv[2] -if (narg > 3): - pp_dir = sys.argv[3] -if (narg > 4): - sampling_label = sys.argv[4] -if (narg > 5): - print("WARNING: routine only takes 4 arguments maximum. Ignoring additional arguments") +def main(): + """Convert native data for a single sampler point to a time series in ASCII.""" + parser = argparse.ArgumentParser( + description="Convert native data for a single sampler point to a time series in ASCII" + ) + parser.add_argument( + "-aw", + "--amr_wind_path", + help="path to amr-wind directory", + required=True, + type=str, + ) + parser.add_argument( + "-d", + "--run_dir", + help="Directory where run took place, where post-processing directory is", + required=True, + type=str, + ) + parser.add_argument( + "-p", + "--post_proc_dir", + help="Name of post-processing directory", + default="post_processing", + type=str, + ) + parser.add_argument( + "-l", + "--label", + help="Top-level sampling label name", + default="sampling", + type=str, + ) + args = parser.parse_args() -print("\nParameters to be used:") -print(" run directory path: ", os.path.realpath(loc_dir)) -print(" AMR-Wind path: ", os.path.realpath(AMR_WIND_PATH)) -print(" post-processing directory name: ", pp_dir) -print(" top-level sampling label name: ", sampling_label) -########################################################### + ########## Input arguments from command line ############## + print("\nParameters to be used:") + print(" run directory path: ", os.path.realpath(args.run_dir)) + print(" AMR-Wind path: ", os.path.realpath(args.amr_wind_path)) + print(" post-processing directory name: ", args.post_proc_dir) + print(" top-level sampling label name: ", args.label) + ########################################################### -sys.path.append(AMR_WIND_PATH+'/tools/') -import amrex_particle -from amrex_particle import AmrexParticleFile + sys.path.append(args.amr_wind_path+'/tools/') + import amrex_particle + from amrex_particle import AmrexParticleFile -pp_dir = loc_dir + "/" + pp_dir -pfile = AmrexParticleFile(loc_dir) + post_proc_dir = args.run_dir + "/" + args.post_proc_dir + pfile = AmrexParticleFile(args.run_dir) -os.chdir(pp_dir) -contents = glob.glob(sampling_label + "*") -directories = [path for path in contents if os.path.isdir(path)] -sdirs = sorted(directories) + os.chdir(post_proc_dir) + contents = glob.glob(args.label + "*") + directories = [path for path in contents if os.path.isdir(path)] + sdirs = sorted(directories) -max_dir = len(sdirs)-1 -max_step = int(sdirs[max_dir][len(sampling_label):len(sdirs[max_dir])]) + max_dir = len(sdirs)-1 + max_step = int(sdirs[max_dir][len(args.label):len(sdirs[max_dir])]) -for n in range(len(sdirs)): - step = int(sdirs[n][len(sampling_label):len(sdirs[n])]) - print(str(step) + " out of " + str(max_step)) - pt = pfile.load(step, label=sampling_label, root_dir = ".") - pt.parse_header() - pt.parse_info() - pt.load_binary_data() - data = pt.df - samplers = pt.info["samplers"] - - t_str = str(pt.info["time"]) - v_str = "x y z" - - for rv in range(pt.num_reals): - v_str += " " + pt.real_var_names[rv] - for iv in range(pt.num_ints - 3): - v_str += " " + pt.int_var_names[iv + 3] + for n in 
range(len(sdirs)): + step = int(sdirs[n][len(args.label):len(sdirs[n])]) + print(str(step) + " out of " + str(max_step)) + pt = pfile.load(step, label=args.label, root_dir = ".") + pt.parse_header() + pt.parse_info() + pt.load_binary_data() + data = pt.df + samplers = pt.info["samplers"] + + t_str = str(pt.info["time"]) + v_str = "x y z" + + for rv in range(pt.num_reals): + v_str += " " + pt.real_var_names[rv] + for iv in range(pt.num_ints - 3): + v_str += " " + pt.int_var_names[iv + 3] - header = "t = " + t_str + "\n" + v_str + header = "t = " + t_str + "\n" + v_str + + # Have to go with max possible size, subsampler size not stored + output = np.zeros((pt.num_particles, 3 + pt.num_reals + pt.num_ints - 3)) - # Have to go with max possible size, subsampler size not stored - output = np.zeros((pt.num_particles, 3 + pt.num_reals + pt.num_ints - 3)) - - for s in range(len(samplers)): - np_s = 0 - for p in range(pt.num_particles): - if data.set_id[p] == s: - np_s += 1 - pid = data.probe_id[p] - output[pid, 0] = data.xco[p] - output[pid, 1] = data.yco[p] - output[pid, 2] = data.zco[p] - for rv in range(pt.num_reals): - output[pid, 3+rv] = data[pt.real_var_names[rv]][p] - for iv in range(pt.num_ints - 3): - output[pid, 3+pt.num_reals+iv] = data[pt.int_var_names[iv+3]][p] - + for s in range(len(samplers)): + np_s = 0 + for p in range(pt.num_particles): + if data.set_id[p] == s: + np_s += 1 + pid = data.probe_id[p] + output[pid, 0] = data.xco[p] + output[pid, 1] = data.yco[p] + output[pid, 2] = data.zco[p] + for rv in range(pt.num_reals): + output[pid, 3+rv] = data[pt.real_var_names[rv]][p] + for iv in range(pt.num_ints - 3): + output[pid, 3+pt.num_reals+iv] = data[pt.int_var_names[iv+3]][p] + + + fname = sdirs[n] + "_" + samplers[s]["label"]+ ".txt" + np.savetxt(fname,output[0:np_s,:],header=header) - fname = sdirs[n] + "_" + samplers[s]["label"]+ ".txt" - np.savetxt(fname,output[0:np_s,:],header=header) +if __name__ == "__main__": + main() From 2238989ca00015d98a84c7c6d074cae720226987 Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Mon, 23 Jun 2025 15:46:39 -0600 Subject: [PATCH 05/12] adding some aborts for bad names --- tools/convert_native_sample_to_time_series.py | 5 ++++- tools/convert_native_sampling_to_structured_ascii.py | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/convert_native_sample_to_time_series.py b/tools/convert_native_sample_to_time_series.py index e6917f5859..2f6954f4aa 100644 --- a/tools/convert_native_sample_to_time_series.py +++ b/tools/convert_native_sample_to_time_series.py @@ -73,6 +73,10 @@ def main(): contents = glob.glob(args.label + "*") directories = [path for path in contents if os.path.isdir(path)] sdirs = sorted(directories) + n_dir = len(sdirs) + if (n_dir == 0): + print("ERROR: No matching sampling directories found, exiting!") + sys.exit(1) # Separate by length, then order again (because 10000 would be next to 100000) max_len = len(sdirs[0]) @@ -92,7 +96,6 @@ def main(): sdirs[idx] = sdirs_init[n] idx += 1 - n_dir = len(sdirs) max_step = int(sdirs[n_dir-1][len(args.label):len(sdirs[n_dir-1])]) # Get stuff for header and array sizes using first available directory diff --git a/tools/convert_native_sampling_to_structured_ascii.py b/tools/convert_native_sampling_to_structured_ascii.py index 7d9b18105f..4843cfbb72 100644 --- a/tools/convert_native_sampling_to_structured_ascii.py +++ b/tools/convert_native_sampling_to_structured_ascii.py @@ -61,6 +61,10 @@ def main(): sdirs = sorted(directories) max_dir = len(sdirs)-1 + 
print(max_dir) + if (max_dir < 0): + print("ERROR: No matching sampling directories found, exiting!") + sys.exit(1) max_step = int(sdirs[max_dir][len(args.label):len(sdirs[max_dir])]) for n in range(len(sdirs)): From 6a8b3bb724e971aa77eea3a2097dda6d463c33f9 Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Thu, 26 Jun 2025 09:08:52 -0600 Subject: [PATCH 06/12] correcting description --- tools/convert_native_sampling_to_structured_ascii.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/convert_native_sampling_to_structured_ascii.py b/tools/convert_native_sampling_to_structured_ascii.py index 4843cfbb72..e3b8f9a1b7 100644 --- a/tools/convert_native_sampling_to_structured_ascii.py +++ b/tools/convert_native_sampling_to_structured_ascii.py @@ -6,9 +6,9 @@ import argparse def main(): - """Convert native data for a single sampler point to a time series in ASCII.""" + """Convert native sampling data to a structured ASCII file for every output step""" parser = argparse.ArgumentParser( - description="Convert native data for a single sampler point to a time series in ASCII" + description="Convert native sampling data to a structured ASCII file for every output step" ) parser.add_argument( "-aw", From 91655663c90e769480826421e85512f4459b7d0d Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Thu, 26 Jun 2025 11:45:52 -0600 Subject: [PATCH 07/12] small tweaks, including checking against sampler index in file --- tools/convert_native_sampling_to_structured_ascii.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/convert_native_sampling_to_structured_ascii.py b/tools/convert_native_sampling_to_structured_ascii.py index e3b8f9a1b7..f3f2d0ef94 100644 --- a/tools/convert_native_sampling_to_structured_ascii.py +++ b/tools/convert_native_sampling_to_structured_ascii.py @@ -6,9 +6,9 @@ import argparse def main(): - """Convert native sampling data to a structured ASCII file for every output step""" + """Convert native sampling data to a structured ASCII file for every sampler and output step""" parser = argparse.ArgumentParser( - description="Convert native sampling data to a structured ASCII file for every output step" + description="Convert native sampling data to a structured ASCII file for every sampler and output step" ) parser.add_argument( "-aw", @@ -76,6 +76,10 @@ def main(): pt.load_binary_data() data = pt.df samplers = pt.info["samplers"] + if (n == 0): + print("sampler labels found in first step:") + for sampler in samplers: + print(" " + sampler["label"]) t_str = str(pt.info["time"]) v_str = "x y z" @@ -93,7 +97,7 @@ def main(): for s in range(len(samplers)): np_s = 0 for p in range(pt.num_particles): - if data.set_id[p] == s: + if data.set_id[p] == samplers[s]["index"]: np_s += 1 pid = data.probe_id[p] output[pid, 0] = data.xco[p] From a62267ba80d9db8d0c434ed66b3462c09ba47ae9 Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Thu, 26 Jun 2025 13:26:28 -0600 Subject: [PATCH 08/12] improve print statements and add fixed length (number of digits) input --- ...ert_native_sampling_to_structured_ascii.py | 37 ++++++++++++++++--- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/tools/convert_native_sampling_to_structured_ascii.py b/tools/convert_native_sampling_to_structured_ascii.py index f3f2d0ef94..80ceb092e5 100644 --- a/tools/convert_native_sampling_to_structured_ascii.py +++ b/tools/convert_native_sampling_to_structured_ascii.py @@ -38,6 +38,13 @@ def main(): default="sampling", type=str, ) + parser.add_argument( + "-n", + 
"--number_of_digits", + help="Fixed number of digits denoting output step", + default=0, + type=int, + ) args = parser.parse_args() ########## Input arguments from command line ############## @@ -60,26 +67,46 @@ def main(): directories = [path for path in contents if os.path.isdir(path)] sdirs = sorted(directories) - max_dir = len(sdirs)-1 - print(max_dir) + # if fixed length is specified, prune directories that do not fit + skip_flag = np.zeros(len(sdirs)) + ndir = len(sdirs) + max_dir = ndir - 1 + if (args.number_of_digits > 0): + ndir = 0 + max_dir = ndir - 1 + for n in range(len(sdirs)): + if (int(len(sdirs[n])) != int(len(args.label) + args.number_of_digits)): + skip_flag[n] = 1 + else: + ndir += 1 + max_dir = n + + print("Number of folders to read: " + str(ndir)) if (max_dir < 0): print("ERROR: No matching sampling directories found, exiting!") sys.exit(1) max_step = int(sdirs[max_dir][len(args.label):len(sdirs[max_dir])]) + samplers_printed = False for n in range(len(sdirs)): + if (skip_flag[n] == 1): + continue + step = int(sdirs[n][len(args.label):len(sdirs[n])]) - print(str(step) + " out of " + str(max_step)) + if (samplers_printed): + print(str(step) + " out of " + str(max_step)) pt = pfile.load(step, label=args.label, root_dir = ".") pt.parse_header() pt.parse_info() pt.load_binary_data() data = pt.df samplers = pt.info["samplers"] - if (n == 0): - print("sampler labels found in first step:") + if (not samplers_printed): + samplers_printed = True + print("Sampler labels found in first step:") for sampler in samplers: print(" " + sampler["label"]) + print("Reading folder " + str(step) + " out of " + str(max_step)) t_str = str(pt.info["time"]) v_str = "x y z" From 58766bece1a6275b0558ab2ff63daa947e1ee47c Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Fri, 18 Jul 2025 11:41:54 -0600 Subject: [PATCH 09/12] document tools briefly --- docs/sphinx/user/tools.rst | 128 +++++++++++++++++++++++++++++++++++++ docs/sphinx/user/user.rst | 1 + tools/README.md | 2 + 3 files changed, 131 insertions(+) create mode 100644 docs/sphinx/user/tools.rst diff --git a/docs/sphinx/user/tools.rst b/docs/sphinx/user/tools.rst new file mode 100644 index 0000000000..1bad58b6cd --- /dev/null +++ b/docs/sphinx/user/tools.rst @@ -0,0 +1,128 @@ +.. _tools: + + +Tools reference +=============== + +This section summarizes the functionality of the auxiliary tools included +in the AMR-Wind ``tools/`` directory. There are two main parts of this folder. Python +scripts, which do not require compilation, are in ``tools/``. C++ programs, which +are compiled as separate executables when AMR-Wind is compiled, are saved in ``tools/utilities/``. +After compilation, these executables can be found in the build directory, where each has its own +folder within ``tools/utilities/`` there. + +Python scripts +-------------- + +.. input_param:: abl_plots_compare.py + + Plots quantities from ABL statistics file (when in NetCDF format). Also relies on an averages.csv file. *Deprecate?* + +.. input_param:: amrex_particle.py + + Contains helpful routines for manipulating AMReX particle data. + +.. input_param:: amrex_plotfile.py + + Contains helpful routines for manipulating AMReX plotfile data. + +.. input_param:: amrex_utils.py + + Contains helpful routines for interacting with AMReX data structures. + +.. input_param:: avg_lines.py + + Reads in a line plot file and writes flow statistics to a CSV file. *Deprecate?* + +.. 
input_param:: calc_inflowoutflow_stats.py
+
+   Tool to process statistics from precursor ABL runs and provide information to populate certain inputs of a subsequent inflow-outflow simulation.
+
+.. input_param:: convert_amrex_hdf5_plotfile.py
+
+   Converts plotfiles written in HDF5 format to plain numpy data files.
+
+.. input_param:: convert_native_sample_to_time_series.py
+
+   Converts sampler data written in native format to a time series written in ASCII format. This is intended for scenarios where there is a single sampler point of interest, which is specified by naming the sampler label and point index.
+
+.. input_param:: convert_native_sampling_to_structured_ascii.py
+
+   Converts sampler data written in native format to files written in ASCII format. For every sampling folder (i.e., every output step), this script creates a file for each sampler group, where each file lists the sampled data in the order of the points belonging to that sampler.
+
+.. input_param:: example_plotfile_io.py
+
+   Example script for directly interacting with plotfile data.
+
+.. input_param:: fcompare_particles.py
+
+   Tool to compare native AMReX particle data. This has similar capability to the AMReX ``fcompare`` utility, which compares mesh data written to AMReX plotfiles.
+
+.. input_param:: generate_native_boundary_plane.py
+
+   Tool to generate arbitrary temporally and spatially varying boundary conditions via boundary plane files written in native format.
+
+.. input_param:: generate_native_boundary_plane_header.py
+
+   Tool to generate native-format boundary plane header files for arbitrary temporally and spatially varying boundary conditions.
+
+.. input_param:: modify_hdf5_attributes.py
+
+   Modifies HDF5 attributes of files in order to be read into yt.
+
+.. input_param:: naluwind2amrwind.py
+
+   Script to convert from a Nalu-wind YAML file to an AMR-wind input file. *Deprecate?*
+
+.. input_param:: native_boundary_plane.py
+
+   Contains helpful routines for manipulating native boundary plane data.
+
+.. input_param:: plot_lines.py
+
+   Reads in CSV file and generates line plots. *Deprecate?*
+
+.. input_param:: postproamrwind.py
+
+   Tools for post-processing amr-wind data (specifically line plot) *Deprecate?*
+
+.. input_param:: refine_native_boundary_plane.py
+
+   Apply mesh refinement to a boundary plane file written in native format.
+
+.. input_param:: sampling_dam_break_godunov_ascii.py
+
+   Example script for plotting free surface sampler outputs in ASCII format.
+
+.. input_param:: sampling_dam_break_godunov_native.py
+
+   Example script for plotting free surface sampler outputs in native particle format.
+
+.. input_param:: sampling_dam_break_godunov_netcdf.py
+
+   Example script for plotting free surface sampler outputs in NetCDF format.
+
+
+C++ programs (utilities)
+------------------------
+
+
+.. input_param:: CheckpointToCSV
+
+   Converts checkpoint files to CSV format.
+
+.. input_param:: PlotfileToCSV
+
+   Converts plotfiles to CSV format.
+
+.. input_param:: coarsen-chkpt
+
+   Reads in a checkpoint file and adds a coarser base level to the existing grid.
+
+.. input_param:: compareMultilevelToReference
+
+   Compares plotfiles (similar to fcompare) when the grid refinements do not exactly match between the two.
+
+.. input_param:: refine-chkpt
+
+   Reads in a checkpoint file and refines it by increasing its base resolution.
\ No newline at end of file diff --git a/docs/sphinx/user/user.rst b/docs/sphinx/user/user.rst index 7eedd74291..2edf0b3053 100644 --- a/docs/sphinx/user/user.rst +++ b/docs/sphinx/user/user.rst @@ -14,3 +14,4 @@ User Manual faq.rst post_processing_examples.rst compression + tools.rst diff --git a/tools/README.md b/tools/README.md index 35f6f118e5..76f4c3422c 100644 --- a/tools/README.md +++ b/tools/README.md @@ -3,6 +3,8 @@ This directory contains a collection of helpful preprocessing and postprocessing utilities. +Remove everything below? + ## Preprocessing scripts ### naluwind2amrwind.py From c2d03545c2830651b32a4f7ad0980cd44f83148b Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Fri, 18 Jul 2025 11:51:24 -0600 Subject: [PATCH 10/12] spelling list --- docs/sphinx/user/tools.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/sphinx/user/tools.rst b/docs/sphinx/user/tools.rst index 1bad58b6cd..b83cd0c7f3 100644 --- a/docs/sphinx/user/tools.rst +++ b/docs/sphinx/user/tools.rst @@ -1,5 +1,9 @@ -.. _tools: +.. spelling:word-list:: + + fcompare + csv +.. _tools: Tools reference =============== From c637113c67d3ef383a66f3710445d1bd148864f9 Mon Sep 17 00:00:00 2001 From: Michael Kuhn Date: Fri, 18 Jul 2025 14:01:17 -0600 Subject: [PATCH 11/12] deprecate old tools --- docs/sphinx/user/tools.rst | 20 --- tools/README.md | 90 +--------- tools/abl_plots_compare.py | 230 ------------------------ tools/avg_lines.py | 93 ---------- tools/naluwind2amrwind.py | 352 ------------------------------------- tools/plot_lines.py | 141 --------------- tools/postproamrwind.py | 109 ------------ 7 files changed, 4 insertions(+), 1031 deletions(-) delete mode 100644 tools/abl_plots_compare.py delete mode 100644 tools/avg_lines.py delete mode 100755 tools/naluwind2amrwind.py delete mode 100644 tools/plot_lines.py delete mode 100644 tools/postproamrwind.py diff --git a/docs/sphinx/user/tools.rst b/docs/sphinx/user/tools.rst index b83cd0c7f3..5488ccad92 100644 --- a/docs/sphinx/user/tools.rst +++ b/docs/sphinx/user/tools.rst @@ -18,10 +18,6 @@ folder within ``tools/utilities/`` there. Python scripts -------------- -.. input_param:: abl_plots_compare.py - - Plots quantities from ABL statistics file (when in NetCDF format). Also relies on an averages.csv file. *Deprecate?* - .. input_param:: amrex_particle.py Contains helpful routines for manipulating AMReX particle data. @@ -34,10 +30,6 @@ Python scripts Contains helpful routines for interacting with AMReX data structures. -.. input_param:: avg_lines.py - - Reads in a line plot file and writes flow statistics to a CSV file. *Deprecate?* - .. input_param:: calc_inflowoutflow_stats.py Tool to process statistics from precursor ABL runs and provide information to populate certain inputs of a subsequent inflow-outflow simulation. @@ -74,22 +66,10 @@ Python scripts Modifies HDF5 attributes of files in order to be read into yt. -.. input_param:: naluwind2amrwind.py - - Script to convert from a Nalu-wind YAML file to an AMR-wind input file. *Deprecate?* - .. input_param:: native_boundary_plane.py Contains helpful routines for manipulating native boundary plane data. -.. input_param:: plot_lines.py - - Reads in CSV file and generates line plots. *Deprecate?* - -.. input_param:: postproamrwind.py - - Tools for post-processing amr-wind data (specifically line plot) *Deprecate?* - .. input_param:: refine_native_boundary_plane.py Apply mesh refinement to a boundary plane file written in native format. 
diff --git a/tools/README.md b/tools/README.md
index 76f4c3422c..4fae8785a6 100644
--- a/tools/README.md
+++ b/tools/README.md
@@ -1,89 +1,7 @@
 # Preprocessing and postprocessing tools
 
 This directory contains a collection of helpful preprocessing and
-postprocessing utilities.
-
-Remove everything below?
-
-## Preprocessing scripts
-
-### naluwind2amrwind.py
-The [naluwind2amrwind.py](naluwind2amrwind.py) python script allows you to
-convert a Nalu-wind input file to an amr-wind input file. To the best of
-its ability, it will try to take the parameters from Nalu-wind and map them
-to amr-wind.
-
-The basic usage is given by:
-```bash
-usage: naluwind2amrwind.py [-h] [--outfile OUTFILE] yamlfile
-
-Convert Nalu-Wind input file to AMR-wind input file
-
-positional arguments:
-  yamlfile
-
-optional arguments:
-  -h, --help         show this help message and exit
-  --outfile OUTFILE  write output to this file
-```
-
-For instance, to convert the Nalu-wind input file `naluwind.yaml`, run
-```bash
-$ ./naluwind2amrwind.py naluwind.yaml
-
-#---------------------------------------#
-#            SIMULATION STOP            #
-#---------------------------------------#
-time.stop_time = 20000.0 # Max (simulated) time to evolve
-time.max_step = 40000 # Max number of time steps
-
-#---------------------------------------#
-#         TIME STEP COMPUTATION         #
-#---------------------------------------#
-time.fixed_dt = 0.5 # Use this constant dt if > 0
-time.cfl = 0.95 # CFL factor
-
-#---------------------------------------#
-#            INPUT AND OUTPUT           #
-#---------------------------------------#
-time.plot_interval = 1000 # Steps between plot files
-time.checkpoint_interval = 1000 # Steps between checkpoint files
-
-[...snip...]
-```
-Use the `--outfile` option to write the output to a file instead. Note
-that the `nalu_abl_mesh` and `nalu_preprocess` section should be present
-in the yaml file for it to correctly extract the geometry and the
-inversion layer properties.
-
-## Postprocessing scripts
-
-### postproamrwind.py
-
-The [postproamrwind.py](postproamrwind.py) python provides a quick
-method to time-average and extract data from the `line_plot.txt`
-output.
-
-The major functions in this code are:
-- `loadfile(filename, t1, t2, headerinfo=[])`: Loads the line_plot.txt
-  file given by `filename`, for all times between `t1` and `t2`.
-- `timeaverage(dat, t1, t2, Ncells, Ncomp)`: Time averages the
-  line_plot data given by `dat`, between times `t1` and `t2`.
-- `avglineplot(filename, t1,t2, headerinfo=[])`: Combines `loadfile()`
-  and `timeaverage()`, so it loads the line_plot.txt given by
-  `filename`, and averages it between `t1` and `t2`.
-- `extractvars(dat, colheaders, varnames)`: Returns the variable(s)
-  matching the variables in varnames
-
-A short example of how to use the code is shown below. In this case,
-we load the `line_plot.txt` file, average from 0 to 100 seconds, and
-plot the averaged U velocity versus z.
-
-```python
-import postproamrwind
-import matplotlib.pylot as plt
-avgdat, headers = postproamrwind.avglineplot('line_plot.txt',0,100)
-amrvars = postproamrwind.extractvars(avgdat, headers, ['z','u_avg'])
-plt.plot(amrvars['u_avg'], amrvars['z'])
-plt.show()
-```
+postprocessing utilities. Please consult the documentation for more information,
+including a brief summary of each one. The source code for tools written in C++ is
+in the utilities directory. These are compiled with AMR-Wind, and their executables
+are written to folders in the same build directory.
\ No newline at end of file diff --git a/tools/abl_plots_compare.py b/tools/abl_plots_compare.py deleted file mode 100644 index 4d7a0f7b75..0000000000 --- a/tools/abl_plots_compare.py +++ /dev/null @@ -1,230 +0,0 @@ -# -*- coding: utf-8 -*- -# pylint: disable=too-many-locals - -""" -ABL Statistics plotting utility -""" - -import numpy as np -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.pyplot as plt -import netCDF4 as ncdf -import pandas as pd - - -class ABLStatsFile(object): - """Interface to ABL Statistics NetCDF file""" - - def __init__(self, stats_file="abl_statistics_sampling.nc"): - """ - Args: - stats_file (path): Absolute path to the NetCDF file - """ - self.stats_file = stats_file - self.abl_stats = ncdf.Dataset(stats_file) - self._time = self.abl_stats["time"][:] - self._heights = self.abl_stats["heights"][:] - self.hub_height = 0.0 - self.capinv_ht1 = 0.0 - self.capinv_ht2 = 0.0 - - @property - def time(self): - """The time array from the ABL stats file""" - return self._time - - @property - def heights(self): - """Heights where data is available""" - return self._heights - -def plot_velocity(stats, pdffile, field="velocity", num_steps=3600): - """Generate velocity plots""" - col_list = plt.rcParams['axes.prop_cycle'].by_key()['color'] - ht = stats.heights - time = stats.time - ttmp = time[-num_steps:] - print("Start time = %.1f s; end time = %.1f s"%(ttmp[0], ttmp[-1])) - vel_full = stats.abl_stats.variables[field][:] - vel = vel_full[-num_steps:, :, :] - vmag_full = np.linalg.norm(vel, axis=-1) - vmag_avg = np.average(vmag_full, axis=0) - vmag_min = np.min(vmag_full, axis=0) - vmag_max = np.max(vmag_full, axis=0) - vavg = np.average(vel, axis=0) - vmin = np.min(vel, axis=0) - vmax = np.max(vel, axis=0) - - a=pd.read_csv("averages.csv"); - - plt.figure() - plt.plot(vmag_avg, ht, col_list[0], label=r"$|U|$") - plt.plot(vavg[:, 0], ht, color=col_list[1], label=r"$U_x$") - plt.plot(vavg[:, 1], ht, color=col_list[2], label=r"$U_y$") - - plt.plot(np.sqrt(a.u*a.u+a.v*a.v),a.z,color=col_list[3], label=r"$|U|$ amr-wind") - plt.plot(a.u,a.z,color=col_list[4],label=r"$U_x$ amr-wind") - plt.plot(a.v,a.z,color=col_list[5],label=r"$U_y$ amr-wind") - - plt.fill_betweenx(ht, vmag_min, vmag_max, color=col_list[0], alpha=0.4) - plt.fill_betweenx(ht, vmin[:, 0], vmax[:, 0], color=col_list[1], alpha=0.4) - plt.fill_betweenx(ht, vmin[:, 1], vmax[:, 1], color=col_list[2], alpha=0.4) - plt.axhline(stats.hub_height, color='k', ls='--', lw=0.5) - plt.axhline(stats.capinv_ht1, color='k', ls='-.', lw=0.5) - plt.axhline(stats.capinv_ht2, color='k', ls='-.', lw=0.5) - plt.ylim(ht[0], ht[-1]) - plt.legend() - plt.grid() - plt.xlabel("Velocity (m/s)") - plt.ylabel("Height (m)") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - - wdir = 180.0 + np.degrees(np.arctan2(vavg[:, 0], vavg[:, 1])) - wdir2 = 180.0 + np.degrees(np.arctan2(a.u,a.v)) - plt.figure() - plt.plot(wdir, ht,label="nalu-wind") - plt.plot(wdir2,a.z, color=col_list[1], label="amr-wind") - plt.axhline(stats.hub_height, color='k', ls='-.', lw=0.5) - plt.axhline(stats.capinv_ht1, color='k', ls='-.', lw=0.5) - plt.axhline(stats.capinv_ht2, color='k', ls='-.', lw=0.5) - plt.ylim(ht[0], ht[-1]) - plt.legend() - plt.grid() - plt.xlabel("Wind direction (deg.)") - plt.ylabel("Height (m)") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - - plt.figure() - plt.plot(vavg[:, 2], ht, color=col_list[3], label=r"$U_z$") - plt.plot(a.w, a.z, color=col_list[4], label=r"$U_z$ amr-wind") - plt.ylim(ht[0], 
ht[-1]) - plt.legend() - plt.grid() - plt.xlabel(r"$U_z$ (m/s)") - plt.ylabel("Height (m)") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - -def plot_res_stress(stats, pdffile, field="resolved_stress", num_steps=3600): - """Resolved stress""" - col_list = plt.rcParams['axes.prop_cycle'].by_key()['color'] - ht = stats.heights - idx = [0, 3, 5] - labels = [r"$\left\langle u'u' \right \rangle$", - r"$\left\langle v'v' \right \rangle$", - r"$\left\langle w'w' \right \rangle$"] - field = stats.abl_stats.variables[field][:] - field = field[-num_steps:, :, :] - utau = stats.abl_stats.variables["utau"][:] - utau = np.average(utau[-num_steps:]) - utau = utau * utau - ravg = np.average(field, axis=0) / utau - rmin = np.min(field, axis=0) / utau - rmax = np.max(field, axis=0) / utau - a=pd.read_csv("averages.csv"); - - plt.figure() - for i, ii in enumerate(idx): - plt.plot(ravg[:, ii], ht, col_list[i], label=labels[i]) - plt.fill_betweenx(ht, rmin[:, ii], rmax[:, ii], color=col_list[i], alpha=0.4) - utau2 = .52*.52 - plt.plot(a.uu/utau2,a.z,color=col_list[4],label=r"$\left\langle u'u' \right \rangle$ amr-wind") - plt.plot(a.vv/utau2,a.z,color=col_list[5],label=r"$\left\langle v'v' \right \rangle$ amr-wind") - plt.plot(a.ww/utau2,a.z,color=col_list[6],label=r"$\left\langle w'w' \right \rangle$ amr-wind") - plt.axhline(stats.hub_height, color='k', ls='--', lw=0.5) - plt.axhline(stats.capinv_ht1, color='k', ls='-.', lw=0.5) - plt.axhline(stats.capinv_ht2, color='k', ls='-.', lw=0.5) - plt.ylim(ht[0], ht[-1]) - plt.legend() - plt.grid() - plt.ylabel("Height (m)") - plt.xlabel(r"$\left\langle u_i u_i \right\rangle/u_\tau^2$") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - - plt.figure() - labels = [r"$\left\langle u'v' \right \rangle$", - r"$\left\langle u'w' \right \rangle$", - r"$\left\langle v'w' \right \rangle$"] - for i, ii in enumerate([1, 2, 4]): - plt.plot(ravg[:, ii], ht, col_list[i], label=labels[i]) - plt.fill_betweenx(ht, rmin[:, ii], rmax[:, ii], color=col_list[i], alpha=0.4) - - plt.plot(a.uv/utau2,a.z,color=col_list[4],label=r"$\left\langle u'v' \right \rangle$ amr-wind") - plt.plot(a.uw/utau2,a.z,color=col_list[5],label=r"$\left\langle u'w' \right \rangle$ amr-wind") - plt.plot(a.vw/utau2,a.z,color=col_list[6],label=r"$\left\langle v'w' \right \rangle$ amr-wind") - - plt.axhline(stats.hub_height, color='k', ls='--', lw=0.5) - plt.axhline(stats.capinv_ht1, color='k', ls='-.', lw=0.5) - plt.axhline(stats.capinv_ht2, color='k', ls='-.', lw=0.5) - plt.ylim(ht[0], ht[-1]) - plt.legend() - plt.grid() - plt.ylabel("Height (m)") - plt.xlabel(r"$\left\langle u_i u_j \right\rangle/u_\tau^2$") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - -def plot_phim(stats, pdffile, field="velocity", num_steps=3600): - """Generate velocity plots""" - a=pd.read_csv("averages.csv"); - col_list = plt.rcParams['axes.prop_cycle'].by_key()['color'] - ht = stats.heights - vel_full = stats.abl_stats.variables[field][:] - vel = vel_full[-num_steps:, :, :] - vmag_full = np.linalg.norm(vel, axis=-1) - vmag_avg = np.average(vmag_full, axis=0) - utau = stats.abl_stats.variables["utau"][:] - utau = np.average(utau[-num_steps:]) - dudz = np.gradient(vmag_avg, ht) - phim = 0.4 / utau * ht * dudz - plt.figure() - plt.plot(phim, ht,label="nalu-wind") - print(utau) - dudz = np.gradient(np.sqrt(a.u*a.u+a.v*a.v), a.z[1]-a.z[0]) - phim = 0.4 / utau * a.z * dudz - plt.plot(phim,a.z,color=col_list[1],label="amr-wind") - plt.axhline(stats.hub_height, color='k', ls='--', lw=0.5) - 
plt.ylim(0, 600) - plt.xlim(0, 3) - plt.legend() - plt.grid() - plt.xlabel(r"$\phi_m = {\kappa}/{u_\tau}\ \left({du}/{dz}\right) \ z$") - plt.ylabel("Height (m)") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - -def plot_temperature(stats, pdffile, field="temperature", num_steps=3600): - """Plot the temperature profile""" - a=pd.read_csv("averages.csv"); - col_list = plt.rcParams['axes.prop_cycle'].by_key()['color'] - ht = stats.heights - temp_full = stats.abl_stats.variables[field][:] - temp_avg = np.average(temp_full[-num_steps:, :], axis=0) - - plt.figure() - plt.plot(temp_avg, ht,label="nalu-wind") - plt.plot(a.theta, a.z, color=col_list[1],label="amr-wind") - plt.axhline(stats.hub_height, color='k', ls='--', lw=0.5) - plt.axhline(stats.capinv_ht1, color='k', ls='-.', lw=0.5) - plt.axhline(stats.capinv_ht2, color='k', ls='-.', lw=0.5) - plt.legend() - plt.grid() - plt.ylim(ht[0], ht[-1]) - plt.ylabel("Height (m)") - plt.xlabel("Temperature (K)") - pdffile.savefig(dpi=300, bbox_inches="tight") - plt.close() - -if __name__ == "__main__": - statsfile = ABLStatsFile() - statsfile.hub_height = 90.0 - statsfile.capinv_ht1 = 650.0 - statsfile.capinv_ht2 = 750.0 - with PdfPages("owez_abl_precursor.pdf") as pdf: - plot_velocity(statsfile, pdf) - plot_res_stress(statsfile, pdf) - plot_phim(statsfile, pdf) - plot_temperature(statsfile, pdf) diff --git a/tools/avg_lines.py b/tools/avg_lines.py deleted file mode 100644 index e3f3f8b2cc..0000000000 --- a/tools/avg_lines.py +++ /dev/null @@ -1,93 +0,0 @@ -import yt -import numpy as np -from matplotlib import pylab -from sys import argv -import os -import shutil -import glob -import pandas as pd -from mpi4py import MPI -import copy - - -# inputs -filename = "line_plot" - -start = 96051 -end = 99999 -skip = 1 - -fns = [] -for i in range(start,end+1,skip): - fn = filename+str(i).zfill(5); - fns.append(fn) - -start = 100000 -end = 105111 -for i in range(start,end+1,skip): - fn = filename+str(i).zfill(6); - fns.append(fn) - - -comm = MPI.COMM_WORLD -rank = comm.Get_rank() -nprocs = comm.Get_size() - -lfns = fns[rank::nprocs] -print(lfns) - -# load one file to get the number of cells in 1D -ds = yt.load(fns[0]) - -# ds.print_stats() -# ds.field_list -# ds.derived_field_list -# ds.all_data() - -g = ds.index.grids[0] -z = np.array(g["z"].flatten()) -n = len(z) -a=pd.DataFrame(np.zeros((n,20)),columns=["z","u","v","w","uu","vv","ww","uv","uw","vw","theta","wuu","wuv","wuw","wvv","wvw","www","Tu","Tv","Tw"]) -a.z += z[:] - -print(a.z) -N = np.zeros(1) - -for i, fn in enumerate(lfns): - ds = yt.load(fn); - g = ds.index.grids[0] - a.u += g[""].flatten() #[0,0,:] - a.v += g[""].flatten() - a.w += g[""].flatten() - a.uu += g[""].flatten() - a.vv += g[""].flatten() - a.ww += g[""].flatten() - a.uv += g[""].flatten() - a.uw += g[""].flatten() - a.vw += g[""].flatten() - a.theta += g[""].flatten() - a.wuu += g[""].flatten() - a.wuv += g[""].flatten() - a.wuw += g[""].flatten() - a.wvv += g[""].flatten() - a.wvw += g[""].flatten() - a.www += g[""].flatten() - a.Tu += g[""].flatten() - a.Tv += g[""].flatten() - a.Tw += g[""].flatten() - N[0] += 1.0 - -b = copy.copy(a.values) -# not sure how to do this with dataframe yet so for now use ndarray -comm.Reduce(a.values,b, MPI.SUM, 0) -a_avg = pd.DataFrame(b,columns=a.columns) - -Ntotal = np.zeros(1) -comm.Reduce(N,Ntotal, MPI.SUM, 0) - - -if rank == 0: - a_avg /= Ntotal[0] - a_avg.z[:]= a.z[:] - print(a_avg) - a_avg.to_csv("averages.csv", index=False) diff --git a/tools/naluwind2amrwind.py 
b/tools/naluwind2amrwind.py deleted file mode 100755 index be26390510..0000000000 --- a/tools/naluwind2amrwind.py +++ /dev/null @@ -1,352 +0,0 @@ -#!/usr/bin/env python -# -# Script to convert from a Nalu-wind YAML file to an AMR-wind input file -# -# -# usage: naluwind2amrwind.py [-h] [--outfile OUTFILE] yamlfile -# -# positional arguments: -# yamlfile -# -# optional arguments: -# -h, --help show this help message and exit -# --outfile OUTFILE write output to this file - - -# In case anybody tries to use print() in python2 -from __future__ import print_function - -import sys -import math -import argparse - -# Load the appropriate yaml reader -try: - import ruamel.yaml as yaml - useruemel=True - yaml = yaml.YAML() -except: - import yaml as yaml - useruemel=False - -# Get rid of yaml warnings (if present) -try: - yaml.warnings({'YAMLLoadWarning': False}) -except: - stuff=0 - -# Return the dictionary corresponding to key -# listdic = list of dictionaries, indexed by key -def getdict(listdic, key): - for dic in listdic: - if key in dic: return dic[key] - return [] - -# Return the dictionary which contains a given key -# listdic = list of dictionaries, one of which might have key -def getdict2(listdic, key): - for dic in listdic: - if key in dic: return dic - return [] - -# Return the dictionary which has keyname equal to va -def getdicwithname(listdic, val, keyname='name'): - for dic in listdic: - if dic[keyname] == val: return dic - print("Cannot find entry with name: "+name) - sys.exit(1) # Should not get here - return - -# ---------- -# Write a parameter to the amr input file -# ---------- -# param: parameter name in AMR-wind file -# data: data to be written -# outfile: file handle (or sys.stdout) -# isstring: if True, will wrap quotes around the string data -# comment: optional comment after data -# commentonly: if True, don't write any data, just the comment -# prefix: Add prefix to the beginning of the line -def writeAMRparam(param, data, outfile, - isstring=False, comment="", commentonly=False, - prefix=''): - if commentonly: - print(comment, file=outfile) - elif data != None: # Do not write anything if data is empty - # -- construct the payload string -- - if isinstance(data, list): datalist=data - else: datalist=[data] - if isstring: datastring = ' '.join('\"'+str(x)+'\"' for x in datalist) - else: datastring = ' '.join(str(x) for x in datalist) - # -- add comments -- - if len(comment)>0: commentstring="# "+comment - else: commentstring="" - # -- Add the full string -- - fullstring = prefix+'%-30s = %-30s %s'%(param, datastring, commentstring) - print(fullstring, file=outfile) - return - - -### ========== Set up files and input/output args ================== - -# Load the command line arguments -parser = argparse.ArgumentParser(description='Convert Nalu-Wind input file to AMR-wind input file') -parser.add_argument('--outfile', default='', help="write output to this file") -parser.add_argument('yamlfile') #, nargs='+') -args=parser.parse_args() - -# Load the yaml file -infile = args.yamlfile -with open(infile) as fp: - #yamldata=yaml.load(fp, Loader=yaml.FullLoader) - yamldata=yaml.load(fp) - -# Set up the output stream -# Either write to outfile, or write to screen -if len(args.outfile)>0: - outfile=open(args.outfile, 'w') -else: - outfile=sys.stdout - -### ========== Start reading nalu yaml file ================== -### --- sim start/stop/dt ---- -timeinputyaml=yamldata['Time_Integrators'][0]['StandardTimeIntegrator'] -max_step = timeinputyaml['termination_step_count'] -fixed_dt = 
timeinputyaml['time_step']
-steptype = timeinputyaml['time_stepping_type']
-
-stop_time=fixed_dt*max_step
-
-### --- output and restart ----
-outputyaml = yamldata['realms'][0]['output']
-try: plot_interval = outputyaml['output_frequency']
-except: plot_interval = 0
-try: checkpoint_interval = yamldata['realms'][0]['restart']['restart_frequency']
-except: checkpoint_interval = 0
-
-### --- Set the mesh and grid sizes ----
-if 'nalu_abl_mesh' in yamldata:
-    # -- mesh extents --
-    meshvertices = yamldata['nalu_abl_mesh']['vertices']
-    x0 = meshvertices[0]
-    x1 = meshvertices[1]
-    # -- mesh dimensions --
-    meshdimensions = yamldata['nalu_abl_mesh']['mesh_dimensions']
-    # Check periodicity
-    is_periodic = [1,1,0]  # Change this later
-else:
-    # Use the defaults
-    x0 = [0, 0, 0]
-    x1 = [1000, 1000, 1000]
-    meshdimensions = [48, 48, 48]
-    # Check periodicity
-    is_periodic = [1,1,0]
-
-### -- Set physics ---
-soloptsyaml = yamldata['realms'][0]['solution_options']
-gravity = getdict(soloptsyaml['options'], 'user_constants')['gravity']
-density = getdict(soloptsyaml['options'], 'user_constants')['reference_density']
-temp = getdict(soloptsyaml['options'], 'user_constants')['reference_temperature']
-sourceterms = getdict(soloptsyaml['options'], 'source_terms')['momentum']
-kappa = getdict(soloptsyaml['options'], 'turbulence_model_constants')['kappa']
-smagCs = getdict(soloptsyaml['options'], 'turbulence_model_constants')['cmuCs']
-laminar_prandtl = getdict(soloptsyaml['options'], 'laminar_prandtl')['enthalpy']
-turbulent_prandtl = getdict(soloptsyaml['options'], 'turbulent_prandtl')['enthalpy']
-turbmodel = soloptsyaml['turbulence_model']
-
-# coriolis parameters
-latitude = getdict(soloptsyaml['options'], 'user_constants')['latitude']
-try: east_vector = getdict(soloptsyaml['options'], 'user_constants')['east_vector']
-except: east_vector = None
-try: north_vector = getdict(soloptsyaml['options'], 'user_constants')['north_vector']
-except: north_vector = None
-try: secperrev = 2*math.pi/getdict(soloptsyaml['options'], 'user_constants')['earth_angular_velocity']
-except: secperrev = None
-
-# Turbulence model
-if turbmodel != "smagorinsky":
-    print("AMR-wind only supports smagorinsky model (so far), not "+turbmodel)
-    sys.exit(1)
-else:
-    turbmodel = "Smagorinsky"
-
-# Get surface roughness
-z0 = getdict2(yamldata['realms'][0]['boundary_conditions'], 'wall_boundary_condition')['wall_user_data']['roughness_height']
-zlotemp = getdict2(yamldata['realms'][0]['boundary_conditions'], 'wall_boundary_condition')['wall_user_data']['reference_temperature'] - temp
-# Get upper temperature gradient
-zhiTgrad=getdict2(yamldata['realms'][0]['boundary_conditions'], 'abltop_boundary_condition')['abltop_user_data']['normal_temperature_gradient']
-
-# viscosity
-viscosityyaml = getdicwithname(yamldata['realms'][0]['material_properties']['specifications'], 'viscosity')
-viscosity = viscosityyaml['value']
-
-# Get forcing terms
-forcingterms = getdict(soloptsyaml['options'], 'source_terms')['momentum']
-# Remap the forcing terms to AMR-wind forcing terms
-forcingdictmap = { 'buoyancy_boussinesq':'BoussinesqBuoyancy',
-                   'EarthCoriolis':'CoriolisForcing',
-                   'abl_forcing':'ABLForcing'
-                 }
-forcingterms = [forcingdictmap[x] for x in forcingterms]
-#forcingterms = ["BoussinesqBuoyancy","CoriolisForcing","ABLForcing"]
-
-# ABL forcing
-ablyaml = yamldata['realms'][0]['abl_forcing']['momentum']
-abl_forcing_height = ablyaml['heights']
-abl_velx = ablyaml['velocity_x'][0][1]
-abl_vely = ablyaml['velocity_y'][0][1]
-abl_velz = ablyaml['velocity_z'][0][1]
-abl_velocity = [abl_velx, abl_vely, abl_velz]
-
-# temperature of inversion layer
-if 'nalu_preprocess' in yamldata:
-    tempheights = yamldata['nalu_preprocess']['init_abl_fields']['temperature']['heights']
-    tempvals = yamldata['nalu_preprocess']['init_abl_fields']['temperature']['values']
-else:
-    tempheights = [0, 0.5*(x1[2]-x0[2])+x0[2], x1[2]]
-    tempvals = [temp, temp, temp]
-
-### --- set other defaults ----
-# Some stuff that wouldn't be specified in nalu
-cfl = 0.95
-
-# AMR defaults
-AMRdefaults=[]
-
-# Physics defaults
-physicsdefaults = [
-    ['incflo.use_godunov', 1, ''],
-    ['incflo.physics', 'ABL', ''],
-]
-
-# verbose defaults
-verbosedefaults = [
-    ['incflo.verbose', 3, 'incflo.level'],
-]
-
-# tolerances and debug defaults
-debugdefaults = [
-    ['amrex.fpe_trap_invalid', 0, 'Trap NaNs'],
-    ['diffusion.mg_verbose', 0, ''],
-    ['diffusion.mg_cg_verbose', 0, ''],
-    ['diffusion.mg_rtol', 1.0e-6, ''],
-    ['diffusion.mg_atol', 1.0e-8, ''],
-    ['mac_proj.mg_rtol', 1.0e-6, ''],
-    ['mac_proj.mg_atol', 1.0e-8, ''],
-    ['nodal_proj.mg_verbose', 0, ''],
-    ['nodal_proj.mg_rtol', 1.0e-6, ''],
-    ['nodal_proj.mg_atol', 1.0e-8, ''],
-]
-
-### ========== Start writing out AMR inputfile ==================
-stopheader="""
-#---------------------------------------#
-#            SIMULATION STOP            #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=stopheader[:-1], commentonly=True)
-writeAMRparam('time.stop_time', stop_time, outfile, comment="Max (simulated) time to evolve")
-writeAMRparam('time.max_step', max_step, outfile, comment="Max number of time steps")
-
-timeheader="""
-#---------------------------------------#
-#         TIME STEP COMPUTATION         #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=timeheader[:-1], commentonly=True)
-writeAMRparam('time.fixed_dt', fixed_dt, outfile, comment="Use this constant dt if > 0")
-writeAMRparam('time.cfl', cfl, outfile, comment="CFL factor")
-
-ioheader="""
-#---------------------------------------#
-#           INPUT AND OUTPUT            #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=ioheader[:-1], commentonly=True)
-writeAMRparam('time.plot_interval', plot_interval, outfile, comment="Steps between plot files")
-writeAMRparam('time.checkpoint_interval', checkpoint_interval, outfile, comment="Steps between checkpoint files")
-# Other defaults
-for default in AMRdefaults:
-    writeAMRparam(default[0], default[1], outfile, comment=default[2])
-
-physicsheader="""
-#---------------------------------------#
-#                PHYSICS                #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=physicsheader[:-1], commentonly=True)
-writeAMRparam('incflo.gravity', gravity, outfile, comment="Gravitational force (3D)")
-writeAMRparam('incflo.density', density, outfile, comment="Reference density")
-writeAMRparam('transport.viscosity', viscosity, outfile)
-writeAMRparam('transport.laminar_prandtl', laminar_prandtl, outfile)
-writeAMRparam('transport.turbulent_prandtl', turbulent_prandtl, outfile)
-writeAMRparam('turbulence.model', turbmodel, outfile)
-writeAMRparam('Smagorinsky_coeffs.Cs', smagCs, outfile)
-
-writeAMRparam([], [], outfile, comment="\n# ABL forcing", commentonly=True)
-writeAMRparam('ICNS.source_terms', forcingterms, outfile)
-writeAMRparam('BoussinesqBuoyancy.reference_temperature', temp, outfile)
-writeAMRparam('ABLForcing.abl_forcing_height', abl_forcing_height, outfile)
-writeAMRparam('incflo.velocity', abl_velocity, outfile)
-
-writeAMRparam([], [], outfile, comment="", commentonly=True)
-writeAMRparam('ABL.temperature_heights', tempheights[1:], outfile)
-writeAMRparam('ABL.temperature_values', tempvals[1:], outfile)
-writeAMRparam('ABL.kappa', kappa, outfile)
-writeAMRparam('ABL.surface_roughness_z0', z0, outfile)
-
-writeAMRparam([], [], outfile, comment="# Coriolis forcing", commentonly=True)
-writeAMRparam('CoriolisForcing.latitude', latitude, outfile)
-writeAMRparam('CoriolisForcing.east_vector', east_vector, outfile)
-writeAMRparam('CoriolisForcing.north_vector', north_vector, outfile)
-writeAMRparam('CoriolisForcing.rotational_time_period', secperrev, outfile)
-
-
-for default in physicsdefaults:
-    writeAMRparam(default[0], default[1], outfile, comment=default[2])
-
-amrheader="""
-#---------------------------------------#
-#       ADAPTIVE MESH REFINEMENT        #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=amrheader[:-1], commentonly=True)
-writeAMRparam('amr.n_cell', meshdimensions, outfile, comment="Grid cells at coarsest AMRlevel");
-writeAMRparam('amr.max_level', 0, outfile, comment="Max AMR level in hierarchy");
-
-geomheader="""
-#---------------------------------------#
-#               GEOMETRY                #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=geomheader[:-1], commentonly=True)
-writeAMRparam('geometry.prob_lo', x0, outfile, comment="Lo corner coordinates")
-writeAMRparam('geometry.prob_hi', x1, outfile, comment="Hi corner coordinates")
-writeAMRparam('geometry.is_periodic', is_periodic, outfile, comment="Periodicity x y z (0/1)")
-writeAMRparam([], [], outfile, comment="\n# Boundary conditions", commentonly=True)
-writeAMRparam('zlo.type', 'wall_model', outfile, isstring=True)
-writeAMRparam('zlo.temperature', zlotemp, outfile)
-writeAMRparam('zhi.type', 'slip_wall', outfile, isstring=True)
-writeAMRparam('zhi.temperature', zhiTgrad, outfile)
-
-verbheader="""
-#---------------------------------------#
-#               VERBOSITY               #
-#---------------------------------------#
-"""
-writeAMRparam([], [], outfile, comment=verbheader[:-1], commentonly=True)
-for default in verbosedefaults:
-    writeAMRparam(default[0], default[1], outfile, comment=default[2])
-
-debugheader="""
-#---------------------------------------#
-#               DEBUGGING               #
-#---------------------------------------#
"""
-writeAMRparam([], [], outfile, comment=debugheader[:-1], commentonly=True)
-writeAMRparam([], [], outfile, comment="##possible debugging parameters", commentonly=True)
-for default in debugdefaults:  # write defaults (as comments)
-    writeAMRparam(default[0], default[1], outfile, comment=default[2], prefix='# ')
-
-### ========== Finished writing out AMR inputfile ==================
-outfile.close()
diff --git a/tools/plot_lines.py b/tools/plot_lines.py
deleted file mode 100644
index f61cc9cfe1..0000000000
--- a/tools/plot_lines.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import numpy as np
-from matplotlib import pylab
-from sys import argv
-import os
-import shutil
-import glob
-import pandas as pd
-
-
-# inputs
-# an average utau
-utau = 0.53
-height = 1
-vel = 8
-T0 = 300
-kappa = 0.4
-nondim = 1
-
-a=pd.read_csv("averages.csv");
-
-utau2 = utau*utau
-#if nondim == 1:
-a.z /= height
-a.uu /= utau2
-a.vv /= utau2
-a.ww /= utau2
-a.uv /= utau2
-a.uw /= utau2
-a.vw /= utau2
-
-
-wind_dir = 180.0 + np.arctan2(a.u,a.v)*180.0/np.pi
-
-
-pylab.plot(np.sqrt(np.multiply(a.u,a.u) + np.multiply(a.v,a.v)),a.z, label='|U|')
-pylab.plot(a.u,a.z, label=r'$U_x$')
-pylab.plot(a.v,a.z, label=r'$U_y$')
-pylab.xlabel("Velocity (m/s)")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("velmean.pdf")
-
-
-pylab.clf()
-pylab.plot(a.w,a.z, label=r'$U_z$')
-pylab.xlabel("Velocity (m/s)")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("wmean.pdf")
-
-
-
-pylab.clf()
-pylab.plot(wind_dir,a.z, label='wind direction')
-pylab.xlabel("Wind direction (deg)")
-pylab.ylabel("Height (m)")
-#pylab.legend()
-pylab.grid()
-pylab.savefig("wind_dir.pdf")
-
-
-
-
-pylab.clf()
-pylab.plot(a.uu,a.z, label=r"$\langle u'u' \rangle$")
-pylab.plot(a.vv,a.z, label=r"$\langle v'v' \rangle$")
-pylab.plot(a.ww,a.z, label=r"$\langle w'w' \rangle$")
-pylab.xlabel(r"$\langle u_i'u_i' \rangle/u_\tau^2$")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("uu.pdf")
-
-
-pylab.clf()
-pylab.plot(a.uv,a.z, label=r"$\langle u'v' \rangle$")
-pylab.plot(a.uw,a.z, label=r"$\langle u'w' \rangle$")
-pylab.plot(a.vw,a.z, label=r"$\langle v'w' \rangle$")
-pylab.xlabel(r"$\langle u_i'u_j' \rangle/u_\tau^2$")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("uv.pdf")
-
-
-pylab.clf()
-pylab.plot(a.theta,a.z, label=r"$\langle T \rangle$")
-pylab.xlabel("Temperature [K]")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("T.pdf")
-
-umag = np.sqrt(a.u*a.u + a.v*a.v)
-dudz = np.gradient(umag,a.z[1]-a.z[0])
-#dudz = np.gradient(a.u,a.z[1]-a.z[0])
-pylab.clf()
-pylab.plot(a.z*kappa/utau*dudz,a.z, label=r"$\phi$")
-pylab.ylim(0, 600.0/height)
-pylab.xlim(0, 3)
-pylab.xlabel(r"$\frac{\kappa z}{u_\tau} \frac{\partial u}{ \partial z}$")
-pylab.ylabel("Height (m)")
-#pylab.legend()
-pylab.grid()
-pylab.savefig("phi.pdf")
-
-
-pylab.clf()
-pylab.plot(a.wuu,a.z, label=r"$\langle w'u'u' \rangle$")
-pylab.plot(a.wuv,a.z, label=r"$\langle w'u'v' \rangle$")
-pylab.plot(a.wuw,a.z, label=r"$\langle w'u'w' \rangle$")
-pylab.plot(a.wvv,a.z, label=r"$\langle w'v'v' \rangle$")
-pylab.plot(a.wvw,a.z, label=r"$\langle w'v'w' \rangle$")
-pylab.plot(a.www,a.z, label=r"$\langle w'w'w' \rangle$")
-pylab.xlabel(r"$\langle w'u_i'u_j' \rangle$")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("www.pdf")
-
-
-pylab.clf()
-pylab.plot(a.www/np.float_power(a.ww,3.0/2.0),a.z, label="")
-pylab.xlabel(r"$\langle w'w'w' \rangle/\langle w'w' \rangle^{3/2}$")
-pylab.ylabel("Height (m)")
-#pylab.legend()
-pylab.grid()
-pylab.savefig("www_o_ww.pdf")
-
-
-pylab.clf()
-pylab.plot(a.Tu,a.z, label=r"$\langle T'u' \rangle$")
-pylab.plot(a.Tv,a.z, label=r"$\langle T'v' \rangle$")
-pylab.plot(a.Tw,a.z, label=r"$\langle T'w' \rangle$")
-pylab.xlabel(r"$\langle T'u_i' \rangle$")
-pylab.ylabel("Height (m)")
-pylab.legend()
-pylab.grid()
-pylab.savefig("Tu.pdf")
diff --git a/tools/postproamrwind.py b/tools/postproamrwind.py
deleted file mode 100644
index 88c6a1bf9f..0000000000
--- a/tools/postproamrwind.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-from pylab import *
-
-# ----
-# Time averages the line_plot data given by dat, between times t1 and
-# t2.
-#
-def timeaverage(dat, t1, t2, Ncells, Ncomp):
-    t = dat[:,1]
-    datfiltered=dat[(t>=t1)&(t<=t2),:]
-    tstart = datfiltered[0,1]
-    tend = datfiltered[-1,1]
-    istart = int(datfiltered[0,0])
-    iend = int(datfiltered[-1,0])
-    Nlines = len(datfiltered)
-    Nblocks = int(Nlines/Ncells)
-    if (Nlines%Ncells) != 0:
-        print("Something wrong with number of blocks")
-    avgdat=zeros((Ncells, Ncomp))
-    for i in range(Nblocks-1):
-        i1 = i*Ncells
-        i2 = (i+1)*Ncells
-        i3 = (i+2)*Ncells
-        dt = datfiltered[i2, 1] - datfiltered[i1, 1]
-        avgdat=avgdat+0.5*dt*(datfiltered[i1:i2,:] + datfiltered[i2:i3,:])
-    return avgdat/(tend-tstart)
-
-# ----
-# Loads the line_plot.txt file given by filename, for all times
-# between t1 and t2.
-#
-# If the line_plot.txt file is missing a header, provide the input
-# headerinfo=[ncell ncomp]
-def loadfile(filename, t1, t2, headerinfo=[]):
-    if len(headerinfo)>0: hasheader=False
-    else: hasheader=True
-    # Load the header info
-    if hasheader:
-        with open(filename) as fp:
-            header = fp.readline()
-            line = fp.readline().strip().split(',')
-            ncell = int(line[0])
-            ncomp = int(line[1])
-    else:
-        ncell=headerinfo[0]
-        ncomp=headerinfo[1]
-    dat=[]
-    with open(filename) as fp:
-        if hasheader:  # Read past the headers
-            junk = fp.readline()
-            junk = fp.readline()
-            headerline= fp.readline()
-            colheaders=headerline.replace('#','',1).replace('\n','').split(',')
-        # Read the result
-        line = fp.readline()
-        iline= 1
-        while line:
-            splitline=line.strip().split(',')
-            time=float(splitline[1])
-            if (t1 <= time) and (time <= t2):
-                dat.append([float(x) for x in splitline])
-                #print("Adding time: %f"%time)
-            if (time>t2):
-                line=False
-            else:
-                line=fp.readline()
-                iline=iline+1
-    return array(dat), ncell, ncomp, colheaders
-
-# ----
-# Load the line_plot.txt file in filename, and average it from time t1
-# to t2.  If line_plot.txt has no headers, supply headerinfo=[ncell
-# ncomp]
-#
-def avglineplot(filename, t1,t2, headerinfo=[]):
-    dat, ncell, ncomp, colheaders=loadfile(filename, t1, t2, headerinfo)
-    tavgdat=timeaverage(dat, t1,t2, ncell, ncomp)
-    return tavgdat, colheaders
-
-# ----
-# Returns the variable(s) matching the variables in varnames
-# Behavior depends on varnames:
-# - If varnames is a single string (like varnames='z'), then returns
-#   only that variable on output.
-# - If varnames is a list of strings (varnames=['z', 'u_avg',
-#   'v_avg']), then returns a dictionary with those variables.
-#
-def extractvars(dat, colheaders, varnames):
-    if isinstance(varnames, list):
-        vardict=dict()
-        for var in varnames:
-            vardict[var] = dat[:,colheaders.index(var)]
-        return vardict
-    elif isinstance(varnames, str):
-        return dat[:, colheaders.index(varnames)]
-    else:
-        print("varnames = "+repr(varnames)+" is not valid input")
-
-# Split the data into z, u, v, w, and uu, vv, www columns
-def splitdat(dat):
-    z = dat[:,2]
-    u = dat[:,3]
-    v = dat[:,4]
-    w = dat[:,5]
-    uu = dat[:,8]
-    vv = dat[:,11]
-    ww = dat[:,13]
-    return z, u, v, w, uu, vv, ww
-

From f42ac27d37f9bb908e17db7734948bf9f2d114b5 Mon Sep 17 00:00:00 2001
From: Michael Kuhn
Date: Fri, 18 Jul 2025 14:03:10 -0600
Subject: [PATCH 12/12] add reference to frontend

---
 docs/sphinx/user/tools.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/sphinx/user/tools.rst b/docs/sphinx/user/tools.rst
index 5488ccad92..27e36ba419 100644
--- a/docs/sphinx/user/tools.rst
+++ b/docs/sphinx/user/tools.rst
@@ -15,6 +15,10 @@ are compiled as separate executables when AMR-Wind is compiled, are saved in ``t
 After compilation, these executables can be found in the build directory, where each
 has its own folder within ``tools/utilities/`` there.
 
+These capabilities are not meant to be exhaustive and are not maintained as actively as the
+solver source code. More tools for interacting with AMR-Wind data (pre- and post-processing)
+can be found in `amr-wind-frontend `_.
+
 Python scripts
 --------------
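A note for readers of the removed ``naluwind2amrwind.py`` converter above: the ``getdict``, ``getdict2``, and ``getdicwithname`` lookup helpers it calls are defined earlier in that file and fall outside this excerpt. The sketch below reconstructs their apparent behavior purely from the call sites (nalu-wind YAML stores ``options``, boundary conditions, and material-property specifications as lists of small dicts); the actual definitions may differ.

# Sketch only -- reconstructed from call sites such as
#   getdict(soloptsyaml['options'], 'user_constants')['gravity']
# The real helpers in naluwind2amrwind.py may differ.
def getdict(optionlist, key):
    # 'options' is a list of one-key dicts; return the value under `key`
    for entry in optionlist:
        if key in entry:
            return entry[key]
    return None

def getdict2(diclist, key):
    # Return the first dict in the list that contains `key`
    # (used for boundary_conditions entries)
    for d in diclist:
        if key in d:
            return d
    return None

def getdicwithname(diclist, name):
    # Return the first dict whose 'name' field matches, e.g. the
    # material_properties specification named 'viscosity'
    for d in diclist:
        if d.get('name') == name:
            return d
    return None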
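Similarly, ``writeAMRparam`` is the converter's emitter for AMR-Wind's ``key = value`` input format. A minimal sketch consistent with the call signatures above (``comment``, ``commentonly``, ``isstring``, ``prefix``); this is a guess at the interface, not the author's implementation.

def writeAMRparam(name, value, outfile, comment=None, commentonly=False,
                  isstring=False, prefix=''):
    # Hypothetical stand-in: comment-only calls pass name=[] and value=[]
    if commentonly:
        outfile.write('%s%s\n' % (prefix, comment or ''))
        return
    if value is None:  # e.g. east_vector/north_vector absent in the nalu input
        return
    vals = value if isinstance(value, list) else [value]
    body = ' '.join('"%s"' % v if isstring else str(v) for v in vals)
    line = '%s%-35s = %s' % (prefix, name, body)
    if comment:
        line += '   # ' + comment
    outfile.write(line + '\n')

# e.g. writeAMRparam('time.cfl', 0.95, outfile, comment="CFL factor")
# would emit a line like:  time.cfl = 0.95   # CFL factor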
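For reference, the ``timeaverage`` routine in the removed ``postproamrwind.py`` integrates consecutive ``Ncells``-row blocks with the trapezoidal rule and normalizes by the sampled time interval. A vectorized NumPy equivalent, assuming the filtered rows form complete blocks and that column 1 holds time:

import numpy as np

def timeaverage_np(dat, t1, t2, ncells, ncomp):
    # Keep rows inside the requested window (column 1 is time)
    d = dat[(dat[:, 1] >= t1) & (dat[:, 1] <= t2), :]
    blocks = d.reshape(-1, ncells, ncomp)  # one block per output step
    tb = blocks[:, 0, 1]                   # time stamp of each block
    dt = np.diff(tb)
    # Trapezoid between consecutive blocks, normalized by the full window
    trap = 0.5 * dt[:, None, None] * (blocks[:-1] + blocks[1:])
    return trap.sum(axis=0) / (tb[-1] - tb[0])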
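Typical usage of those removed helpers, based on their comment blocks; the file name, time window, and column names (``z``, ``u_avg``, ``v_avg``) are illustrative only:

# Hypothetical session; postproamrwind.py must be importable (it is
# removed by this patch series, so take it from an older checkout).
from postproamrwind import avglineplot, extractvars

# Average line_plot.txt over 15000 <= t <= 20000
tavgdat, colheaders = avglineplot("line_plot.txt", 15000.0, 20000.0)

# A single name returns one column; a list of names returns a dict
z = extractvars(tavgdat, colheaders, 'z')
profiles = extractvars(tavgdat, colheaders, ['z', 'u_avg', 'v_avg'])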