From 57ddb54808386c2befdd0a63673fef4f027e8ced Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Thu, 29 Aug 2024 09:41:59 +0200 Subject: [PATCH 01/15] FIX: adjust doc parse fcts in ci to new black formatting --- ci/parse_pyrad_name_mappings.py | 4 ++-- ci/parse_pyrad_products.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/parse_pyrad_name_mappings.py b/ci/parse_pyrad_name_mappings.py index 5418a3f4f..cda4092a3 100644 --- a/ci/parse_pyrad_name_mappings.py +++ b/ci/parse_pyrad_name_mappings.py @@ -8,7 +8,7 @@ FUNCTIONS_TO_PARSE = ['get_fieldname_pyart', 'get_datatype_odim', 'get_datatype_metranet', - 'get_fieldname_cosmo'] + 'get_fieldname_icon'] mainpath = Path(__file__).resolve().parent.parent OUT_DIRECTORY = str(Path(mainpath, 'doc', 'source', 'overview', 'mappings')) @@ -24,7 +24,7 @@ for line in srccode: if ('datatype' in line or 'field_name' in line) and '==' in line: pyrad_dtypes.append(line.split('==')[1].split(':')[ - 0].strip().replace("'", "")) + 0].strip().replace('"', '').replace("'","")) if 'return' in line: returnline = line.replace('return', '').strip() diff --git a/ci/parse_pyrad_products.py b/ci/parse_pyrad_products.py index 6f9a0cbcd..23e9bcf9e 100644 --- a/ci/parse_pyrad_products.py +++ b/ci/parse_pyrad_products.py @@ -109,7 +109,7 @@ def process_file(filepath): if reading_params and product: all_products[function][product]['parameters'] += " " + \ " ".join(line.replace('\n', ' ').split()) - if "prdcfg['type']" in line and '==' in line: + if ('prdcfg["type"]' in line or "prdcfg['type']" in line) and '==' in line: for product in all_products[function].keys(): if product in line: all_products[function][product]['link'] = (funcpath_to_docpath(filepath) + From ec59914e2c7988520a92a1513e71840c9a6fc40e Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Thu, 29 Aug 2024 10:24:49 +0200 Subject: [PATCH 02/15] ADD: add CSCS data retrieval functions from rainforest to pyrad --- src/pyrad_proc/pyrad/util/__init__.py | 49 +- .../pyrad/util/data_retrieval_utils.py | 729 ++++++++++++++++++ 2 files changed, 737 insertions(+), 41 deletions(-) create mode 100644 src/pyrad_proc/pyrad/util/data_retrieval_utils.py diff --git a/src/pyrad_proc/pyrad/util/__init__.py b/src/pyrad_proc/pyrad/util/__init__.py index d7334df54..fc86119f4 100755 --- a/src/pyrad_proc/pyrad/util/__init__.py +++ b/src/pyrad_proc/pyrad/util/__init__.py @@ -7,47 +7,6 @@ Functions to read and write data and configuration files. -Radar Utilities -=============== - -.. 
autosummary::
    :toctree: generated/

    get_data_along_rng
    get_data_along_azi
    get_data_along_ele
    get_ROI
    rainfall_accumulation
    time_series_statistics
    find_contiguous_times
    join_time_series
    get_range_bins_to_avg
    find_ray_index
    find_rng_index
    find_nearest_gate
    find_colocated_indexes
    get_fixed_rng_data
    time_avg_range
    get_closest_solar_flux
    create_sun_hits_field
    create_sun_retrieval_field
    compute_quantiles
    compute_quantiles_from_hist
    compute_quantiles_sweep
    compute_2d_hist
    compute_1d_stats
    compute_2d_stats
    compute_histogram
    compute_histogram_sweep
    belongs_roi_indices
    get_cercle_coords
    get_box_coords
    compute_profile_stats
    project_to_vertical

    quantiles_weighted
    ratio_bootstrapping
    compute_average_vad

"""

from .radar_utils import time_avg_range, get_closest_solar_flux  # noqa
@@ -68,4 +27,12 @@
from .stat_utils import quantiles_weighted, ratio_bootstrapping  # noqa

+from .data_retrieval_utils import retrieve_hzt_prod  # noqa
+from .data_retrieval_utils import retrieve_hzt_RT  # noqa
+from .data_retrieval_utils import retrieve_mch_prod  # noqa
+from .data_retrieval_utils import retrieve_mch_prod_RT  # noqa
+from .data_retrieval_utils import retrieve_CPCCV  # noqa
+from .data_retrieval_utils import retrieve_AQC_XLS  # noqa
+
+
__all__ = [s for s in dir() if not s.startswith("_")]
diff --git a/src/pyrad_proc/pyrad/util/data_retrieval_utils.py b/src/pyrad_proc/pyrad/util/data_retrieval_utils.py
new file mode 100644
index 000000000..524ec36a7
--- /dev/null
+++ b/src/pyrad_proc/pyrad/util/data_retrieval_utils.py
@@ -0,0 +1,729 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to retrieve MeteoSwiss products from the archives

Daniel Wolfensberger, Rebecca Gugerli
MeteoSwiss/EPFL
daniel.wolfensberger@epfl.ch, rebecca.gugerli@epfl.ch
December 2019, July 2022

NOTE: this code is very specific to MeteoSwiss and has no
practical use for external users
"""

import numpy as np
import os
import zipfile
import datetime
import glob
import socket
import subprocess
import fnmatch
import re
import pandas as pd  # used in retrieve_hzt_prod

# MeteoSwiss constants
OFFSET_CCS4 = [297, -100]
# Folder depends on server:
if ("lom" in socket.gethostname()) or ("meteoswiss" in socket.gethostname()):
    FOLDER_RADAR = "/srn/data/"
    FOLDER_ISO0 = "/srn/data/HZT/"
elif ("tsa" in socket.gethostname()) or ("balfrin" in socket.gethostname()):
    FOLDER_DATABASE = "/store/msrad/radar/radar_database/"
    FOLDER_RADAR = "/store/msrad/radar/swiss/data/"
    FOLDER_RADARH = "/store/msrad/radar/polarHR/data/"
    FOLDER_CPCCV = "/store/msrad/radar/cpc_validation/daily/"
    FOLDER_ISO0 = "/store/msrad/radar/swiss/data/"


def _make_timezone_aware(dt, tz=datetime.timezone.utc):
    """
    Makes a naive datetime timezone-aware by setting it to the provided timezone.
    If the datetime is already timezone-aware, it is returned unchanged.

    Parameters:
        dt (datetime): The datetime object to check.
        tz (timezone): The timezone to set if the datetime is naive (default is UTC).

    Returns:
        datetime: The timezone-aware datetime object.
    """
    if dt.tzinfo is None:
        return dt.replace(tzinfo=tz)
    return dt


def retrieve_hzt_prod(folder_out, start_time, end_time, pattern_type="shell"):
    """Retrieves the preprocessed HZT products from the CSCS repository for a
    specified time range, unzips them and places them in a specified folder

    Parameters
    ----------
    folder_out: str
        directory where to store the unzipped files
    start_time : datetime.datetime instance
        starting time of the time range
    end_time : datetime.datetime instance
        end time of the time range
    pattern_type: either 'shell' or 'regex' (optional)
        use 'shell' for standard shell patterns, which use * as wildcard
        use 'regex' for more advanced regex patterns

    Returns
    -------
    A list containing all the filepaths of the retrieved files

    """
    dt = datetime.timedelta(hours=1)
    delta = end_time - start_time
    if delta.total_seconds() == 0:
        times = [start_time]
    else:
        times = start_time + np.arange(int(delta.total_seconds() / (60 * 60)) + 2) * dt
    dates = []
    for t in times:
        dates.append(datetime.datetime(year=t.year, month=t.month, day=t.day))
    dates = np.unique(dates)

    t0 = _make_timezone_aware(start_time)
    t1 = _make_timezone_aware(end_time)

    all_files = []
    for i, d in enumerate(dates):
        if i == 0:
            start_time = datetime.datetime(
                year=t0.year, month=t0.month, day=t0.day, hour=t0.hour
            )
            # print('*first start time: ', start_time)
        else:
            start_time = datetime.datetime(year=d.year, month=d.month, day=d.day)
            # print('*all other start times', start_time)
        if i == len(dates) - 1:
            end_time = datetime.datetime(
                year=t1.year, month=t1.month, day=t1.day, hour=t1.hour
            ) + datetime.timedelta(hours=1)
        else:
            end_time = datetime.datetime(year=d.year, month=d.month, day=d.day, hour=23)
            # print('*end_time: ', end_time)

        files = _retrieve_hzt_prod_daily(folder_out, start_time, end_time, pattern_type)

        if files is not None:
            all_files.extend(files)

    return all_files


def retrieve_hzt_RT(tstep):
    """Retrieves the preprocessed HZT products.
    A version adapted to real-time implementation.
    Only used by the function retrieve_hzt_prod.

    Parameters
    ----------
    tstep: datetime
        timestep for which to retrieve the HZT products

    Returns
    -------
    A list containing all the filepaths of the retrieved files

    """
    # Get list of available files
    folder_in = FOLDER_ISO0
    content_zip = np.array(
        [
            c
            for c in os.listdir(folder_in)
            if (len(c.split(".")) == 2) and (int(c.split(".")[-1]) >= 800)
        ]
    )

    # HZT files are produced once an hour
    start_time = tstep.replace(minute=0)
    end_time = start_time + datetime.timedelta(hours=1)
    start_time = _make_timezone_aware(start_time)
    end_time = _make_timezone_aware(end_time)

    try:
        # Sort filelist to most recent prediction
        content_filt = np.array([c for c in content_zip if c.endswith("800")])
        times_filt = np.array(
            [
                datetime.datetime.strptime(c[3:12], "%y%j%H%M").replace(
                    tzinfo=datetime.timezone.utc
                )
                + datetime.timedelta(hours=int(c[-2::]))
                for c in content_filt
            ]
        )
        conditions = np.array(
            [np.logical_and((t >= start_time), (t <= end_time)) for t in times_filt]
        )

        content_filt = content_filt[conditions]
        times_filt = times_filt[conditions]
    except (ValueError, TypeError, IndexError):
        print("HZT data does not exist for " + start_time.strftime("%d-%b-%y"))
        return None

    # Check that an hourly estimate is available
    all_hours = pd.date_range(start=start_time, end=end_time, freq="H")
if len(all_hours) != len(times_filt): + content_times = np.array( + [ + datetime.datetime.strptime(c[3:12], "%y%j%H%M").replace( + tzinfo=datetime.timezone.utc + ) + + datetime.timedelta(hours=int(c[-2::])) + for c in content_zip + ] + ) + # Find time that is missing: + for hh in all_hours: + if hh not in times_filt: + hh_last = np.where(hh == content_times) + times_filt = np.sort(np.append(times_filt, content_times[hh_last][-1])) + content_filt = np.sort( + np.append(content_filt, content_zip[hh_last][-1]) + ) + + # Get a list of all files to retrieve + conditions = np.array( + [np.logical_and(t >= start_time, t <= end_time) for t in times_filt] + ) + + if not np.any(conditions): + msg = """ + No file was found corresponding to this format, verify pattern and product_name + """ + raise ValueError(msg) + + files = sorted( + np.array([folder_in + c for c in np.array(content_filt)[conditions]]) + ) + + return files + + +def _retrieve_hzt_prod_daily(folder_out, start_time, end_time, pattern_type="shell"): + """Retrieves the preprocessed HZT products from the CSCS repository for a day, + Only used in for the function retrieve_hzt_prod + + Parameters + ---------- + + folder_out: str + directory where to store the unzipped files + start_time : datetime.datetime instance + starting time of the time range + end_time : datetime.datetime instance + end time of the time range + pattern_type: either 'shell' or 'regex' (optional) + use 'shell' for standard shell patterns, which use * as wildcard + use 'regex' for more advanced regex patterns + + Returns + ------- + A list containing all the filepaths of the retrieved files + + """ + + folder_out += "/" + start_time = _make_timezone_aware(start_time) + end_time = _make_timezone_aware(end_time) + + suffix = str(start_time.year)[-2:] + str(start_time.timetuple().tm_yday).zfill(3) + folder_in = FOLDER_ISO0 + str(start_time.year) + "/" + suffix + "/" + name_zipfile = "HZT" + suffix + ".zip" + + try: + # Get list of files in zipfile + zipp = zipfile.ZipFile(folder_in + name_zipfile) + content_zip = np.sort(np.array(zipp.namelist())) + + # Sort filelist to most recent prediction + content_filt = np.array([c for c in content_zip if c.endswith("800")]) + times_filt = np.array( + [ + datetime.datetime.strptime(c[3:12], "%y%j%H%M") + + datetime.timedelta(hours=int(c[-2::])) + for c in content_filt + ] + ) + content_filt = content_filt[ + np.where((times_filt >= start_time) & (times_filt <= end_time)) + ] + times_filt = times_filt[ + np.where((times_filt >= start_time) & (times_filt <= end_time)) + ] + except (ValueError, TypeError, IndexError): + print( + "Zip file with HZT data does not exist for " + + start_time.strftime("%d-%b-%y") + ) + files = None + return + + # Check that an hourly estimate is available + all_hours = pd.date_range(start=start_time, end=end_time, freq="H") + + if len(all_hours) != len(times_filt): + content_times = np.array( + [ + datetime.datetime.strptime(c[3:12], "%y%j%H%M") + + datetime.timedelta(hours=int(c[-2::])) + for c in content_zip + ] + ) + # Find time that is missing: + for hh in all_hours: + if hh not in times_filt: + hh_last = np.where(hh == content_times) + times_filt = np.sort(np.append(times_filt, content_times[hh_last][-1])) + content_filt = np.sort( + np.append(content_filt, content_zip[hh_last][-1]) + ) + + # Get a list of all files to retrieve + conditions = np.array( + [np.logical_and(t >= start_time, t <= end_time) for t in times_filt] + ) + + if not np.any(conditions): + msg = """ + No file was found corresponding 
to this format, verify pattern and product_name
        """
        raise ValueError(msg)

    files_to_retrieve = " ".join(content_filt[conditions])

    # Check if files are already unzipped (saves time if they already exist)
    for fi in content_filt[conditions]:
        if os.path.exists(folder_out + fi):
            files_to_retrieve = files_to_retrieve.replace(fi, "")

    # Only unzip if at least one file does not exist
    if len(files_to_retrieve.strip()) > 0:
        print("Unzipping: " + files_to_retrieve)
        cmd = 'unzip -j -o -qq "{:s}" {:s} -d {:s}'.format(
            folder_in + name_zipfile, files_to_retrieve, folder_out
        )
        subprocess.call(cmd, shell=True)

    files = sorted(np.array([folder_out + c for c in content_filt[conditions]]))

    return files


def retrieve_mch_prod(
    folder_out,
    start_time,
    end_time,
    product_name,
    pattern=None,
    pattern_type="shell",
    sweeps=None,
):
    """Retrieves radar data from the CSCS repository for a specified
    time range, unzips them and places them in a specified folder

    Parameters
    ----------
    folder_out: str
        directory where to store the unzipped files
    start_time : datetime.datetime instance
        starting time of the time range
    end_time : datetime.datetime instance
        end time of the time range
    product_name: str
        name of the product, as stored on CSCS, e.g. RZC, CPCH, MZC, BZC...
    pattern: str
        pattern constraint on file names, can be used for products which contain
        multiple filetypes, e.g. CPCH folders contain both rda and gif files;
        if only gifs are wanted: pattern = '*.gif'
    pattern_type: either 'shell' or 'regex' (optional)
        use 'shell' for standard shell patterns, which use * as wildcard
        use 'regex' for more advanced regex patterns
    sweeps: list of int (optional)
        For polar products, specifies which sweeps (elevations) must be
        retrieved, if not specified all available sweeps will be retrieved

    Returns
    -------
    A list containing all the filepaths of the retrieved files

    """
    start_time = _make_timezone_aware(start_time)
    end_time = _make_timezone_aware(end_time)

    if product_name == "ZZW" or product_name == "ZZP":  # no vpr for PPM and WEI
        product_name = "ZZA"

    if product_name == "CPC":
        folder_out = folder_out + "/CPC"
    if product_name == "CPCH":
        folder_out = folder_out + "/CPCH"

    if not os.path.exists(folder_out):
        os.makedirs(folder_out)

    # Check if times are aware or naive
    if start_time.tzinfo is None:
        start_time = start_time.replace(tzinfo=datetime.timezone.utc)
    if end_time.tzinfo is None:
        end_time = end_time.replace(tzinfo=datetime.timezone.utc)

    dt = datetime.timedelta(minutes=5)
    delta = end_time - start_time
    if delta.total_seconds() == 0:
        times = [start_time]
    else:
        times = start_time + np.arange(int(delta.total_seconds() / (5 * 60)) + 1) * dt
    dates = []
    for t in times:
        dates.append(datetime.datetime(year=t.year, month=t.month, day=t.day))
    dates = np.unique(dates)

    t0 = start_time
    t1 = end_time

    all_files = []
    for i, d in enumerate(dates):
        if i == 0:
            start_time = t0
        else:
            start_time = datetime.datetime(
                year=d.year, month=d.month, day=d.day, tzinfo=datetime.timezone.utc
            )
        if i == len(dates) - 1:
            end_time = t1
        else:
            end_time = datetime.datetime(
                year=d.year,
                month=d.month,
                day=d.day,
                hour=23,
                minute=59,
                tzinfo=datetime.timezone.utc,
            )
        files = _retrieve_prod_daily(
            folder_out,
            start_time,
            end_time,
            product_name,
            pattern,
            pattern_type,
            sweeps,
        )

        all_files.extend(files)

    return all_files


def retrieve_mch_prod_RT(
    time, product_name, pattern=None, pattern_type="shell", sweeps=None
):
    """Adapted function from rainforest.common.retrieve_data.
    Here, it reads the data per timestep; in real-time operation
    the radar data is not zipped.

    Args:
        time (datetime object): timestamp to extract
        product_name (string): Name of the product to be extracted
        pattern (string, optional): pattern constraint on file names. Defaults to None.
        pattern_type (string, optional): either 'shell' or 'regex'. Defaults to 'shell'.
        sweeps (list): List of sweeps if not all want to be extracted. Defaults to None.

    Raises:
        ValueError: If no data is found

    Returns:
        list: the list of retrieved file paths
    """
    time = _make_timezone_aware(time)

    # Get all files
    folder_radar = FOLDER_RADAR
    folder_in = folder_radar + product_name + "/"

    # Get list of available files
    content_zip = np.array(os.listdir(folder_in))

    if pattern is not None:
        if pattern_type == "shell":
            content_zip = [
                c for c in content_zip if fnmatch.fnmatch(os.path.basename(c), pattern)
            ]
        elif pattern_type == "regex":
            content_zip = [
                c
                for c in content_zip
                if re.match(pattern, os.path.basename(c)) is not None
            ]
        else:
            raise ValueError('Unknown pattern_type, must be either "shell" or "regex".')

    # Derive datetime of each file
    times_zip = np.array(
        [
            datetime.datetime.strptime(c[3:12], "%y%j%H%M").replace(
                tzinfo=datetime.timezone.utc
            )
            for c in content_zip
        ]
    )

    # Get a list of all files to retrieve
    conditions = times_zip == time

    # Filter on sweeps:
    if sweeps is not None:
        sweeps_zip = np.array([int(c[-3:]) for c in content_zip])
        # Get a list of all files to retrieve
        conditions_sweep = np.array([s in sweeps for s in sweeps_zip])
        conditions = np.logical_and(conditions, conditions_sweep)

    if not np.any(conditions):
        msg = """
        No file was found corresponding to this format, verify pattern and product_name
        """
        raise ValueError(msg)

    files = sorted(np.array([folder_in + c for c in np.array(content_zip)[conditions]]))

    return files
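

# A usage sketch (not part of the module): retrieving the Cartesian
# precipitation product RZC for a one-hour window. The output folder and
# times are arbitrary examples; the CSCS archive must be reachable:
#
#     import datetime
#     files = retrieve_mch_prod(
#         "/tmp/rzc", datetime.datetime(2024, 8, 29, 9, 0),
#         datetime.datetime(2024, 8, 29, 10, 0), "RZC")


def _retrieve_prod_daily(
    folder_out,
    start_time,
    end_time,
    product_name,
    pattern=None,
    pattern_type="shell",
    sweeps=None,
):
    """This is a version that works only for a given day (i.e.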
start and end + time on the same day) + """ + start_time = _make_timezone_aware(start_time) + end_time = _make_timezone_aware(end_time) + + if product_name[0:2] == "MH": + folder_radar = FOLDER_RADARH + else: + folder_radar = FOLDER_RADAR + + folder_out += "/" + + suffix = str(start_time.year)[-2:] + str(start_time.timetuple().tm_yday).zfill(3) + folder_in = folder_radar + str(start_time.year) + "/" + suffix + "/" + name_zipfile = product_name + suffix + ".zip" + + # Get list of files in zipfile + zipp = zipfile.ZipFile(folder_in + name_zipfile) + content_zip = np.array(zipp.namelist()) + + if pattern is not None: + if pattern_type == "shell": + content_zip = [ + c for c in content_zip if fnmatch.fnmatch(os.path.basename(c), pattern) + ] + elif pattern_type == "regex": + content_zip = [ + c + for c in content_zip + if re.match(os.path.basename(c), pattern) is not None + ] + else: + raise ValueError('Unknown pattern_type, must be either "shell" or "regex".') + + content_zip = np.array(content_zip) + + times_zip = np.array( + [ + datetime.datetime.strptime(c[3:12], "%y%j%H%M").replace( + tzinfo=datetime.timezone.utc + ) + for c in content_zip + ] + ) + + # Get a list of all files to retrieve + conditions = np.array( + [np.logical_and(t >= start_time, t <= end_time) for t in times_zip] + ) + + # Filter on sweeps: + if sweeps is not None: + sweeps_zip = np.array([int(c[-3:]) for c in content_zip]) + # Get a list of all files to retrieve + conditions_sweep = np.array([s in sweeps for s in sweeps_zip]) + conditions = np.logical_and(conditions, conditions_sweep) + + if not np.any(conditions): + msg = """ + No file was found corresponding to this format, verify pattern and product_name + """ + raise ValueError(msg) + + # Create string to retrieve files over unzip + files_to_retrieve = " ".join(content_zip[conditions]) + + # Check if files are already unzipped (saves time if they already exist) + for fi in content_zip[conditions]: + if os.path.exists(folder_out + fi): + files_to_retrieve = files_to_retrieve.replace(fi, "") + + # Only unzip if at least one file does not exist + if len(files_to_retrieve.strip()) > 0: + cmd = 'unzip -j -o -qq "{:s}" {:s} -d {:s}'.format( + folder_in + name_zipfile, files_to_retrieve, folder_out + ) + subprocess.call(cmd, shell=True) + + files = sorted(np.array([folder_out + c for c in content_zip[conditions]])) + + return files + + +def retrieve_CPCCV(time, stations): + """Retrieves cross-validation CPC data for a set of stations from + the xls files prepared by Yanni + + Parameters + ---------- + + time : datetime.datetime instance + starting time of the time range + stations : list of str + list of weather stations at which to retrieve the CPC.CV data + + Returns + ------- + A numpy array corresponding at the CPC.CV estimations at every specified + station + """ + + time = _make_timezone_aware(time) + + from ..io.read_data_other import read_xls + + year = time.year + + folder = FOLDER_CPCCV + str(year) + "/" + + files = sorted([f for f in glob.glob(folder + "*.xls") if ".s" not in f]) + + def _start_time(fname): + bname = os.path.basename(fname) + times = bname.split(".")[1] + tend = times.split("_")[1] + return datetime.datetime.strptime(tend, "%Y%m%d%H00") + + tend = np.array([_start_time(f) for f in files]) + + match = np.where(time < tend)[0] + + if not len(match): + print("Could not find CPC CV file for time {:s}".format(time)) + return np.zeros((len(stations))) + np.nan + + data = read_xls(files[match[0]]) + + hour = int(datetime.datetime.strftime(time, 
"%Y%m%d%H00")) + idx = np.where(np.array(data["time.stamp"]) == hour)[0] + data_hour = data.iloc[idx] + data_hour_stations = data_hour.iloc[ + np.isin(np.array(data_hour["nat.abbr"]), stations) + ] + cpc_cv = [] + cpc_xls = [] + for sta in stations: + if sta in np.array(data_hour_stations["nat.abbr"]): + cpc_cv.append( + float( + data_hour_stations.loc[data_hour_stations["nat.abbr"] == sta][ + "CPC.CV" + ] + ) + ) + cpc_xls.append( + float( + data_hour_stations.loc[data_hour_stations["nat.abbr"] == sta]["CPC"] + ) + ) + else: + cpc_cv.append(np.nan) + cpc_xls.append(np.nan) + + return np.array(cpc_cv), np.array(cpc_xls) + + +def retrieve_AQC_XLS(time, stations): + """Retrieves cross-validation CPC data for a set of stations from + the xls files prepared by Yanni + + Parameters + ---------- + + time : datetime.datetime instance + starting time of the time range + stations : list of str + list of weather stations at which to retrieve the CPC.CV data + + Returns + ------- + A numpy array corresponding at the CPC.CV estimations at every specified + station + """ + from ..io.read_data_other import read_xls + + time = _make_timezone_aware(time) + year = time.year + + folder = FOLDER_CPCCV + str(year) + "/" + + files = sorted([f for f in glob.glob(folder + "*.xls") if ".s" not in f]) + + def _start_time(fname): + bname = os.path.basename(fname) + times = bname.split(".")[1] + tend = times.split("_")[1] + return datetime.datetime.strptime(tend, "%Y%m%d%H00") + + tend = np.array([_start_time(f) for f in files]) + + match = np.where(time < tend)[0] + + if not len(match): + print("Could not find CPC CV file for time {:s}".format(time)) + return np.zeros((len(stations))) + np.nan + + data = read_xls(files[match[0]]) + + hour = int(datetime.datetime.strftime(time, "%Y%m%d%H00")) + idx = np.where(np.array(data["time.stamp"]) == hour)[0] + data_hour = data.iloc[idx] + data_hour_stations = data_hour.iloc[ + np.isin(np.array(data_hour["nat.abbr"]), stations) + ] + aqc_xls = [] + for sta in stations: + if sta in np.array(data_hour_stations["nat.abbr"]): + aqc_xls.append( + float( + data_hour_stations.loc[data_hour_stations["nat.abbr"] == sta]["AQC"] + ) + ) + else: + aqc_xls.append(np.nan) + + return np.array(aqc_xls) From bac97778d3651c457148eb8884fe986828f60213 Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Thu, 29 Aug 2024 12:10:21 +0200 Subject: [PATCH 03/15] Improved doc by adding more subdivisions in categories --- .gitignore | 1 + doc/source/API/index.rst | 3 +- .../overview/mappings/pyrad_to_icon.txt | 5 ++ .../overview/mappings/pyrad_to_odim.txt | 2 +- .../overview/mappings/pyrad_to_pyart.txt | 4 +- src/pyart | 2 +- src/pyrad_proc/pyrad/graph/plots_grid.py | 3 +- src/pyrad_proc/pyrad/io/io_aux.py | 1 - src/pyrad_proc/pyrad/io/read_data_other.py | 25 ++++++++ src/pyrad_proc/pyrad/proc/__init__.py | 1 + src/pyrad_proc/pyrad/util/__init__.py | 62 +++++++++++++++++++ 11 files changed, 102 insertions(+), 7 deletions(-) create mode 100644 doc/source/overview/mappings/pyrad_to_icon.txt diff --git a/.gitignore b/.gitignore index 3f7f3fa63..11269497a 100644 --- a/.gitignore +++ b/.gitignore @@ -92,6 +92,7 @@ src/pyrad_proc/pyrad/version.py ####################### doc/source/overview/list_products.rst doc/source/overview/list_process.rst +doc/source/API/generated # radar data files # #################### diff --git a/doc/source/API/index.rst b/doc/source/API/index.rst index 6af536620..b5ccf435a 100644 --- a/doc/source/API/index.rst +++ b/doc/source/API/index.rst @@ -22,4 +22,5 @@ Documentation is 
broken down by directory and module. io proc prod - util \ No newline at end of file + util + diff --git a/doc/source/overview/mappings/pyrad_to_icon.txt b/doc/source/overview/mappings/pyrad_to_icon.txt new file mode 100644 index 000000000..d16d27bb7 --- /dev/null +++ b/doc/source/overview/mappings/pyrad_to_icon.txt @@ -0,0 +1,5 @@ +pyrad_name,icon_name +temperature,T +wind_speed,FF +wind_direction,DD +vertical_wind_shear,WSHEAR diff --git a/doc/source/overview/mappings/pyrad_to_odim.txt b/doc/source/overview/mappings/pyrad_to_odim.txt index f74b2e9c4..397714885 100644 --- a/doc/source/overview/mappings/pyrad_to_odim.txt +++ b/doc/source/overview/mappings/pyrad_to_odim.txt @@ -83,7 +83,7 @@ PIDAc,PIDAC,corrected_path_integrated_differential_attenuation TEMP,TEMP,temperature ISO0,ISO0,iso0 H_ISO0,HISO0,height_over_iso0 -cosmo_index,COSMOIND,cosmo_index +icon_index,ICONIND,icon_index hzt_index,HZTIND,hzt_index ml,ML,melting_layer VIS,VIS,visibility diff --git a/doc/source/overview/mappings/pyrad_to_pyart.txt b/doc/source/overview/mappings/pyrad_to_pyart.txt index b47ba709d..2cb8faa3b 100644 --- a/doc/source/overview/mappings/pyrad_to_pyart.txt +++ b/doc/source/overview/mappings/pyrad_to_pyart.txt @@ -124,7 +124,7 @@ H_ISO0,height_over_iso0 H_ISO0c,corrected_height_over_iso0 HZT,iso0_height HZTc,corrected_iso0_height -cosmo_index,cosmo_index +icon_index,icon_index hzt_index,hzt_index ml,melting_layer VIS,visibility @@ -150,6 +150,8 @@ occurrence,occurrence freq_occu,frequency_of_occurrence RR,radar_estimated_rain_rate RR_MP,Marshall_Palmer_radar_estimated_rain_rate +RR_Z,radar_reflectivity_estimated_rain_rate +RR_KDP,radar_kdp_estimated_rain_rate RR_flag,radar_estimated_rain_rate_flag RRc,corrected_radar_estimated_rain_rate Raccu,rainfall_accumulation diff --git a/src/pyart b/src/pyart index 9ab36e9f7..84d8b9f3d 160000 --- a/src/pyart +++ b/src/pyart @@ -1 +1 @@ -Subproject commit 9ab36e9f7b5d932eb8dbf80c5d587fe9543a9e17 +Subproject commit 84d8b9f3dd3e6d8bd095703e8cf9b73087c96ec2 diff --git a/src/pyrad_proc/pyrad/graph/plots_grid.py b/src/pyrad_proc/pyrad/graph/plots_grid.py index 481633821..344a5fbd3 100755 --- a/src/pyrad_proc/pyrad/graph/plots_grid.py +++ b/src/pyrad_proc/pyrad/graph/plots_grid.py @@ -14,8 +14,7 @@ plot_longitude_slice plot_cross_section plot_dda_map - plot_dda_latitude_slice - plot_dda_longitude_slice + plot_dda_slice """ from warnings import warn diff --git a/src/pyrad_proc/pyrad/io/io_aux.py b/src/pyrad_proc/pyrad/io/io_aux.py index d96182046..dccdd2ea2 100755 --- a/src/pyrad_proc/pyrad/io/io_aux.py +++ b/src/pyrad_proc/pyrad/io/io_aux.py @@ -2400,7 +2400,6 @@ def get_fieldname_pyart(datatype): field_name = "quant90_dealiased_velocity" elif datatype == "quant95dealV": field_name = "quant95_dealiased_velocity" - else: raise ValueError("ERROR: Unknown data type " + datatype) diff --git a/src/pyrad_proc/pyrad/io/read_data_other.py b/src/pyrad_proc/pyrad/io/read_data_other.py index 111adf50d..ed99cd844 100755 --- a/src/pyrad_proc/pyrad/io/read_data_other.py +++ b/src/pyrad_proc/pyrad/io/read_data_other.py @@ -37,12 +37,14 @@ read_intercomp_scores_ts_old_v0 read_selfconsistency read_antenna_pattern + read_xls """ import os import glob import datetime +import pandas as pd import csv import xml.etree.ElementTree as et from warnings import warn @@ -1915,3 +1917,26 @@ def read_antenna_pattern(fname, linear=False, twoway=False): pattern["attenuation"] = 10.0 ** (pattern["attenuation"] / 10.0) return pattern + + +def read_xls(xls_file): + """Reads an excel file such as those used 
for CPC validation
+
+    Parameters
+    ----------
+    xls_file : str
+        Full path of the excel file to be read
+
+    Returns
+    -------
+    The excel file as a Pandas dataframe
+    """
+
+    data = pd.read_excel(xls_file, sheet_name=None)
+    keys = list(data.keys())
+    hourly_keys = []
+    for k in keys:
+        if 'Data Hourly' in k:
+            hourly_keys.append(k)
+    out = pd.concat([data[k] for k in hourly_keys])
+    return out
diff --git a/src/pyrad_proc/pyrad/proc/__init__.py b/src/pyrad_proc/pyrad/proc/__init__.py
index be15534e5..31b085c5a 100755
--- a/src/pyrad_proc/pyrad/proc/__init__.py
+++ b/src/pyrad_proc/pyrad/proc/__init__.py
@@ -206,6 +206,7 @@
     process_windshear
     process_windshear_lidar
     process_vad
+    process_dda

Time series functions
====================
diff --git a/src/pyrad_proc/pyrad/util/__init__.py b/src/pyrad_proc/pyrad/util/__init__.py
index fc86119f4..0ae9c9ff2 100755
--- a/src/pyrad_proc/pyrad/util/__init__.py
+++ b/src/pyrad_proc/pyrad/util/__init__.py
@@ -7,6 +7,68 @@
 Functions to read and write data and configuration files.

+Radar Utilities
+===========================
+
+.. autosummary::
+    :toctree: generated/
+
+    time_avg_range
+    get_closest_solar_flux
+    create_sun_hits_field
+    create_sun_retrieval_field
+    compute_histogram
+    compute_histogram_sweep
+    compute_quantiles
+    compute_quantiles_sweep
+    compute_quantiles_from_hist
+    get_range_bins_to_avg
+    find_ray_index
+    find_rng_index
+    find_nearest_gate
+    find_colocated_indexes
+    find_contiguous_times
+    compute_2d_hist
+    compute_1d_stats
+    compute_2d_stats
+    time_series_statistics
+    join_time_series
+    rainfall_accumulation
+    get_ROI
+    belongs_roi_indices
+    project_to_vertical
+    get_data_along_rng
+    get_data_along_azi
+    get_data_along_ele
+    get_fixed_rng_data
+    get_cercle_coords
+    get_box_coords
+    compute_profile_stats
+    compute_average_vad
+
+Statistical Utilities
+===========================
+
+.. autosummary::
+    :toctree: generated/
+
+    quantiles_weighted
+    ratio_bootstrapping
+
+Data Retrieval Utilities
+===========================
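+
+These utilities access the MeteoSwiss archives at CSCS; they assume the
+MeteoSwiss folder structure and are of little use in other environments.
+A minimal sketch (the output folder and time range are arbitrary examples)::
+
+    import datetime
+    from pyrad.util import retrieve_hzt_prod
+
+    hzt_files = retrieve_hzt_prod(
+        "/tmp/hzt", datetime.datetime(2024, 8, 29, 6, 0),
+        datetime.datetime(2024, 8, 29, 12, 0))
+
+.. 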
autosummary:: + :toctree: generated/ + + retrieve_hzt_prod + retrieve_hzt_RT + retrieve_mch_prod + retrieve_mch_prod_RT + retrieve_CPCCV + retrieve_AQC_XLS + + """ from .radar_utils import time_avg_range, get_closest_solar_flux # noqa From dc3c523f4d3913bda85bb358d75cd2276d99142e Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Thu, 29 Aug 2024 12:16:01 +0200 Subject: [PATCH 04/15] FIX: lint fixes --- src/pyart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pyart b/src/pyart index 84d8b9f3d..970baf985 160000 --- a/src/pyart +++ b/src/pyart @@ -1 +1 @@ -Subproject commit 84d8b9f3dd3e6d8bd095703e8cf9b73087c96ec2 +Subproject commit 970baf985aa2648d48faadab5c599afa80b6bfd8 From 1c736980c9ea686ab4252304c6cadd92d186ddf3 Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Thu, 29 Aug 2024 15:44:27 +0200 Subject: [PATCH 05/15] merge with master --- src/pyrad_proc/pyrad/util/data_retrieval_utils.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/pyrad_proc/pyrad/util/data_retrieval_utils.py b/src/pyrad_proc/pyrad/util/data_retrieval_utils.py index 2cf86f12d..dafa51157 100644 --- a/src/pyrad_proc/pyrad/util/data_retrieval_utils.py +++ b/src/pyrad_proc/pyrad/util/data_retrieval_utils.py @@ -590,11 +590,6 @@ def _retrieve_prod_daily( subprocess.call(cmd, shell=True) files = sorted(np.array([folder_out + c for c in content_zip[conditions]])) -<<<<<<< HEAD - -======= - ->>>>>>> master return files From a2911fedab527cc98836965cdcfa63745b25b623 Mon Sep 17 00:00:00 2001 From: radarv Date: Thu, 5 Sep 2024 15:23:38 +0000 Subject: [PATCH 06/15] FIX: #70 --- src/pyrad_proc/pyrad/proc/process_intercomp.py | 9 ++++----- src/pyrad_proc/pyrad/prod/process_intercomp_products.py | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/pyrad_proc/pyrad/proc/process_intercomp.py b/src/pyrad_proc/pyrad/proc/process_intercomp.py index ea9ba6204..2d6dcbb83 100755 --- a/src/pyrad_proc/pyrad/proc/process_intercomp.py +++ b/src/pyrad_proc/pyrad/proc/process_intercomp.py @@ -2034,9 +2034,9 @@ def process_intercomp_time_avg(procstatus, dscfg, radar_list=None): ): warn("Unable to compare radar time avg fields. 
" + "Fields missing") return None, None - + + dscfg["global_data"].update({"timeinfo": dscfg["timeinfo"]}) if not dscfg["initialized"]: - dscfg["global_data"].update({"timeinfo": dscfg["timeinfo"]}) dscfg["global_data"].update( {"rad1_name": dscfg["RadarName"][ind_radar_list[0]]} ) @@ -2044,7 +2044,7 @@ def process_intercomp_time_avg(procstatus, dscfg, radar_list=None): {"rad2_name": dscfg["RadarName"][ind_radar_list[1]]} ) dscfg["initialized"] = 1 - + refl1 = radar1.fields[rad1_refl_field]["data"] refl2 = radar2.fields[rad2_refl_field]["data"] @@ -2263,7 +2263,7 @@ def process_intercomp_time_avg(procstatus, dscfg, radar_list=None): intercomp_dict["rad1_time"] = np.empty( len(rad1_ray_ind), dtype=datetime.datetime ) - intercomp_dict["rad1_time"][:] = dscfg["global_data"]["timeinfo"] + intercomp_dict["rad1_time"][:] = dscfg["timeinfo"] intercomp_dict["rad1_ray_ind"] = rad1_ray_ind intercomp_dict["rad1_rng_ind"] = rad1_rng_ind intercomp_dict["rad1_ele"] = radar1.elevation["data"][rad1_ray_ind] @@ -2402,7 +2402,6 @@ def process_intercomp_time_avg(procstatus, dscfg, radar_list=None): "timeinfo": dscfg["global_data"]["timeinfo"], "final": True, } - return new_dataset, None diff --git a/src/pyrad_proc/pyrad/prod/process_intercomp_products.py b/src/pyrad_proc/pyrad/prod/process_intercomp_products.py index 92bb3b783..67f238ed6 100755 --- a/src/pyrad_proc/pyrad/prod/process_intercomp_products.py +++ b/src/pyrad_proc/pyrad/prod/process_intercomp_products.py @@ -142,7 +142,6 @@ def generate_intercomp_products(dataset, prdcfg): timeinfo=dataset["timeinfo"], timeformat="%Y%m%d", ) - fname = savedir + fname[0] write_colocated_data_time_avg(dataset["intercomp_dict"], fname) print("saved colocated time averaged data file: " + fname) From 3e751738dc226dde090a53a651f546c9a7fef8fc Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Sun, 8 Sep 2024 11:12:30 +0200 Subject: [PATCH 07/15] ENH: improved main_precipitation_comparison.py to be be more generic --- .../scripts/main_precipitation_comparison.py | 137 ++--- src/scripts/common_colocated_gates.py | 158 ------ src/scripts/main_precipitation_comparison.py | 172 ------- src/scripts/main_process_cosmo.py | 136 ----- src/scripts/main_process_cosmo_rt.py | 151 ------ src/scripts/main_process_data.py | 175 ------- src/scripts/main_process_data_birds.py | 266 ---------- src/scripts/main_process_data_period.py | 182 ------- src/scripts/main_process_data_rt.py | 151 ------ src/scripts/main_process_data_trt.py | 478 ------------------ src/scripts/main_process_euclid_data.py | 298 ----------- src/scripts/main_process_gecsx.py | 177 ------- src/scripts/movie_maker.py | 148 ------ src/scripts/rewrite_monitoring.py | 236 --------- 14 files changed, 73 insertions(+), 2792 deletions(-) delete mode 100755 src/scripts/common_colocated_gates.py delete mode 100755 src/scripts/main_precipitation_comparison.py delete mode 100644 src/scripts/main_process_cosmo.py delete mode 100644 src/scripts/main_process_cosmo_rt.py delete mode 100755 src/scripts/main_process_data.py delete mode 100644 src/scripts/main_process_data_birds.py delete mode 100755 src/scripts/main_process_data_period.py delete mode 100755 src/scripts/main_process_data_rt.py delete mode 100755 src/scripts/main_process_data_trt.py delete mode 100644 src/scripts/main_process_euclid_data.py delete mode 100755 src/scripts/main_process_gecsx.py delete mode 100644 src/scripts/movie_maker.py delete mode 100644 src/scripts/rewrite_monitoring.py diff --git a/src/pyrad_proc/scripts/main_precipitation_comparison.py 
b/src/pyrad_proc/scripts/main_precipitation_comparison.py
index 8702c1a0b..6f9a254a7 100755
--- a/src/pyrad_proc/scripts/main_precipitation_comparison.py
+++ b/src/pyrad_proc/scripts/main_precipitation_comparison.py
@@ -18,6 +18,7 @@
 import glob
 from warnings import warn
 import os
+import argparse

 import numpy as np

@@ -27,71 +28,91 @@
 print(__doc__)

-
+def parse_args():
+    """Parse command-line arguments."""
+    parser = argparse.ArgumentParser(description="Compare radar data with a point measurement sensor.")
+    parser.add_argument('directory',
+                        help="Directory where the precipitation products are stored")
+    parser.add_argument('--parameters', default=["RR"], nargs='+',
+                        help="Names of the precipitation parameters to compare (e.g. RR, RR_Z)")
+
+    return parser.parse_args()
+
+def find_max_min_dates(file_paths):
+    dates = []
+
+    for file_path in file_paths:
+        # Extract the date component from the filename
+        filename = os.path.basename(file_path)
+        date_str = filename.split('_')[0]
+
+        try:
+            # Convert the date string to a datetime object
+            date = datetime.datetime.strptime(date_str, '%Y%m%d')
+            dates.append(date)
+        except ValueError:
+            # Handle files that don't have a valid date format
+            print(f"Warning: '{file_path}' does not contain a valid date format.")
+
+    if dates:
+        # Find the minimum and maximum dates
+        min_date = min(dates)
+        max_date = max(dates)
+        return min_date, max_date
+    else:
+        return None, None
+
 def main():
-    """
-    """
-    param_vec = ['RR_Z', 'RR_hydro']
-    smn_station_vec = ['CIM', 'MAG', 'OTL']
-    tstart = '20180401'
-    tend = '20180430'
-
+    """Main function."""
+    args = parse_args()
+
+    fpath = args.directory
+    params = args.parameters
+
     np_radar_min = 6
     np_sensor_min = 6
     min_val = 0.2

-    fbase = '/data/pyrad_products/mals_loc_dataquality/'
     img_ext = 'png'
     avg_time = 3600

     print("====== precipitation comparison started: %s" %
           datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
-    atexit.register(_print_end_msg,
-                    "====== comparison finished: ")
-
-    startdate = datetime.datetime.strptime(tstart, '%Y%m%d')
-    enddate = datetime.datetime.strptime(tend, '%Y%m%d')
-
-    ndays = (enddate - startdate).days + 1
-    print('Number of days to process: ' + str(ndays) + '\n\n')
-
-    for param in param_vec:
+    # Find all products within file structure
+    all_RR_files = glob.glob(os.path.join(fpath, '**',
+                             '*acc_ts_comp_POINT_MEASUREMENT_RR*.csv'), recursive=True)
+
+    print(f'Found {len(all_RR_files)} files with acc. 
precip.') + + startdate, enddate = find_max_min_dates(all_RR_files) + print(f'Start date: {startdate}') + print(f'End date: {enddate}') + + # Sort files by parameter + list_params = sorted(params)[::-1] + RR_files_by_param = {} + for file in all_RR_files: + for param in list_params: + if param in file: + if param not in RR_files_by_param: + RR_files_by_param[param] = [] + RR_files_by_param[param].append(file) + + for param in list_params: ts_vec = np.array([]) val_radar = np.ma.array([]) np_radar = np.array([]) val_sensor = np.ma.array([]) np_sensor = np.array([]) - - for station in smn_station_vec: - for day in range(ndays): - current_date = startdate + datetime.timedelta(days=day) - day_dir = current_date.strftime("%Y-%m-%d") - daybase = current_date.strftime("%Y%m%d") - - fpath = ( - fbase + - day_dir + - '/rg' + - station + - '_' + - param + - '/RRcum' + - str(avg_time) + - 's/') - fname = glob.glob( - fpath + daybase + '_' + str(avg_time) + - 's_acc_ts_comp_POINT_MEASUREMENT_*.csv') - if not fname: - warn('No file found in ' + fpath) - continue - else: - (ts_aux, np_radar_aux, radar_value_aux, np_sensor_aux, - sensor_value_aux) = read_ts_cum(fname[0]) - ts_vec = np.append(ts_vec, ts_aux) - val_radar = np.ma.append(val_radar, radar_value_aux) - np_radar = np.append(np_radar, np_radar_aux) - val_sensor = np.ma.append(val_sensor, sensor_value_aux) - np_sensor = np.append(np_sensor, np_sensor_aux) + for file in RR_files_by_param[param]: + (ts_aux, np_radar_aux, radar_value_aux, np_sensor_aux, + sensor_value_aux) = read_ts_cum(file) + ts_vec = np.append(ts_vec, ts_aux) + val_radar = np.ma.append(val_radar, radar_value_aux) + np_radar = np.append(np_radar, np_radar_aux) + val_sensor = np.ma.append(val_sensor, sensor_value_aux) + np_sensor = np.append(np_sensor, np_sensor_aux) # filter out undesired data ind = np.where(np.logical_and( @@ -106,12 +127,12 @@ def main(): stats = compute_1d_stats(val_sensor, val_radar) # create output image - fpath = fbase + 'RR/' + fpath = fpath + 'RR/' if os.path.isdir(fpath): pass else: os.makedirs(fpath) - + print(f'Saving outputs to {fpath}') figfname = [ startdate.strftime('%Y%m%d') + '-' + @@ -149,19 +170,7 @@ def main(): def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ + """Prints end message.""" print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) diff --git a/src/scripts/common_colocated_gates.py b/src/scripts/common_colocated_gates.py deleted file mode 100755 index 3e8b65ab9..000000000 --- a/src/scripts/common_colocated_gates.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -common_colocated_gates -================================================ - -This program reads colocated gates files from two radars -and creates a new file with the gates that are common to -both radars - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import atexit -import numpy as np -import pandas as pd - -from pyrad.io import read_colocated_gates, write_colocated_gates - -print(__doc__) - - -def main(): - """ - """ - - file_path = ( - '/srn/analysis/pyrad_products/rad4alp_intercomp/colocated_gates/') - rad1_vec = ['A', 'A', 'A', 'A', 'D', 'D', 'D', 'L', 'L', 'P'] - rad2_vec = ['D', 'L', 'P', 'W', 'L', 'P', 'W', 'P', 'W', 'W'] - - print("====== common colocated gates started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - 
atexit.register(_print_end_msg, - "====== common colocated gates finished: ") - - for i, rad1 in enumerate(rad1_vec): - rad2 = rad2_vec[i] - - print('Radars: ' + rad1 + ' ' + rad2) - fname1 = (file_path + 'PL' + rad1 + '_' + 'PL' + rad2 + - '/info_COLOCATED_GATES_PL' + rad1 + '_PL' + rad2 + '.csv') - fname2 = (file_path + 'PL' + rad2 + '_' + 'PL' + rad1 + - '/info_COLOCATED_GATES_PL' + rad2 + '_PL' + rad1 + '.csv') - - (rad1_ray_ind, rad1_rng_ind, rad1_ele, rad1_azi, rad1_rng, - rad2_ray_ind, rad2_rng_ind, rad2_ele, rad2_azi, rad2_rng) = ( - read_colocated_gates(fname1)) - - print('Number of gates rad1-rad2 ', np.shape(rad1_ray_ind)) - - (rad2_ray_ind_aux, rad2_rng_ind_aux, rad2_ele_aux, rad2_azi_aux, - rad2_rng_aux, rad1_ray_ind_aux, rad1_rng_ind_aux, rad1_ele_aux, - rad1_azi_aux, rad1_rng_aux) = read_colocated_gates(fname2) - - print('Number of gates rad2-rad1 ', np.shape(rad2_ray_ind_aux)) - - # make a pool of data - rad1_ray_ind = np.ma.concatenate((rad1_ray_ind, rad1_ray_ind_aux)) - rad1_rng_ind = np.ma.concatenate((rad1_rng_ind, rad1_rng_ind_aux)) - rad1_ele = np.ma.concatenate((rad1_ele, rad1_ele_aux)) - rad1_azi = np.ma.concatenate((rad1_azi, rad1_azi_aux)) - rad1_rng = np.ma.concatenate((rad1_rng, rad1_rng_aux)) - rad2_ray_ind = np.ma.concatenate((rad2_ray_ind, rad2_ray_ind_aux)) - rad2_rng_ind = np.ma.concatenate((rad2_rng_ind, rad2_rng_ind_aux)) - rad2_ele = np.ma.concatenate((rad2_ele, rad2_ele_aux)) - rad2_azi = np.ma.concatenate((rad2_azi, rad2_azi_aux)) - rad2_rng = np.ma.concatenate((rad2_rng, rad2_rng_aux)) - - print('Total number of gates ', np.shape(rad1_ray_ind)) - - # create dictionary and put it in pandas framework - coloc_dict = { - 'rad1_ray_ind': rad1_ray_ind, - 'rad1_rng_ind': rad1_rng_ind, - 'rad1_ele': rad1_ele, - 'rad1_azi': rad1_azi, - 'rad1_rng': rad1_rng, - 'rad2_ray_ind': rad2_ray_ind, - 'rad2_rng_ind': rad2_rng_ind, - 'rad2_ele': rad2_ele, - 'rad2_azi': rad2_azi, - 'rad2_rng': rad2_rng} - df = pd.DataFrame(data=coloc_dict) - - # keep only duplicated data - df_common = df[df.duplicated(keep=False)].drop_duplicates() - common_dict = df_common.to_dict(orient='list') - - print('Number of common gates', df_common.shape) - print('rad1 elev min/max', np.min(common_dict['rad1_ele']), - np.max(common_dict['rad1_ele'])) - print('rad2 elev min/max', np.min(common_dict['rad2_ele']), - np.max(common_dict['rad2_ele'])) - - # write resultant output - fname1_out = ( - file_path + 'PL' + rad1 + '_' + 'PL' + rad2 + - '/info_common_COLOCATED_GATES_PL' + rad1 + '_PL' + rad2 + '.csv') - fname2_out = ( - file_path + 'PL' + rad2 + '_' + 'PL' + rad1 + - '/info_common_COLOCATED_GATES_PL' + rad2 + '_PL' + rad1 + '.csv') - - rad1_dict = { - 'rad1_ray_ind': np.asarray(common_dict['rad1_ray_ind']), - 'rad1_rng_ind': np.asarray(common_dict['rad1_rng_ind']), - 'rad1_ele': np.asarray(common_dict['rad1_ele']), - 'rad1_azi': np.asarray(common_dict['rad1_azi']), - 'rad1_rng': np.asarray(common_dict['rad1_rng']), - 'rad2_ray_ind': np.asarray(common_dict['rad2_ray_ind']), - 'rad2_rng_ind': np.asarray(common_dict['rad2_rng_ind']), - 'rad2_ele': np.asarray(common_dict['rad2_ele']), - 'rad2_azi': np.asarray(common_dict['rad2_azi']), - 'rad2_rng': np.asarray(common_dict['rad2_rng'])} - - rad2_dict = { - 'rad1_ray_ind': np.asarray(common_dict['rad2_ray_ind']), - 'rad1_rng_ind': np.asarray(common_dict['rad2_rng_ind']), - 'rad1_ele': np.asarray(common_dict['rad2_ele']), - 'rad1_azi': np.asarray(common_dict['rad2_azi']), - 'rad1_rng': np.asarray(common_dict['rad2_rng']), - 'rad2_ray_ind': 
np.asarray(common_dict['rad1_ray_ind']), - 'rad2_rng_ind': np.asarray(common_dict['rad1_rng_ind']), - 'rad2_ele': np.asarray(common_dict['rad1_ele']), - 'rad2_azi': np.asarray(common_dict['rad1_azi']), - 'rad2_rng': np.asarray(common_dict['rad1_rng'])} - - write_colocated_gates(rad1_dict, fname1_out) - write_colocated_gates(rad2_dict, fname2_out) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_precipitation_comparison.py b/src/scripts/main_precipitation_comparison.py deleted file mode 100755 index 8702c1a0b..000000000 --- a/src/scripts/main_precipitation_comparison.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -main_precipitation_comparison -================================================ - -This program compares radar data with a point measurement sensor. - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import atexit -import glob -from warnings import warn -import os - -import numpy as np - -from pyrad.io import read_ts_cum -from pyrad.graph import plot_scatter_comp -from pyrad.util import compute_1d_stats - -print(__doc__) - - -def main(): - """ - """ - param_vec = ['RR_Z', 'RR_hydro'] - smn_station_vec = ['CIM', 'MAG', 'OTL'] - tstart = '20180401' - tend = '20180430' - - np_radar_min = 6 - np_sensor_min = 6 - min_val = 0.2 - - fbase = '/data/pyrad_products/mals_loc_dataquality/' - img_ext = 'png' - avg_time = 3600 - - print("====== precipitation comparison started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== comparison finished: ") - - startdate = datetime.datetime.strptime(tstart, '%Y%m%d') - enddate = datetime.datetime.strptime(tend, '%Y%m%d') - - ndays = (enddate - startdate).days + 1 - print('Number of days to process: ' + str(ndays) + '\n\n') - - for param in param_vec: - ts_vec = np.array([]) - val_radar = np.ma.array([]) - np_radar = np.array([]) - val_sensor = np.ma.array([]) - np_sensor = np.array([]) - - for station in smn_station_vec: - for day in range(ndays): - current_date = startdate + datetime.timedelta(days=day) - day_dir = current_date.strftime("%Y-%m-%d") - daybase = current_date.strftime("%Y%m%d") - - fpath = ( - fbase + - day_dir + - '/rg' + - station + - '_' + - param + - '/RRcum' + - str(avg_time) + - 's/') - fname = glob.glob( - fpath + daybase + '_' + str(avg_time) + - 's_acc_ts_comp_POINT_MEASUREMENT_*.csv') - if not fname: - warn('No file found in ' + fpath) - continue - else: - (ts_aux, np_radar_aux, radar_value_aux, np_sensor_aux, - sensor_value_aux) = read_ts_cum(fname[0]) - ts_vec = np.append(ts_vec, ts_aux) - val_radar = np.ma.append(val_radar, radar_value_aux) - np_radar = np.append(np_radar, np_radar_aux) - val_sensor = np.ma.append(val_sensor, sensor_value_aux) - np_sensor = np.append(np_sensor, np_sensor_aux) - - # filter out undesired data - ind = np.where(np.logical_and( - np.logical_and( - np_radar >= np_radar_min, np_sensor >= np_sensor_min), - np.logical_and(val_sensor >= min_val, val_radar >= min_val)))[0] - - val_sensor = val_sensor[ind] - val_radar = val_radar[ind] - - # 
compute statistics - stats = compute_1d_stats(val_sensor, val_radar) - - # create output image - fpath = fbase + 'RR/' - if os.path.isdir(fpath): - pass - else: - os.makedirs(fpath) - - figfname = [ - startdate.strftime('%Y%m%d') + - '-' + - enddate.strftime('%Y%m%d') + - '_' + - str(avg_time) + - 's_acc_ts_comp_' + - param + - '.' + - img_ext] - - for i in range(len(figfname)): - figfname[i] = fpath + figfname[i] - - labelx = 'RG (mm)' - labely = 'Radar (mm)' - titl = ( - str(avg_time) + - ' s Acc. Comp. ' + - startdate.strftime('%Y%m%d') + - '-' + - enddate.strftime('%Y%m%d')) - - metadata = ( - 'npoints: ' + str(stats['npoints']) + '\n' + - 'NB: ' + '{:.2f}'.format(float(stats['NB'])) + '\n' + - 'corr: ' + '{:.2f}'.format(float(stats['corr'])) + '\n' + - 'RMS: ' + '{:.2f}'.format(float(stats['RMS'])) + '\n' + - 'Nash: ' + '{:.2f}'.format(float(stats['Nash'])) + '\n') - - plot_scatter_comp( - val_sensor, val_radar, figfname, labelx=labelx, - labely=labely, titl=titl, axis='equal', metadata=metadata, - dpi=300) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_cosmo.py b/src/scripts/main_process_cosmo.py deleted file mode 100644 index aacad519f..000000000 --- a/src/scripts/main_process_cosmo.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework -================================================ - -Welcome to Pyrad! - -This program processes and post-processes data over a time span - -To run the processing framework type: - python main_process_data.py \ -[config_file] --starttime [process_start_time] --endtime [process_end_time] \ ---postproc_cfgfile [postproc_config_file] --cfgpath [cfgpath] - -If startime and endtime are not specified the program determines them from -the trajectory file or the last processed volume. -postproc_cfgfile is an optional argument with default: None -cfgpath is an optional argument with default: \ -'$HOME/pyrad/config/processing/' -The trajectory file can be of type plane, lightning or proc_periods. If it is \ -of type lightning the flash number can be specified - -Example: - python main_process_data.py 'paradiso_fvj_vol.txt' --starttime \ -'20140523000000' --endtime '20140523001000' --postproc_cfgfile \ -'paradiso_fvj_vol_postproc.txt' --cfgpath '$HOME/pyrad/config/processing/' - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os - -from pyrad.flow.flow_control import main_cosmo - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'proc_cfgfile', type=str, help='name of main configuration file') - - # keyword arguments - parser.add_argument( - '--starttime', type=str, default=None, - help=('starting time of the data to be processed. ' + - 'Format ''YYYYMMDDhhmmss''')) - parser.add_argument( - '--endtime', type=str, default=None, - help='end time of the data to be processed. 
Format ''YYYYMMDDhhmmss''') - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - parser.add_argument("-i", "--infostr", type=str, - help="Information string about the actual data " - "processing (e.g. 'RUN57'). This string is added " - "to the filenames of the product files.", - default="") - parser.add_argument("-t", "--trajfile", type=str, default='', - help="Definition file of plane trajectory. " - "Configuration of scan sector, products, ...") - - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - print('config file: ' + args.proc_cfgfile) - if args.starttime is not None: - print('start time: ' + args.starttime) - else: - print('start time not defined by user') - if args.endtime is not None: - print('end time: ' + args.endtime) - else: - print('end time not defined by user') - - proc_starttime = None - if args.starttime is not None: - proc_starttime = datetime.datetime.strptime( - args.starttime, '%Y%m%d%H%M%S') - proc_endtime = None - if args.endtime is not None: - proc_endtime = datetime.datetime.strptime(args.endtime, '%Y%m%d%H%M%S') - cfgfile_proc = args.cfgpath + args.proc_cfgfile - - if args.infostr == 'None': - infostr = '' - else: - infostr = args.infostr - - main_cosmo(cfgfile_proc, starttime=proc_starttime, endtime=proc_endtime, - trajfile=args.trajfile, infostr=infostr) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_cosmo_rt.py b/src/scripts/main_process_cosmo_rt.py deleted file mode 100644 index 0060f4d94..000000000 --- a/src/scripts/main_process_cosmo_rt.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework -================================================ - -Welcome to Pyrad! - -This program performs real time processing of the data - -To run the processing framework type: - python main_process_data_rt.py \ -[config_files] --starttime [process_start_time] --endtime [process_end_time] \ ---cfgpath [cfgpath] --proc_period [proc_period] - -If startime or endtime are specified the program will start processing at the -specified time and end at the specified time. Otherwise the program ends when -the user interrupts it. 
-cfgpath is an optional argument with default: \ -'$HOME/pyrad/config/processing/' -proc_period is the time that has to pass before attempting to restart the -processing in [s] -if proc_finish is not none it indicates the time the program is allowed to ran -berfore forcing it to end - - -Example: - python main_process_data_rt.py 'paradiso_fvj_vol.txt' \ -'paradiso_fvj_rhi.txt' --starttime '20140523000000' \ ---endtime '20140523001000' --cfgpath '$HOME/pyrad/config/processing/' \ ---proc_period 60 --proc_finish 120 - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os -import traceback -from warnings import warn - -from pyrad.flow.flow_control import main_cosmo_rt - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'cfgfiles', nargs='+', type=str, - help='name of main configuration file') - - # keyword arguments - parser.add_argument( - '--starttime', type=str, default=None, - help=('starting time of the data to be processed. ' + - 'Format ''YYYYMMDDhhmmss''')) - parser.add_argument( - '--endtime', type=str, default=None, - help='end time of the data to be processed. Format ''YYYYMMDDhhmmss''') - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - - parser.add_argument( - '--proc_period', type=int, default=60, - help='Period between processing rounds (s)') - - parser.add_argument( - '--proc_finish', type=int, default=None, - help='Processing time allowed before shutdown (s)') - - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - cfgfile_list = [] - for ind, cfgfile in enumerate(args.cfgfiles): - print('config file ' + str(ind) + ': ' + cfgfile) - cfgfile_list.append(args.cfgpath + cfgfile) - if args.starttime is not None: - print('start time: ' + args.starttime) - else: - print('start time not defined by user') - if args.endtime is not None: - print('end time: ' + args.endtime) - else: - print('end time not defined by user') - - proc_starttime = None - if args.starttime is not None: - proc_starttime = datetime.datetime.strptime( - args.starttime, '%Y%m%d%H%M%S') - proc_endtime = None - if args.endtime is not None: - proc_endtime = datetime.datetime.strptime(args.endtime, '%Y%m%d%H%M%S') - - end_proc = False - while not end_proc: - try: - end_proc = main_cosmo_rt( - cfgfile_list, starttime=proc_starttime, endtime=proc_endtime, - proc_period=args.proc_period, proc_finish=args.proc_finish) - except BaseException: - traceback.print_exc() - if args.proc_finish is None: - warn("An exception occurred. 
" + - "Restarting the real time processing") - else: - end_proc = True - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_data.py b/src/scripts/main_process_data.py deleted file mode 100755 index 176ac41e8..000000000 --- a/src/scripts/main_process_data.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework -================================================ - -Welcome to Pyrad! - -This program processes and post-processes data over a time span - -To run the processing framework type: - python main_process_data.py \ -[config_file] --starttime [process_start_time] --endtime [process_end_time] \ ---postproc_cfgfile [postproc_config_file] --cfgpath [cfgpath] - -If startime and endtime are not specified the program determines them from -the trajectory file or the last processed volume. -postproc_cfgfile is an optional argument with default: None -cfgpath is an optional argument with default: \ -'$HOME/pyrad/config/processing/' -The trajectory file can be of type plane, lightning or proc_periods. If it is \ -of type lightning the flash number can be specified - -Example: - python main_process_data.py 'paradiso_fvj_vol.txt' --starttime \ -'20140523000000' --endtime '20140523001000' --postproc_cfgfile \ -'paradiso_fvj_vol_postproc.txt' --cfgpath '$HOME/pyrad/config/processing/' - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os - -from pyrad.flow.flow_control import main as pyrad_main - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'proc_cfgfile', type=str, help='name of main configuration file') - - # keyword arguments - parser.add_argument( - '--starttime', type=str, default=None, - help=('starting time of the data to be processed. ' + - 'Format ''YYYYMMDDhhmmss''')) - parser.add_argument( - '--endtime', type=str, default=None, - help='end time of the data to be processed. Format ''YYYYMMDDhhmmss''') - parser.add_argument( - '--postproc_cfgfile', type=str, default=None, - help='name of main post-processing configuration file') - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - parser.add_argument("-i", "--infostr", type=str, - help="Information string about the actual data " - "processing (e.g. 'RUN57'). This string is added " - "to the filenames of the product files.", - default="") - parser.add_argument("-t", "--trajfile", type=str, default='', - help="Definition file of plane trajectory. " - "Configuration of scan sector, products, ...") - parser.add_argument("--trajtype", type=str, default='plane', - help="Type of trajectory. 
" - "Can be either 'plane', 'lightning' or 'proc_periods'") - parser.add_argument("--flashnr", type=int, default=0, - help="If type of trajectory is 'lightning', " - "flash number the data of which will be processed" - "0 means that all lightning data will be processed") - parser.add_argument("--MULTIPROCESSING_DSET", type=int, default=0, - help="If 1 the generation of the datasets at the " - "same processing level will be parallelized") - parser.add_argument("--MULTIPROCESSING_PROD", type=int, default=0, - help="If 1 the generation of the products of each " - "dataset will be parallelized") - parser.add_argument("--PROFILE_MULTIPROCESSING", type=int, default=0, - help="If 1 the multiprocessing is profiled") - - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - print('config file: ' + args.proc_cfgfile) - print('postproc config file: ' + str(args.postproc_cfgfile)) - if args.starttime is not None: - print('start time: ' + args.starttime) - else: - print('start time not defined by user') - if args.endtime is not None: - print('end time: ' + args.endtime) - else: - print('end time not defined by user') - if args.MULTIPROCESSING_DSET: - print('Dataset generation will be parallelized') - if args.MULTIPROCESSING_PROD: - print('Product generation will be parallelized') - if args.PROFILE_MULTIPROCESSING: - print('Parallel processing performance will be profiled') - - proc_starttime = None - if args.starttime is not None: - proc_starttime = datetime.datetime.strptime( - args.starttime, '%Y%m%d%H%M%S') - proc_endtime = None - if args.endtime is not None: - proc_endtime = datetime.datetime.strptime(args.endtime, '%Y%m%d%H%M%S') - cfgfile_proc = args.cfgpath + args.proc_cfgfile - - if args.infostr == 'None': - infostr = '' - else: - infostr = args.infostr - - pyrad_main(cfgfile_proc, starttime=proc_starttime, endtime=proc_endtime, - trajfile=args.trajfile, infostr=infostr, - trajtype=args.trajtype, flashnr=args.flashnr, - MULTIPROCESSING_DSET=args.MULTIPROCESSING_DSET, - MULTIPROCESSING_PROD=args.MULTIPROCESSING_PROD, - PROFILE_MULTIPROCESSING=args.PROFILE_MULTIPROCESSING) - - if args.postproc_cfgfile is not None: - cfgfile_postproc = args.cfgpath + args.postproc_cfgfile - pyrad_main(cfgfile_postproc, starttime=proc_starttime, - endtime=proc_endtime, trajfile=args.trajfile, - infostr=infostr, trajtype=args.trajtype, - flashnr=args.flashnr, - MULTIPROCESSING_DSET=args.MULTIPROCESSING_DSET, - MULTIPROCESSING_PROD=args.MULTIPROCESSING_PROD, - PROFILE_MULTIPROCESSING=args.PROFILE_MULTIPROCESSING) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_data_birds.py b/src/scripts/main_process_data_birds.py deleted file mode 100644 index dff6a7de3..000000000 --- a/src/scripts/main_process_data_birds.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework 
-================================================ - -Welcome to Pyrad! - -This program processes bird data - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os -import glob -from warnings import warn - -from pyrad.flow.flow_control import main as pyrad_main -from pyrad.io import get_fieldname_pyart -from pyrad.io import read_profile_ts -from pyrad.graph import get_field_name, _plot_time_range - -from pyart.config import get_metadata - -print(__doc__) - - -def main(): - """ - """ - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'proc_cfgfile', type=str, help='name of main configuration file') - parser.add_argument( - 'starttime', type=str, - help=('starting time of the data to be processed. ' + - 'Format ''YYYYMMDDhhmmss''')) - parser.add_argument( - 'endtime', type=str, - help='end time of the data to be processed. Format ''YYYYMMDDhhmmss''') - - # keyword arguments - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - - parser.add_argument( - '--storepath', type=str, - default='/store/msrad/radar/pyrad_products/rad4alp_birds_PHA/', - help='Base data storing path') - - parser.add_argument( - '--hres', type=int, default=200, help='Height resolution [m]') - - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - print('config file: ' + args.proc_cfgfile) - print('start time: ' + args.starttime) - print('end time: ' + args.endtime) - - proc_starttime = datetime.datetime.strptime( - args.starttime, '%Y%m%d%H%M%S') - proc_endtime = datetime.datetime.strptime( - args.endtime, '%Y%m%d%H%M%S') - cfgfile_proc = args.cfgpath + args.proc_cfgfile - - pyrad_main(cfgfile_proc, starttime=proc_starttime, endtime=proc_endtime) - - # Plot time-height - file_base = args.storepath - hres = args.hres - - datatype_list = [ - 'dBZc', 'eta_h', 'bird_density', 'WIND_SPEED', 'WIND_DIRECTION', - 'wind_vel_h_u', 'wind_vel_h_v', 'wind_vel_v'] - - startdate = proc_starttime.replace( - hour=0, minute=0, second=0, microsecond=0) - enddate = proc_endtime.replace(hour=0, minute=0, second=0, microsecond=0) - ndays = int((enddate - startdate).days) + 1 - for datatype in datatype_list: - flist = [] - for i in range(ndays): - time_dir = ( - proc_starttime + - datetime.timedelta( - days=i)).strftime('%Y-%m-%d') - - filepath = ( - file_base + time_dir + '/VAD/PROFILE_WIND/' + - '*_wind_profile_VAD_WIND_hres' + str(hres) + '.csv') - labels = [ - 'u_wind', 'std_u_wind', 'np_u_wind', - 'v_wind', 'std_v_wind', 'np_v_wind', - 'w_wind', 'std_w_wind', 'np_w_wind', - 'mag_h_wind', 'dir_h_wind'] - label_nr = 0 - if datatype == 'dBZc': - filepath = ( - file_base + time_dir + '/velFilter/PROFILE_dBZc/' + - '*_rhi_profile_*_dBZc_hres' + str(hres) + '.csv') - labels = [ - '50.0-percentile', '25.0-percentile', '75.0-percentile'] - - # dBZ mean data - # filepath = ( - # file_base+time_dir+'/velFilter/PROFILE_dBZc_mean/' + - # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') - # labels = [ - # 'Mean', 'Min', 'Max'] - - # dBZ linear mean data - # filepath = ( - # file_base+time_dir+'/velFilter/PROFILE_dBZc_linear_mean/' + - # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') - # 
labels = [ - # 'Mean', 'Min', 'Max'] - - # dBZ before filtering with fitted velocity - # filepath = ( - # file_base+time_dir+'/echoFilter/PROFILE_dBZc/' + - # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') - # labels = [ - # '50.0-percentile', '25.0-percentile', '75.0-percentile'] - # - # dBZ before filtering with fitted velocity. Linear mean - # filepath = ( - # file_base+time_dir+'/echoFilter/PROFILE_dBZc_linear_mean/' + - # '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv') - # labels = [ - # 'Mean', 'Min', 'Max'] - elif datatype == 'eta_h': - filepath = ( - file_base + time_dir + '/vol_refl/PROFILE/' + - '*_rhi_profile_*_eta_h_hres' + str(hres) + '.csv') - labels = [ - '50.0-percentile', '25.0-percentile', '75.0-percentile'] - - # mean data - # filepath = ( - # file_base+time_dir+'/vol_refl/PROFILE_mean/' + - # '*_rhi_profile_*_eta_h_hres'+str(hres)+'.csv') - # labels = [ - # 'Mean', 'Min', 'Max'] - elif datatype == 'bird_density': - filepath = ( - file_base + time_dir + '/bird_density/PROFILE/' + - '*_rhi_profile_*_bird_density_hres' + str(hres) + '.csv') - labels = [ - '50.0-percentile', '25.0-percentile', '75.0-percentile'] - - # mean data - # filepath = ( - # file_base+time_dir+'/bird_density/PROFILE_mean/' + - # '*_rhi_profile_*_bird_density_hres'+str(hres)+'.csv') - # labels = [ - # 'Mean', 'Min', 'Max'] - elif datatype == 'WIND_SPEED': - label_nr = 9 - elif datatype == 'WIND_DIRECTION': - label_nr = 10 - elif datatype == 'wind_vel_h_u': - label_nr = 0 - elif datatype == 'wind_vel_h_v': - label_nr = 3 - elif datatype == 'wind_vel_v': - label_nr = 6 - - flist_aux = glob.glob(filepath) - if not flist_aux: - warn('No profile files found in ' + filepath) - continue - flist.extend(flist_aux) - - if not flist: - warn('No profile files found') - continue - flist.sort() - - field_name = get_fieldname_pyart(datatype) - field_dict = get_metadata(field_name) - titl = 'bird retrieval ' + args.starttime + '\n' + get_field_name( - field_dict, field_name) - - tbin_edges, hbin_edges, np_ma, data_ma, t_start = read_profile_ts( - flist, labels, hres=hres, label_nr=label_nr) - - basepath_out = os.path.dirname(flist[0]) - fname = ( - basepath_out + '/' + args.starttime + '_TIME_HEIGHT_' + - datatype + '_hres' + str(hres) + '.png') - - vmin = vmax = None - _plot_time_range( - tbin_edges, hbin_edges / 1000., data_ma, field_name, [fname], - titl=titl, figsize=[10, 8], vmin=vmin, vmax=vmax, dpi=72) - - print("----- plot to '%s'" % fname) - - # Plot number of points - field_dict = get_metadata('number_of_samples') - titl = 'bird retrieval ' + args.starttime + '\n' + get_field_name( - field_dict, 'number_of_samples') - - fname = ( - basepath_out + '/' + args.starttime + '_TIME_HEIGHT_' + - datatype + 'nsamples_hres' + str(hres) + '.png') - - vmin = vmax = None - _plot_time_range( - tbin_edges, - hbin_edges / 1000., - np_ma, - 'number_of_samples', - [fname], - titl=titl, - figsize=[ - 10, - 8], - vmin=vmin, - vmax=vmax, - dpi=72) - - print("----- plot to '%s'" % fname) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_data_period.py b/src/scripts/main_process_data_period.py deleted file mode 100755 index 
4c9358c37..000000000 --- a/src/scripts/main_process_data_period.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env python - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework -================================================ - -Welcome to Pyrad! - -This program does the daily processing and post-processing over a period of \ -time. - -To run the processing framework type: - python main_process_data_period.py \ -[config_file] [process_start_date] [process_end_date] \ ---starttime [process_start_time] --endtime [process_end_time] \ ---postproc_cfgfile [postproc_config_file] --cfgpath [cfgpath] - -starttime is an optional argument with default: '000000' -endtime is an optional argument with default: '235959' -postproc_cfgfile is an optional argument with default: None -cfgpath is an optional argument with default: \ -'$HOME/pyrad/config/processing/' - -Example: - python main_process_data_period.py 'paradiso_fvj_vol.txt' '20140523' \ -'20140525' --starttime '000000' --endtime '001000' \ ---postproc_cfgfile 'mals_emm_vol_postproc.txt' \ ---cfgpath '$HOME/pyrad/config/processing/' - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os - -from pyrad.flow import main as pyrad_main - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - parser.add_argument( - 'proc_cfgfile', type=str, help='name of main configuration file') - parser.add_argument( - 'startdate', type=str, - help='starting date of the data to be processed. Format ''YYYYMMDD'' ') - parser.add_argument( - 'enddate', type=str, - help='end date of the data to be processed. Format ''YYYYMMDD'' ') - - # keyword arguments - parser.add_argument( - '--starttime', type=str, default='000000', - help='starting date of the data to be processed. Format ''hhmmss'' ') - parser.add_argument( - '--endtime', type=str, default='235959', - help='end date of the data to be processed. Format ''hhmmss'' ') - - parser.add_argument("-i", "--infostr", - help="Information string about the actual data " - "processing (e.g. 'RUN57'). 
This string is added " - "to the filenames of the product files.", - default="") - - parser.add_argument("--MULTIPROCESSING_DSET", type=int, default=0, - help="If 1 the generation of the datasets at the " - "same processing level will be parallelized") - parser.add_argument("--MULTIPROCESSING_PROD", type=int, default=0, - help="If 1 the generation of the products of each " - "dataset will be parallelized") - parser.add_argument("--PROFILE_MULTIPROCESSING", type=int, default=0, - help="If 1 the multiprocessing is profiled") - - parser.add_argument( - '--postproc_cfgfile', type=str, default=None, - help='name of main post-processing configuration file') - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - print('config file: ' + args.proc_cfgfile) - print('postproc config file: ' + str(args.postproc_cfgfile)) - print('start date: ' + args.startdate) - print('end date: ' + args.enddate) - print('start time each day: ' + args.starttime) - print('end time each day: ' + args.endtime) - if args.MULTIPROCESSING_DSET: - print('Dataset generation will be parallelized') - if args.MULTIPROCESSING_PROD: - print('Product generation will be parallelized') - if args.PROFILE_MULTIPROCESSING: - print('Parallel processing performance will be profiled') - - proc_startdate = datetime.datetime.strptime( - args.startdate, '%Y%m%d') - proc_enddate = datetime.datetime.strptime( - args.enddate, '%Y%m%d') - proc_starttime = datetime.timedelta( - hours=float(args.starttime[0:2]), minutes=float(args.starttime[2:4]), - seconds=float(args.starttime[4:6])) - proc_endtime = datetime.timedelta( - hours=float(args.endtime[0:2]), minutes=float(args.endtime[2:4]), - seconds=float(args.endtime[4:6])) - - cfgfile_proc = args.cfgpath + args.proc_cfgfile - if args.postproc_cfgfile is not None: - cfgfile_postproc = args.cfgpath + args.postproc_cfgfile - - ndays = (proc_enddate - proc_startdate).days + 1 - print('Number of days to process: ' + str(ndays) + '\n\n') - - if args.infostr == 'None': - infostr = '' - else: - infostr = args.infostr - - for day in range(ndays): - current_date = proc_startdate + datetime.timedelta(days=day) - proc_startdatetime = current_date + proc_starttime - proc_enddatetime = current_date + proc_endtime - try: - pyrad_main(cfgfile_proc, starttime=proc_startdatetime, - endtime=proc_enddatetime, infostr=infostr, - MULTIPROCESSING_DSET=args.MULTIPROCESSING_DSET, - MULTIPROCESSING_PROD=args.MULTIPROCESSING_PROD, - PROFILE_MULTIPROCESSING=args.PROFILE_MULTIPROCESSING) - if args.postproc_cfgfile is not None: - pyrad_main( - cfgfile_postproc, - starttime=proc_startdatetime, - endtime=proc_enddatetime, - infostr=infostr, - MULTIPROCESSING_DSET=args.MULTIPROCESSING_DSET, - MULTIPROCESSING_PROD=args.MULTIPROCESSING_PROD, - PROFILE_MULTIPROCESSING=args.PROFILE_MULTIPROCESSING) - except ValueError: - print(ValueError) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# 
--------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_data_rt.py b/src/scripts/main_process_data_rt.py deleted file mode 100755 index 65b33ccdc..000000000 --- a/src/scripts/main_process_data_rt.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework -================================================ - -Welcome to Pyrad! - -This program performs real time processing of the data - -To run the processing framework type: - python main_process_data_rt.py \ -[config_files] --starttime [process_start_time] --endtime [process_end_time] \ ---cfgpath [cfgpath] --proc_period [proc_period] - -If startime or endtime are specified the program will start processing at the -specified time and end at the specified time. Otherwise the program ends when -the user interrupts it. -cfgpath is an optional argument with default: \ -'$HOME/pyrad/config/processing/' -proc_period is the time that has to pass before attempting to restart the -processing in [s] -if proc_finish is not none it indicates the time the program is allowed to ran -berfore forcing it to end - - -Example: - python main_process_data_rt.py 'paradiso_fvj_vol.txt' \ -'paradiso_fvj_rhi.txt' --starttime '20140523000000' \ ---endtime '20140523001000' --cfgpath '$HOME/pyrad/config/processing/' \ ---proc_period 60 --proc_finish 120 - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os -import traceback -from warnings import warn - -from pyrad.flow.flow_control import main_rt as pyrad_main - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'cfgfiles', nargs='+', type=str, - help='name of main configuration file') - - # keyword arguments - parser.add_argument( - '--starttime', type=str, default=None, - help=('starting time of the data to be processed. ' + - 'Format ''YYYYMMDDhhmmss''')) - parser.add_argument( - '--endtime', type=str, default=None, - help='end time of the data to be processed. 
Format ''YYYYMMDDhhmmss''') - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - - parser.add_argument( - '--proc_period', type=int, default=60, - help='Period between processing rounds (s)') - - parser.add_argument( - '--proc_finish', type=int, default=None, - help='Processing time allowed before shutdown (s)') - - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - cfgfile_list = [] - for ind, cfgfile in enumerate(args.cfgfiles): - print('config file ' + str(ind) + ': ' + cfgfile) - cfgfile_list.append(args.cfgpath + cfgfile) - if args.starttime is not None: - print('start time: ' + args.starttime) - else: - print('start time not defined by user') - if args.endtime is not None: - print('end time: ' + args.endtime) - else: - print('end time not defined by user') - - proc_starttime = None - if args.starttime is not None: - proc_starttime = datetime.datetime.strptime( - args.starttime, '%Y%m%d%H%M%S') - proc_endtime = None - if args.endtime is not None: - proc_endtime = datetime.datetime.strptime(args.endtime, '%Y%m%d%H%M%S') - - end_proc = False - while not end_proc: - try: - end_proc = pyrad_main( - cfgfile_list, starttime=proc_starttime, endtime=proc_endtime, - proc_period=args.proc_period, proc_finish=args.proc_finish) - except BaseException: - traceback.print_exc() - if args.proc_finish is None: - warn("An exception occurred. " + - "Restarting the real time processing") - else: - end_proc = True - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_data_trt.py b/src/scripts/main_process_data_trt.py deleted file mode 100755 index 211662b5f..000000000 --- a/src/scripts/main_process_data_trt.py +++ /dev/null @@ -1,478 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -main_process_data_trt -================================================ - -This program activates the Pyrad processing to obtain -TRT cells trajectories and obtains post-processing products - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os -import glob -from warnings import warn - -import numpy as np - -from pyrad.flow.flow_control import main as pyrad_main -from pyrad.io import get_fieldname_pyart, write_trt_cell_lightning -from pyrad.io import read_profile_ts, read_histogram_ts, read_quantiles_ts -from pyrad.io import read_trt_traj_data, read_thundertracking_info -from pyrad.graph import get_field_name, get_colobar_label -from pyrad.graph import _plot_time_range - -from pyart.config import get_metadata - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'proc_cfgfile', type=str, help='name of main configuration file') - - # keyword arguments - 
parser.add_argument( - '--days', type=str, default=None, - help='Dates to process. Format YYYY-MM-DD. Coma separated') - - parser.add_argument( - '--info_file', type=str, - default='/store/msrad/radar/thundertracking/info/thundertracking_info.csv', - help='configuration file path') - - parser.add_argument( - '--years', type=str, - default=None, - help='Years to process. If None all years in file will be processed') - - parser.add_argument( - '--max_rank', type=float, - default=None, - help='Max rank to process') - - parser.add_argument( - '--trtbase', type=str, - default='/store/msrad/radar/trt/', - help='name of folder containing the TRT cell data') - - parser.add_argument( - '--postproc', type=int, - default=1, - help='If true the data will be post-processed') - - parser.add_argument( - '--radarbase', type=str, - default='/store/msrad/radar/pyrad_products/thundertracking/', - help='name of folder containing the radar data') - - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - - parser.add_argument( - '--datatypes', type=str, - default='RR,hydro,KDPc,dBZc,RhoHVc,TEMP,ZDRc', - help='Name of the polarimetric moments to process. Coma separated') - - parser.add_argument( - '--datasets', type=str, - default='RR,hydro,KDPc,dBZc,RhoHVc,TEMP,ZDRc', - help='Name of the directory containing the datasets') - - parser.add_argument( - '--hres', type=float, default=250., help='Height resolution') - - parser.add_argument( - '--path_structure', type=int, default=2, - help='If true the data is at the cell center') - - args = parser.parse_args() - - print("====== PYRAD TRT data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD TRT data processing finished: ") - - print('config path: ' + args.cfgpath) - print('config file: ' + args.proc_cfgfile) - print('trt path: ' + args.trtbase) - print('radar data path: ' + args.radarbase) - - cfgfile_proc = args.cfgpath + args.proc_cfgfile - trajtype = 'trt' - - if args.days is not None: - time_dir_list = args.days.split(',') - else: - # get the years to process - years = None - if args.years is not None: - years = list(map(int, args.years.split(','))) - - _, max_rank, _, trt_time_start, trt_time_end = read_thundertracking_info( - args.info_file) - trt_times = np.append(trt_time_start, trt_time_end) - - trt_dates = np.array([], dtype=datetime.date) - for trt_time in trt_times: - trt_dates = np.append(trt_dates, trt_time.date()) - trt_dates = np.sort(np.unique(trt_dates)) - time_dir_list = [] - for rank, trt_date in zip(max_rank, trt_dates): - if years is not None: - if trt_date.year not in years: - continue - - if args.max_rank is not None: - if rank > args.max_rank: - continue - - time_dir_list.append(trt_date.strftime("%Y-%m-%d")) - - if args.postproc: - datatype_list = args.datatypes.split(',') - dataset_list = args.datasets.split(',') - - if np.size(datatype_list) != np.size(dataset_list): - warn( - str(np.size(datatype_list)) + ' datatypes but ' + - str(np.size(dataset_list)) + - ' dataset directories. 
Their number must be equal') - return - - # Find all TRT files in directory - trt_list = [] - if args.days is not None: - for time_dir in time_dir_list: - trt_list.extend(glob.glob( - args.trtbase + time_dir + '/TRTC_cell_plots/All/*.trt')) - trt_list.extend(glob.glob( - args.trtbase + time_dir + '/TRTC_cell_plots/Some/*.trt')) - else: - for time_dir in time_dir_list: - trt_list.extend(glob.glob( - args.trtbase + time_dir + '/TRTC_cell/*_tt.trt')) - - if len(trt_list) == 0: - warn('No valid TRT files found in ' + args.trtbase) - return - - # Pyrad data processing - trt_cell_id_list = [] - trt_file_list = [] - for fname in trt_list: - print('processing TRT cell file ' + fname) - try: - infostr = os.path.basename(fname).split('.')[0] - infostr = infostr.replace('_tt', '') - pyrad_main( - cfgfile_proc, trajfile=fname, infostr=infostr, - trajtype=trajtype) - trt_cell_id_list.append(infostr) - trt_file_list.append(fname) - except BaseException: - warn('Unable to process TRT cell file ' + fname) - - if not args.postproc: - return - - # plot time series and get altitude of graupel column - if 'hydro' in datatype_list: - cell_ID_list = np.asarray([], dtype=int) - time_list = np.asarray([], dtype=datetime.datetime) - lon_list = np.asarray([], dtype=float) - lat_list = np.asarray([], dtype=float) - area_list = np.asarray([], dtype=float) - rank_list = np.asarray([], dtype=float) - rm_hmin_list = np.ma.asarray([], dtype=float) - rm_hmax_list = np.ma.asarray([], dtype=float) - - for i, trt_cell_id in enumerate(trt_cell_id_list): - print('\n\nPost-processing cell: ' + trt_cell_id) - dt_str = trt_cell_id[0:12] - dt_cell = datetime.datetime.strptime(dt_str, "%Y%m%d%H%M") - time_dir = dt_cell.strftime("%Y-%m-%d") - for j, datatype in enumerate(datatype_list): - dataset = dataset_list[j] - if args.path_structure == 1: - file_base2 = args.radarbase + time_dir + '/' + dataset + '_trt_center_traj/' - elif args.path_structure == 0: - file_base2 = args.radarbase + time_dir + '/' + dataset + '_trt_traj/' - elif args.path_structure == 2: - file_base2 = args.radarbase + time_dir + '/trt_traj_tt/' - - field_name = get_fieldname_pyart(datatype) - field_dict = get_metadata(field_name) - titl = 'TRT cell ' + trt_cell_id + '\n' + get_field_name( - field_dict, field_name) - - # plot time-height - if args.path_structure == 2: - flist = glob.glob(file_base2 + - 'PROFILE_' + - dataset + - '/*_' + - trt_cell_id + - '_rhi_profile_*_' + - datatype + - '_hres' + - str(int(args.hres)) + - '.csv') - else: - flist = glob.glob( - file_base2 + 'PROFILE/*_' + trt_cell_id + '_rhi_profile_*_' + - datatype + '_hres' + str(int(args.hres)) + '.csv') - - if not flist: - warn('No profile files found in ' + file_base2 + - 'PROFILE/ for TRT cell ' + - trt_cell_id + ' with resolution ' + str(args.hres)) - else: - if args.path_structure == 1: - labels = ['Mean', 'Min', 'Max'] - else: - labels = [ - '50.0-percentile', - '25.0-percentile', - '75.0-percentile'] - if datatype == 'RhoHVc': - labels = [ - '80.0-percentile', '65.0-percentile', - '95.0-percentile'] - elif datatype == 'hydro': - labels = [ - 'Mode', '2nd most common', '3rd most common', - '% points mode', '% points 2nd most common', - '% points 3rd most common'] - elif datatype == 'entropy' or 'prop' in datatype: - labels = ['Mean', 'Min', 'Max'] - - tbin_edges, hbin_edges, _, data_ma, start_time = ( - read_profile_ts(flist, labels, hres=args.hres, t_res=None)) - - basepath_out = os.path.dirname(flist[0]) - fname = ( - basepath_out + '/' + trt_cell_id + '_trt_TIME_HEIGHT_' + - datatype 
+ '_hres' + str(args.hres) + '.png') - - vmin = vmax = None - if datatype == 'RhoHVc': - vmin = 0.95 - vmax = 1.00 - - xlabel = ( - 'time (s from ' + start_time.strftime("%Y-%m-%d %H:%M:%S") + ')') - _plot_time_range( - tbin_edges, hbin_edges, data_ma, field_name, [fname], - titl=titl, xlabel=xlabel, ylabel='height (m MSL)', - figsize=[10, 8], vmin=vmin, vmax=vmax, dpi=72) - - print("----- plot to '%s'" % fname) - - # Get min and max altitude of graupel/hail area - if datatype == 'hydro': - (traj_ID, yyyymmddHHMM, lon, lat, _, _, _, area, _, _, _, - RANKr, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, - _) = read_trt_traj_data(trt_file_list[i]) - - hmin, hmax = get_graupel_column( - tbin_edges, hbin_edges, data_ma, start_time, - yyyymmddHHMM) - - cell_ID_list = np.append(cell_ID_list, traj_ID) - time_list = np.append(time_list, yyyymmddHHMM) - lon_list = np.append(lon_list, lon) - lat_list = np.append(lat_list, lat) - area_list = np.append(area_list, area) - rank_list = np.append(rank_list, RANKr) - rm_hmin_list = np.ma.append(rm_hmin_list, hmin) - rm_hmax_list = np.ma.append(rm_hmax_list, hmax) - - # plot time-hist - if args.path_structure == 2: - flist = glob.glob( - file_base2 + - 'HISTOGRAM_' + - dataset + - '/*_' + - trt_cell_id + - '_histogram_*_' + - datatype + - '.csv') - else: - flist = glob.glob( - file_base2 + - 'HISTOGRAM/*_' + - trt_cell_id + - '_histogram_*_' + - datatype + - '.csv') - - if not flist: - warn('No histogram files found in ' + file_base2 + - 'HISTOGRAM/ for TRT cell ' + trt_cell_id) - else: - tbin_edges, bin_edges, data_ma, start_time = read_histogram_ts( - flist, datatype, t_res=None) - - basepath_out = os.path.dirname(flist[0]) - fname = ( - basepath_out + - '/' + - trt_cell_id + - '_trt_HISTOGRAM_' + - datatype + - '.png') - - data_ma[data_ma == 0.] 
= np.ma.masked - xlabel = ( - 'time (s from ' + start_time.strftime("%Y-%m-%d %H:%M:%S") + ')') - _plot_time_range( - tbin_edges, bin_edges, data_ma, 'frequency_of_occurrence', - [fname], titl=titl, xlabel=xlabel, - ylabel=get_colobar_label(field_dict, field_name), - vmin=0., vmax=np.max(data_ma), figsize=[10, 8], dpi=72) - - print("----- plot to '%s'" % fname) - - # plot quantiles - flist = glob.glob( - file_base2 + 'QUANTILES/*_' + trt_cell_id + '_quantiles_*_' + - datatype + '.csv') - - if not flist: - warn('No quantiles files found in ' + file_base2 + - 'QUANTILES/ for TRT cell ' + trt_cell_id) - continue - - tbin_edges, qbin_edges, data_ma, start_time = read_quantiles_ts( - flist, step=5., qmin=0., qmax=100., t_res=None) - - basepath_out = os.path.dirname(flist[0]) - fname = ( - basepath_out + - '/' + - trt_cell_id + - '_trt_QUANTILES_' + - datatype + - '.png') - - vmin = vmax = None - if datatype == 'RhoHVc': - vmin = 0.95 - vmax = 1.00 - xlabel = ( - 'time (s from ' + start_time.strftime("%Y-%m-%d %H:%M:%S") + - ')') - _plot_time_range( - tbin_edges, qbin_edges, data_ma, field_name, [fname], - titl=titl, xlabel=xlabel, ylabel='Quantile', vmin=vmin, - vmax=vmax, figsize=[10, 8], dpi=72) - - print("----- plot to '%s'" % fname) - - if 'hydro' in datatype_list: - fname = args.trtbase + 'cell_rimed_particles_column.csv' - write_trt_cell_lightning( - cell_ID_list, time_list, lon_list, lat_list, area_list, - rank_list, rm_hmin_list, rm_hmax_list, fname) - - print("----- written to '%s'" % fname) - - -def get_graupel_column(tbin_edges, hbin_edges, data_ma, start_time, - yyyymmddHHMM): - """ - Gets the minimum and maximum heigth of the graupel column - - Parameters - ---------- - tbin_edges : 1D array of floats - The time bin edges [s] - hbin_edges : 1D array of floats - The height bin edges [m MSL] - data_ma : 2D array of ints - Matrix containing the time-height hydrometeor classification - information - start_time : datetime object - start time of the radar data - yyyymmddHHMM : 1D array of datetime objects - time steps of the TRT cell - - Returns - ------- - hmin, hmax : 1D float arrays - the minimum and maximum altitude of the rimed particles column - - """ - tbin_rights = tbin_edges[1:] - hbin_lefts = hbin_edges[:-1] - hbin_rights = hbin_edges[1:] - H_lefts, _ = np.meshgrid(hbin_lefts, tbin_rights) - H_lefts = np.ma.asarray(H_lefts) - H_rights, _ = np.meshgrid(hbin_rights, tbin_rights) - H_rights = np.ma.asarray(H_rights) - mask = np.logical_not(np.logical_or(data_ma == 4, data_ma == 9)) - H_lefts[mask] = np.ma.masked - H_rights[mask] = np.ma.masked - hmin = np.ma.min(H_lefts, axis=1) - hmax = np.ma.max(H_rights, axis=1) - - # All TRT cell time steps have a corresponding rimed column height value - # Return the values - if tbin_rights.size == yyyymmddHHMM.size: - return hmin, hmax - - # Missing rimed height values. 
Determine those missing and put the data - # in the right time step - hmin_aux = np.ma.masked_all(yyyymmddHHMM.size, dtype=float) - hmax_aux = np.ma.masked_all(yyyymmddHHMM.size, dtype=float) - for k, dt_trt in enumerate(yyyymmddHHMM): - for l, dt_rad in enumerate(tbin_rights): - if dt_trt == start_time + datetime.timedelta(seconds=dt_rad): - hmin_aux[k] = hmin[l] - hmax_aux[k] = hmax[l] - break - return hmin_aux, hmax_aux - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_euclid_data.py b/src/scripts/main_process_euclid_data.py deleted file mode 100644 index 86acf49aa..000000000 --- a/src/scripts/main_process_euclid_data.py +++ /dev/null @@ -1,298 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -main_process_euclid_data -================================================ - -This program reads EUCLID raw data and plots several features such as sources -position, histograms, etc. - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit - -import numpy as np - -from pyrad.io import read_meteorage -from pyrad.util import belongs_roi_indices -from pyrad.graph import plot_pos, plot_histogram - -print(__doc__) - - -def main(): - """ - """ - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'days', nargs='+', type=str, - help='Dates to process. Format YYYYMMDD') - - # keyword arguments - parser.add_argument( - '--basepath', type=str, - default='/store/msrad/lightning/meteorage/', - help='name of folder containing the EUCLID lightning data') - - parser.add_argument( - '--lon', type=str, - default='8.9000010,9.2000000,9.4999970,9.4999970,8.9000010', - help=('longitude of the points defining the perimeter of the area ' + - 'of interest')) - - parser.add_argument( - '--lat', type=str, - default='47.0000030,47.0000030,47.0000030,47.5999930,47.5999930', - help=('latitude of the points defining the perimeter of the area ' + - 'of interest')) - - args = parser.parse_args() - - print("====== EUCLID data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== EUCLID data processing finished: ") - - day_vec = [] - for day in args.days: - day_vec.append(datetime.datetime.strptime(day, '%Y%m%d')) - - lons = args.lon.split(',') - lats = args.lat.split(',') - - if np.size(lons) != np.size(lats): - warn( - str(np.size(lons)) + ' longitudes but ' + str(np.size(lats)) + - ' latitudes. Their number must be equal') - return - - lon_list = [] - lat_list = [] - for i, lon in enumerate(lons): - lon_list.append(float(lon)) - lat_list.append(float(lats[i])) - - roi = { - 'lon': lon_list, - 'lat': lat_list - } - - bin_edges_intens = np.arange(-100., 101., 1.) 
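# A rough, self-contained stand-in for the ROI filtering done below with
# pyrad's belongs_roi_indices: a plain point-in-polygon test via matplotlib.
# This is only a sketch, not the library routine; the stroke coordinates are
# made up and the perimeter matches the --lon/--lat defaults above.
import numpy as np
from matplotlib.path import Path

roi_lon = [8.9, 9.2, 9.5, 9.5, 8.9]
roi_lat = [47.0, 47.0, 47.0, 47.6, 47.6]
lon_pts = np.array([9.0, 9.3, 10.5])    # hypothetical stroke longitudes
lat_pts = np.array([47.1, 47.4, 46.0])  # hypothetical stroke latitudes

poly = Path(np.column_stack([roi_lon, roi_lat]))
inside = poly.contains_points(np.column_stack([lon_pts, lat_pts]))
lon_roi_pts = lon_pts[inside]  # strokes falling inside the perimeter
lat_roi_pts = lat_pts[inside]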
- - lat_all = np.asarray([], dtype=float) - lon_all = np.asarray([], dtype=float) - intens_all = np.asarray([], dtype=float) - intra_all = np.asarray([], dtype=int) - for day in day_vec: - day_str = day.strftime('%y%m%d') - fname = args.basepath + 'THX/THX' + day.strftime('%y%j0000') + '.prd' - - print('Reading EUCLID data file ' + fname) - (stroke_time, lon, lat, intens, ns, mode, intra, ax, ki2, ecc, incl, - sind) = read_meteorage(fname) - - print('N strokes: ' + str(stroke_time.size)) - print('IC: ' + str(intra[intra == 1].size)) - print('CG: ' + str(intra[intra == 0].size)) - - inds, is_roi = belongs_roi_indices(lat, lon, roi) - - if is_roi == 'None': - print('No strokes in ROI') - continue - - lon_roi = lon[inds] - lat_roi = lat[inds] - intens_roi = intens[inds] - intra_roi = intra[inds] - - # add valid data to list - lat_all = np.append(lat_all, lat_roi) - lon_all = np.append(lon_all, lon_roi) - intens_all = np.append(intens_all, intens_roi) - intra_all = np.append(intra_all, intra_roi) - - print('N strokes: ' + str(lon_roi.size)) - print('IC: ' + str(intra_roi[intra_roi == 1].size)) - print('CG: ' + str(intra_roi[intra_roi == 0].size)) - - # plot position all strokes - figfname = args.basepath + day_str + '_EUCLID_strokes_pos.png' - figfname = plot_pos( - lat_roi, lon_roi, intens_roi, [figfname], - cb_label='stroke intensity [kA]', - titl=day_str + ' EUCLID stroke position') - print('Plotted ' + ' '.join(figfname)) - - # plot position IC - figfname = args.basepath + day_str + '_EUCLID_IC_pos.png' - figfname = plot_pos( - lat_roi[intra_roi == 1], lon_roi[intra_roi == 1], - intens_roi[intra_roi == 1], [figfname], - cb_label='stroke intensity [kA]', - titl=day_str + ' EUCLID IC position') - print('Plotted ' + ' '.join(figfname)) - - # plot position CG - lat_CG = lat_roi[intra_roi == 0] - lon_CG = lon_roi[intra_roi == 0] - intens_CG = intens_roi[intra_roi == 0] - - figfname = args.basepath + day_str + '_EUCLID_CG_pos.png' - figfname = plot_pos( - lat_CG, lon_CG, intens_CG, [figfname], - cb_label='stroke intensity [kA]', - titl=day_str + ' EUCLID CG position') - print('Plotted ' + ' '.join(figfname)) - - # plot position CGp - figfname = args.basepath + day_str + '_EUCLID_CGp_pos.png' - figfname = plot_pos( - lat_CG[intens_CG > 0.], lon_CG[intens_CG > 0.], - intens_CG[intens_CG > 0.], [figfname], - cb_label='stroke intensity [kA]', - titl=day_str + ' EUCLID CGp position') - print('Plotted ' + ' '.join(figfname)) - - # plot position CGn - figfname = args.basepath + day_str + '_EUCLID_CGn_pos.png' - figfname = plot_pos( - lat_CG[intens_CG < 0.], lon_CG[intens_CG < 0.], - intens_CG[intens_CG < 0.], [figfname], - cb_label='stroke intensity [kA]', - titl=day_str + ' EUCLID CGn position') - print('Plotted ' + ' '.join(figfname)) - - # Plot histogram intensity all strokes - fname_hist = args.basepath + day_str + '_EUCLID_strokes_hist_intens.png' - fname_hist = plot_histogram( - bin_edges_intens, intens_roi, [ - fname_hist], labelx='Intensity [kA]', - titl=day_str + ' EUCLID stroke intensity') - print('Plotted ' + ' '.join(fname_hist)) - - # Plot histogram intensity IC - fname_hist = args.basepath + day_str + '_EUCLID_IC_hist_intens.png' - fname_hist = plot_histogram( - bin_edges_intens, intens_roi[intra_roi == 1], [fname_hist], - labelx='Intensity [kA]', - titl=day_str + ' EUCLID IC intensity') - print('Plotted ' + ' '.join(fname_hist)) - - # Plot histogram intensity CG - fname_hist = args.basepath + day_str + '_EUCLID_CG_hist_intens.png' - fname_hist = plot_histogram( - bin_edges_intens, 
intens_roi[intra_roi == 0], [fname_hist], - labelx='Intensity [kA]', - titl=day_str + ' EUCLID CG intensity') - - lat_CG = lat_all[intra_all == 0] - lon_CG = lon_all[intra_all == 0] - intens_CG = intens_all[intra_all == 0] - - print('N strokes: ' + str(lon_all.size)) - print('IC: ' + str(intra_all[intra_all == 1].size)) - print('CG: ' + str(lon_CG.size)) - print('CGp: ' + str(lon_CG[intens_CG > 0].size)) - print('CGn: ' + str(lon_CG[intens_CG < 0].size)) - - # plot position all strokes - figfname = args.basepath + 'EUCLID_strokes_pos.png' - figfname = plot_pos( - lat_all, lon_all, intens_all, [figfname], - cb_label='stroke intensity [kA]', - titl=day_str + ' EUCLID stroke position') - print('Plotted ' + ' '.join(figfname)) - - # plot position IC - figfname = args.basepath + 'EUCLID_IC_pos.png' - figfname = plot_pos( - lat_all[intra_all == 1], lon_all[intra_all == 1], - intens_all[intra_all == 1], [figfname], - cb_label='stroke intensity [kA]', - titl='EUCLID IC position') - print('Plotted ' + ' '.join(figfname)) - - # plot position CG - figfname = args.basepath + 'EUCLID_CG_pos.png' - figfname = plot_pos( - lat_CG, lon_CG, intens_CG, [figfname], - cb_label='stroke intensity [kA]', - titl='EUCLID CG position') - print('Plotted ' + ' '.join(figfname)) - - # plot position CGp - figfname = args.basepath + 'EUCLID_CGp_pos.png' - figfname = plot_pos( - lat_CG[intens_CG > 0.], lon_CG[intens_CG > 0.], - intens_CG[intens_CG > 0.], [figfname], - cb_label='stroke intensity [kA]', - titl='EUCLID CGp position') - print('Plotted ' + ' '.join(figfname)) - - # plot position CGn - figfname = args.basepath + 'EUCLID_CGn_pos.png' - figfname = plot_pos( - lat_CG[intens_CG < 0.], lon_CG[intens_CG < 0.], - intens_CG[intens_CG < 0.], [figfname], - cb_label='stroke intensity [kA]', - titl='EUCLID CGn position') - print('Plotted ' + ' '.join(figfname)) - - # Plot histogram intensity all strokes - fname_hist = args.basepath + 'EUCLID_strokes_hist_intens.png' - fname_hist = plot_histogram( - bin_edges_intens, intens_all, [fname_hist], labelx='Intensity [kA]', - titl='EUCLID stroke intensity') - print('Plotted ' + ' '.join(fname_hist)) - - # Plot histogram intensity IC - fname_hist = args.basepath + 'EUCLID_IC_hist_intens.png' - fname_hist = plot_histogram( - bin_edges_intens, intens_all[intra_all == 1], [fname_hist], - labelx='Intensity [kA]', - titl='EUCLID IC intensity') - print('Plotted ' + ' '.join(fname_hist)) - - # Plot histogram intensity CG - fname_hist = args.basepath + 'EUCLID_CG_hist_intens.png' - fname_hist = plot_histogram( - bin_edges_intens, intens_all[intra_all == 0], [fname_hist], - labelx='Intensity [kA]', - titl='EUCLID CG intensity') - print('Plotted ' + ' '.join(fname_hist)) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/main_process_gecsx.py b/src/scripts/main_process_gecsx.py deleted file mode 100755 index 710c1a106..000000000 --- a/src/scripts/main_process_gecsx.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -Pyrad: The MeteoSwiss Radar Processing framework -================================================ - 
-Welcome to Pyrad! - -This program computes the radar visibility using the GECSX algorithm - -References - ---------- - Gabella, M., & Perona, G. (1998). Simulation of the Orographic Influence - on Weather Radar Using a Geometric–Optics Approach, Journal of Atmospheric - and Oceanic Technology, 15(6), 1485-1494. - -To run the processing framework type: - python main_process_data.py \ -[config_file] --starttime [process_start_time] --endtime [process_end_time] \ ---cfgpath [cfgpath] --gatherplots - -cfgpath is an optional argument with default: \ -'$HOME/pyrad/config/processing/' - -if gatherplots is set to 1, all generated figures will be copied into a -new directory called "ALL_FIGURES" located in the output folder. This is -convenient since GECSX can generate many figures and they are placed by Pyrad -in separate folders. - -There are two ways to use this program: - 1. By providing it with a set of valid radar scans, in this case the - "datapath" entry in the main config file must be defined and a valid radar - scan must be located in '/// - .', can be any name, and datatype - must correspond to the datatype entry for the GECSX dataset in the prod - config file. You also need to provide starttime and endtime that include - the timestamp of the radar scan - 2. Without radar data, by providing the following entries in the prod - file for the GECSX dataset (you can choose any value, these are examples) - rmax FLOAT 50000. # [m] maximum range - azmin FLOAT 0. # [deg] minimum azimuth angle - azmax FLOAT 360. # [deg] maximum azimuth angle - anglestep FLOAT 1. # [deg] azimuth angle step - range_resolution FLOAT 50. # [deg] range resolution - antenna_elevations FLTARR 2 # deg - 0.7 - 3.0 - as well as the following entries in the loc file (again choose any value) - RadarPosition STRUCT 3 - latitude FLOAT 46.842473 - longitude FLOAT 6.918370 - altitude FLOAT 449.5 - -See the two examples pay_main_DX50.txt and pay_main_norad.txt in -$HOME/pyrad/config/gecsx/ - -Example: - python main_process_gecsx.py pay_main_norad.txt ---cfgpath $HOME/pyrad/config/gecsx/ --gatherplots 1 - - python main_process_gecsx.py pay_main_DX50.txt --starttime \ -'20160101000000' --endtime '20170101001000' --cfgpath $HOME/pyrad/config/gecsx/ ---gatherplots 1 - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import argparse -import atexit -import os - -from pyrad.flow.flow_control import main_gecsx - -print(__doc__) - - -def main(): - """ - """ - - # parse the arguments - parser = argparse.ArgumentParser( - description='Entry to Pyrad processing framework') - - # positional arguments - parser.add_argument( - 'proc_cfgfile', type=str, help='name of main configuration file') - - # keyword arguments - parser.add_argument( - '--starttime', type=str, default=None, - help=('starting time of the data to be processed. ' + - 'Format ''YYYYMMDDhhmmss''')) - parser.add_argument( - '--endtime', type=str, default=None, - help='end time of the data to be processed. Format ''YYYYMMDDhhmmss''') - parser.add_argument( - '--cfgpath', type=str, - default=os.path.expanduser('~') + '/pyrad/config/processing/', - help='configuration file path') - parser.add_argument("-i", "--infostr", type=str, - help="Information string about the actual data " - "processing (e.g. 'RUN57'). 
This string is added " - "to the filenames of the product files.", - default="") - parser.add_argument( - "-g", - "--gatherplots", - type=int, - help="If set to 1 will create a folder called ALL_FIGURES " - "in the output folder as defined by saveimgbasepath " - "in the main config file, and will copy all generated " - "figures in this folder (for convenience)", - default=1) - args = parser.parse_args() - - print("====== PYRAD data processing started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== PYRAD data processing finished: ") - - print('config path: ' + args.cfgpath) - print('config file: ' + args.proc_cfgfile) - if args.starttime is not None: - print('start time: ' + args.starttime) - else: - print('start time not defined by user') - if args.endtime is not None: - print('end time: ' + args.endtime) - else: - print('end time not defined by user') - - proc_starttime = None - if args.starttime is not None: - proc_starttime = datetime.datetime.strptime( - args.starttime, '%Y%m%d%H%M%S') - proc_endtime = None - if args.endtime is not None: - proc_endtime = datetime.datetime.strptime(args.endtime, '%Y%m%d%H%M%S') - cfgfile_proc = args.cfgpath + args.proc_cfgfile - gatherplots = args.gatherplots - if args.infostr == 'None': - infostr = '' - else: - infostr = args.infostr - - main_gecsx(cfgfile_proc, starttime=proc_starttime, endtime=proc_endtime, - infostr=infostr, gather_plots=gatherplots) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/movie_maker.py b/src/scripts/movie_maker.py deleted file mode 100644 index cfa57caa1..000000000 --- a/src/scripts/movie_maker.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -movie_maker -================================================ - -This program produces a movie and a gif out of all files present in a folder - -""" - -# Author: fvj -# License: BSD 3 clause - -import glob -import os -import datetime -import atexit - -from moviepy.editor import ImageSequenceClip - -print(__doc__) - - -def main(): - """ - main programme - """ - print("====== Movie maker started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== Movie maker finished: ") - - file_type = 'png' - movie_type = 'mp4' - codec = 'mpeg4' - frames_per_second = 1 - movie_path = '/utemp/mdso/figuerasiventuraj/movies/' - file_path_list = [ - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBuZ/PPI_EL00/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBuZ/PPI_EL01/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBuZ/PPI_EL02/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBuZ/PPI_EL03/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBZ/PPI_EL00/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBZ/PPI_EL01/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBZ/PPI_EL02/', - 
'/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/dBZ/PPI_EL03/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/hydroMF_oper/PPI_EL00/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/hydroMF_oper/PPI_EL01/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/hydroMF_oper/PPI_EL02/', - '/utemp/mdso/figuerasiventuraj/pyrad_products/MF_ODIM_OPOU_HAIL/2020-07-01/hydroMF_oper/PPI_EL03/'] - movie_name_list = [ - '20200701_OPOU_ppi_RAW_dBuZ_el0.6', - '20200701_OPOU_ppi_RAW_dBuZ_el1.0', - '20200701_OPOU_ppi_RAW_dBuZ_el1.4', - '20200701_OPOU_ppi_RAW_dBuZ_el1.8', - '20200701_OPOU_ppi_RAW_dBZ_el0.6', - '20200701_OPOU_ppi_RAW_dBZ_el1.0', - '20200701_OPOU_ppi_RAW_dBZ_el1.4', - '20200701_OPOU_ppi_RAW_dBZ_el1.8', - '20200701_OPOU_ppi_RAW_hydroMF_oper_el0.6', - '20200701_OPOU_ppi_RAW_hydroMF_oper_el1.0', - '20200701_OPOU_ppi_RAW_hydroMF_oper_el1.4', - '20200701_OPOU_ppi_RAW_hydroMF_oper_el1.8'] - - create_movie( - file_path_list, movie_name_list, movie_path, file_type=file_type, - fps=frames_per_second, movie_type=movie_type, codec=codec) - - -def create_movie(file_path_list, movie_name_list, movie_path, file_type='png', - fps=1, movie_type='mp4', codec='mpeg4'): - """ - creates the movie. - - can support any type supported by ffmpeg - some examples: - movie type / codec - .avi / rawvideo, png - .mp4 / libx264, mpeg4 - avi/rawvideo supported by libreoffice - mp4 supported by windows media player - - Parameters - ---------- - file_path_list : list of str - List of folders where to find the images for the movies - movie_name_list : list of str - List of movies to create_movie - movie_path : str - path where to store the movies - file_type : str - the individual images file type - fps : int - the frames per second - movie_type : str - the type of movie file - codec : str - the codec used for the movie - - Returns - ------- - Nothing - - """ - for movie_name, file_path in zip(movie_name_list, file_path_list): - file_list = sorted(glob.glob(file_path + '*.' + file_type)) - print(file_list) - - # Generate clip - clip = ImageSequenceClip(file_list, fps=fps) - # Write out clip - if not os.path.isdir(movie_path): - os.makedirs(movie_path) - clip.write_videofile( - movie_path + - movie_name + - '.' 
+ - movie_type, - codec=codec) - clip.write_gif(movie_path + movie_name + '.gif') - - print('Created movie ' + movie_path + movie_name) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() diff --git a/src/scripts/rewrite_monitoring.py b/src/scripts/rewrite_monitoring.py deleted file mode 100644 index f9146d4cd..000000000 --- a/src/scripts/rewrite_monitoring.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -================================================ -rewrite_monitoring -================================================ - -This program rewrites a monitoring time series files into the correct -time order - -""" - -# Author: fvj -# License: BSD 3 clause - -import datetime -import atexit -import numpy as np -import os - -from pyrad.io import read_monitoring_ts, write_monitoring_ts -from pyrad.graph import plot_monitoring_ts -from pyrad.io import generate_field_name_str, get_fieldname_pyart - - -print(__doc__) - - -def main(): - """ - """ - - input_base = ( - '/store/msrad/radar/pyrad_products/') - output_base = ( - '/store/msrad/radar/pyrad_products/') - rad_vec = ['D'] - var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias'] - year_vec = [datetime.datetime(2018, 1, 1)] - - plot_data = True - - print("====== Monitoring rewriting started: %s" % - datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - atexit.register(_print_end_msg, - "====== Monitoring rewriting finished: ") - - for i, rad in enumerate(rad_vec): - print('Processing Radar ' + rad) - for j, var in enumerate(var_vec): - if var == 'dBZ': - basedir = 'rad4alp_gc_PH' + rad - dsdir = 'monitoring_clt_Zh' - mon_type = 'GC_MONITORING' - quantiles = [50., 95., 99.] - elif var == 'dBZv': - basedir = 'rad4alp_gc_PH' + rad - dsdir = 'monitoring_clt_Zv' - mon_type = 'GC_MONITORING' - quantiles = [50., 95., 99.] - elif var == 'RhoHV_rain': - basedir = 'rad4alp_dataquality_PL' + rad - dsdir = 'monitoring_RhoHV' - mon_type = 'MONITORING' - quantiles = [65., 80., 95.] - elif var == 'PhiDP0': - basedir = 'rad4alp_dataquality_PL' + rad - dsdir = 'monitoring_PhiDP0' - mon_type = 'MONITORING' - quantiles = [25., 50., 75.] - elif var == 'ZDR_prec': - basedir = 'rad4alp_dataquality_PL' + rad - dsdir = 'monitoring_ZDR' - mon_type = 'MONITORING' - quantiles = [25., 50., 75.] - elif var == 'ZDR_snow': - basedir = 'rad4alp_dataquality_PL' + rad - dsdir = 'monitoring_ZDR_snow' - mon_type = 'MONITORING' - quantiles = [25., 50., 75.] - elif var == 'dBZ_bias': - basedir = 'rad4alp_dataquality_PL' + rad - dsdir = 'monitoring_Zh_bias' - mon_type = 'MONITORING' - quantiles = [25., 50., 75.] 
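# What "rewriting into the correct time order" amounts to: the series is
# re-sorted chronologically (read_monitoring_ts is called below with
# sort_by_date=True). A toy sketch of that reordering, with made-up values
# rather than real monitoring data:
import numpy as np

ts = np.array(['2018-03-01', '2018-01-05', '2018-02-10'], dtype='datetime64[D]')
vals = np.array([3.0, 1.0, 2.0])
order = np.argsort(ts)                      # chronological index order
ts_sorted, vals_sorted = ts[order], vals[order]
# -> ts_sorted: 2018-01-05, 2018-02-10, 2018-03-01; vals_sorted: 1., 2., 3.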
- - input_path = input_base + basedir + '/' + dsdir + '/VOL_TS/' - output_path = output_base + basedir + '/' + dsdir + '/VOL_TS/' - if not os.path.isdir(output_path): - os.makedirs(output_path) - - print('- Processing Variable ' + var) - for k, year in enumerate(year_vec): - print('-- Processing Year ' + year.strftime('%Y')) - fname_input = ( - input_path + year.strftime('%Y') + '_' + rad + - '_ts_' + mon_type + '_' + var + '.csv') - fname_output = ( - output_path + year.strftime('%Y') + '_' + rad + - '_ts_' + mon_type + '_' + var + '.csv') - figfname = [ - output_path + year.strftime('%Y') + '_' + rad + - '_ts_' + mon_type + '_' + var + '.png'] - - date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = ( - read_monitoring_ts(fname_input, sort_by_date=True)) - - if date is None: - continue - - val_vec = np.ma.asarray( - [lquant_vec, cquant_vec, hquant_vec]).T - fname = write_monitoring_ts( - date, np_t_vec, val_vec, quantiles, var, - fname_output, rewrite=True) - - print('written file ' + fname) - - if not plot_data: - continue - - titldate = (date[0].strftime('%Y%m%d') + '-' + - date[-1].strftime('%Y%m%d')) - titl = rad + ' Monitoring ' + titldate - - labely = generate_field_name_str(var) - - if var == 'dBZ': - if rad == 'A': - ref_value = 49.5 - vmin = 44.5 - vmax = 54.5 - np_min = 100000 - elif rad == 'D': - ref_value = 48.5 - vmin = 43.5 - vmax = 53.5 - np_min = 20000 - elif rad == 'L': - ref_value = 67. - vmin = 62. - vmax = 72. - np_min = 100000 - elif rad == 'P': - ref_value = 69. - vmin = 64. - vmax = 74. - np_min = 100000 - elif rad == 'W': - ref_value = 27.5 - vmin = 22.5 - vmax = 32.5 - np_min = 100000 - elif var == 'dBZv': - if rad == 'A': - ref_value = 51.5 - vmin = 46.5 - vmax = 56.5 - np_min = 100000 - elif rad == 'D': - ref_value = 50.5 - vmin = 45.5 - vmax = 55.5 - np_min = 20000 - elif rad == 'L': - ref_value = 69.5 - vmin = 64.5 - vmax = 74.5 - np_min = 100000 - elif rad == 'P': - ref_value = 68.5 - vmin = 63.5 - vmax = 73.5 - np_min = 100000 - elif rad == 'W': - ref_value = 26.5 - vmin = 21.5 - vmax = 31.5 - np_min = 100000 - elif var == 'RhoHV_rain': - ref_value = 0.99 - vmin = 0.95 - vmax = 1.01 - np_min = 5000 - elif var == 'PhiDP0': - ref_value = 0. - vmin = -20. - vmax = 20. - np_min = 500000 - elif var == 'ZDR_prec': - ref_value = 0.2 - vmin = -2. - vmax = 2. - np_min = 5000 - elif var == 'ZDR_snow': - ref_value = 0.2 - vmin = -2. - vmax = 2. - np_min = 5000 - elif var == 'dBZ_bias': - ref_value = 0. - vmin = -30. - vmax = 30. 
- np_min = 100 - - fname = plot_monitoring_ts( - date, np_t_vec, cquant_vec, lquant_vec, hquant_vec, - get_fieldname_pyart(var), figfname, - ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min, - labelx='Time UTC', labely=labely, titl=titl) - print('plotted file ' + ' '.join(fname)) - - -def _print_end_msg(text): - """ - prints end message - - Parameters - ---------- - text : str - the text to be printed - - Returns - ------- - Nothing - - """ - print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")) - - -# --------------------------------------------------------- -# Start main: -# --------------------------------------------------------- -if __name__ == "__main__": - main() From c2e02302fae569525876701c6627d77c7ddb0b1e Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Sun, 8 Sep 2024 11:36:47 +0200 Subject: [PATCH 08/15] ADD: python3.12 to ci --- .github/workflows/pyrad_tests_base.yml | 2 +- .github/workflows/pyrad_tests_base_dev.yml | 2 +- .github/workflows/pyrad_tests_mch.yml | 2 +- .github/workflows/pyrad_tests_mch_dev.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pyrad_tests_base.yml b/.github/workflows/pyrad_tests_base.yml index 8294774a1..8ef5afb66 100644 --- a/.github/workflows/pyrad_tests_base.yml +++ b/.github/workflows/pyrad_tests_base.yml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu-latest" ] - python-version: ["3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12"] max-parallel: 6 defaults: diff --git a/.github/workflows/pyrad_tests_base_dev.yml b/.github/workflows/pyrad_tests_base_dev.yml index 8e677e099..201de3a11 100644 --- a/.github/workflows/pyrad_tests_base_dev.yml +++ b/.github/workflows/pyrad_tests_base_dev.yml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu-latest" , "windows-latest"] - python-version: ["3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12"] max-parallel: 6 defaults: diff --git a/.github/workflows/pyrad_tests_mch.yml b/.github/workflows/pyrad_tests_mch.yml index 28023c878..1fa2fd29f 100644 --- a/.github/workflows/pyrad_tests_mch.yml +++ b/.github/workflows/pyrad_tests_mch.yml @@ -25,7 +25,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu-latest"] - python-version: [ "3.9", "3.10", "3.11"] + python-version: [ "3.9", "3.10", "3.11", "3.12"] max-parallel: 6 defaults: diff --git a/.github/workflows/pyrad_tests_mch_dev.yml b/.github/workflows/pyrad_tests_mch_dev.yml index e0bd1ef30..728b4526f 100644 --- a/.github/workflows/pyrad_tests_mch_dev.yml +++ b/.github/workflows/pyrad_tests_mch_dev.yml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu-latest"] - python-version: [ "3.9", "3.10", "3.11"] + python-version: [ "3.9", "3.10", "3.11", "3.12"] max-parallel: 6 defaults: From 62f93ca6ecbae88f869de174d8e6fdec7c49ab79 Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Sun, 8 Sep 2024 11:44:33 +0200 Subject: [PATCH 09/15] FIX: removed pinning of gdal to 3.5.3 for compatibility with python3.12 --- .github/workflows/pyrad_tests_mch.yml | 2 +- .github/workflows/pyrad_tests_mch_dev.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pyrad_tests_mch.yml b/.github/workflows/pyrad_tests_mch.yml index 1fa2fd29f..e475ac99e 100644 --- a/.github/workflows/pyrad_tests_mch.yml +++ b/.github/workflows/pyrad_tests_mch.yml @@ -1,7 +1,7 @@ name: Test pyrad mch env: MINIMAL_DEPENDENCIES: Cython numpy cartopy - TEST_DEPENDENCIES: pytest imageio pygrib gdal==3.5.3 + TEST_DEPENDENCIES: 
pytest imageio pygrib gdal on: # Triggers the workflow on push or pull request events but only for the master branch diff --git a/.github/workflows/pyrad_tests_mch_dev.yml b/.github/workflows/pyrad_tests_mch_dev.yml index 728b4526f..241e988cb 100644 --- a/.github/workflows/pyrad_tests_mch_dev.yml +++ b/.github/workflows/pyrad_tests_mch_dev.yml @@ -1,7 +1,7 @@ name: Test pyrad mch dev env: MINIMAL_DEPENDENCIES: Cython numpy cartopy - TEST_DEPENDENCIES: pytest pygrib imageio gdal==3.5.3 + TEST_DEPENDENCIES: pytest pygrib imageio gdal on: # Triggers the workflow on push or pull request events but only for the master branch From f8b3cd5bed09e3606482620e702487b3f9583c74 Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Sun, 8 Sep 2024 12:12:42 +0200 Subject: [PATCH 10/15] FIX: rm old references to COSMO in doc --- doc/source/overview/list_datadescriptors.rst | 72 +++++++++----------- doc/source/overview/list_variables.rst | 6 +- 2 files changed, 34 insertions(+), 44 deletions(-) diff --git a/doc/source/overview/list_datadescriptors.rst b/doc/source/overview/list_datadescriptors.rst index 9c6f11ba5..4d6b3e23b 100644 --- a/doc/source/overview/list_datadescriptors.rst +++ b/doc/source/overview/list_datadescriptors.rst @@ -333,70 +333,60 @@ example MeteoSwiss specific ------------------------------------- -COSMO +ICON ----------------------------- description - Reads data from the COSMO model, previously ingested by pyrad and converted to polar coordinates with the `COSMO_LOOKUP dataset `_. + Reads data from the ICON model, previously ingested by pyrad and converted to polar coordinates with the `ICON_LOOKUP dataset `_. For this to work you will need to define the variable *iconpath* in the main pyrad configuration file. usage - *COSMO:datatype* where datatype is a pyrad variable name as listed `here `_. + *ICON:datatype* where datatype is a pyrad variable name as listed `here `_. example - *COSMO:TEMP* will use the COSMO temperature field as generated by the COSMO_LOOKUP dataset. This can be used for example in the HYDROCLASS dataset. :: + *ICON:TEMP* will use the ICON temperature field as generated by the ICON_LOOKUP dataset. This can be used for example in the HYDROCLASS dataset. + :: - TEMP STRUCT 6 - type STRING COSMO_LOOKUP - datatype STRARR 1 - dBZ - cosmo_type STRING TEMP - regular_grid INT 0 - lookup_table INT 1 - MAKE_GLOBAL INT 1 - - hydroclass STRUCT 6 - type STRING HYDROCLASS - datatype STRARR 5 - PROC:dBZc - PROC:ZDRc - PROC:RhoHVc - PROC:KDPc - COSMO:TEMP - HYDRO_METHOD STRING SEMISUPERVISED - RADARCENTROIDS STRING DX50 + type STRING HYDROCLASS + datatype STRARR 5 + PROC:dBZc + PROC:ZDRc + PROC:RhoHVc + PROC:KDPc + ICON:TEMP + HYDRO_METHOD STRING SEMISUPERVISED + RADARCENTROIDS STRING DX50 -CFRADIALCOSMO +CFRADIALICON ----------------------------- description - Used to read COSMO data interpolated to radar coordinates in a CFRadial file format. + Used to read ICON data interpolated to radar coordinates in a CFRadial file format. 
usage - *CFRADIALCOSMO:datatype,dataset* + *CFRADIALICON:datatype,dataset* where - - datatype is a pyrad variable name as listed `here `_ - - dataset is the name of the GECSX pyrad dataset for which the SAVEVOL or SAVEALL product has been generated + - datatype is a pyrad variable name as listed `here `_ + - dataset is the name of the pyrad dataset for which the SAVEVOL or SAVEALL product which contains the ICON data has been generated example :: - DX50_PLA_coloc_gates STRUCT 9 - type STRING COLOCATED_GATES - datatype STRARR 4 - RADAR001:CFRADIAL:dBZc,dBZ_avg,SAVEVOL - RADAR001:DEM:VIS - RADAR002:CFRADIAL:dBZc,dBZ_avg,SAVEVOL - RADAR002:RAD4ALPDEM:VIS - h_tol FLOAT 100. - latlon_tol FLOAT 0.005 # approx. 500 m tolerance - vol_d_tol FLOAT 100. - vismin FLOAT 99. - hmax FLOAT 10000. - elmax FLOAT 20. + hydroclass STRUCT 6 + type STRING HYDROCLASS + datatype STRARR 5 + PROC:dBZc + PROC:ZDRc + PROC:RhoHVc + PROC:KDPc + CFRADIALICON:TEMP,mals_mei21_vol + HYDRO_METHOD STRING SEMISUPERVISED + RADARCENTROIDS STRING DX50 + MAKE_GLOBAL INT 1 + RAD4ALP diff --git a/doc/source/overview/list_variables.rst b/doc/source/overview/list_variables.rst index 6d91eda85..d0ef26040 100644 --- a/doc/source/overview/list_variables.rst +++ b/doc/source/overview/list_variables.rst @@ -17,11 +17,11 @@ Py-ART to Pyrad :header-rows: 1 -COSMO to Pyrad +ICON to Pyrad ------------------------------------ -.. csv-table:: COSMO to Pyrad mappings - :file: mappings/pyrad_to_cosmo.txt +.. csv-table:: ICON to Pyrad mappings + :file: mappings/pyrad_to_icon.txt :header-rows: 1 From 2208223de5accee5027a9261d4ff8f196069e698 Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Sun, 8 Sep 2024 12:33:50 +0200 Subject: [PATCH 11/15] ENH: add small explanation for DataTypeIDInFiles --- doc/source/overview/list_variables.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/source/overview/list_variables.rst b/doc/source/overview/list_variables.rst index d0ef26040..f7045548a 100644 --- a/doc/source/overview/list_variables.rst +++ b/doc/source/overview/list_variables.rst @@ -1,6 +1,12 @@ List of pyrad variables ============================== +.. note:: + Pyrad uses the following mappings to map the variable names in your files to the short names used by pyrad (dBZ, dBZc, RhoHV and else). + the "Py-ART to Pyrad" mappings are used for CFRadial files and the "ODIM to Pyrad" mappings are used for ODIM HDF5 files. If your files do not contain the standard + names listed below, you can use the keyword DataTypeIDInFiles in the loc files to provide them. Please see :doc:`loc`. + + ODIM to Pyrad ------------------------------------ From b91c0d46b588d2e0904af3c5fb5630b7fdcd660c Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Wed, 18 Sep 2024 11:31:02 +0200 Subject: [PATCH 12/15] ENH: allow writing to s3 for monitoring products even if alarms are activated --- .../pyrad/prod/process_monitoring_products.py | 34 +++++++++++-------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/pyrad_proc/pyrad/prod/process_monitoring_products.py b/src/pyrad_proc/pyrad/prod/process_monitoring_products.py index f629ce880..4531516e1 100755 --- a/src/pyrad_proc/pyrad/prod/process_monitoring_products.py +++ b/src/pyrad_proc/pyrad/prod/process_monitoring_products.py @@ -556,30 +556,33 @@ def generate_monitoring_products(dataset, prdcfg): # generate alarms if needed alarm = prdcfg.get("alarm", False) + if not alarm: return figfname_list + output = figfname_list + if "tol_abs" not in prdcfg: warn("unable to send alarm. 
Missing tolerance on target") - return None + return output if "tol_trend" not in prdcfg: warn("unable to send alarm. Missing tolerance in trend") - return None + return output if "nevents_min" not in prdcfg: warn( "unable to send alarm. " + "Missing minimum number of events to compute trend" ) - return None + return output if "sender" not in prdcfg: warn("unable to send alarm. Missing email sender") - return None + return output if "receiver_list" not in prdcfg: warn("unable to send alarm. Missing email receivers") - return None + return output tol_abs = prdcfg["tol_abs"] tol_trend = prdcfg["tol_trend"] @@ -592,7 +595,7 @@ def generate_monitoring_products(dataset, prdcfg): if np_last < np_min: warn("No valid data on day " + date[-1].strftime("%d-%m-%Y")) - return None + return output # check if absolute value exceeded abs_exceeded = False @@ -684,7 +687,8 @@ def generate_monitoring_products(dataset, prdcfg): ) send_msg(sender, receiver_list, subject, alarm_fname) - return alarm_fname + output.append(alarm_fname) + return output if prdcfg["type"] == "CUMUL_VOL_TS": field_name = get_fieldname_pyart(prdcfg["voltype"]) @@ -849,27 +852,28 @@ def generate_monitoring_products(dataset, prdcfg): if not alarm: return figfname_list + output = figfname_list if "tol_abs" not in prdcfg: warn("unable to send alarm. Missing tolerance on target") - return None + return output if "tol_trend" not in prdcfg: warn("unable to send alarm. Missing tolerance in trend") - return None + return output if "nevents_min" not in prdcfg: warn( "unable to send alarm. " + "Missing minimum number of events to compute trend" ) - return None + return output if "sender" not in prdcfg: warn("unable to send alarm. Missing email sender") - return None + return output if "receiver_list" not in prdcfg: warn("unable to send alarm. Missing email receivers") - return None + return output tol_abs = prdcfg["tol_abs"] tol_trend = prdcfg["tol_trend"] @@ -882,7 +886,7 @@ def generate_monitoring_products(dataset, prdcfg): if np_last < np_min: warn("No valid data on day " + date[-1].strftime("%d-%m-%Y")) - return None + return output # check if absolute value exceeded abs_exceeded = False @@ -928,7 +932,7 @@ def generate_monitoring_products(dataset, prdcfg): if abs_exceeded is False and trend_exceeded is False: - return None + return output alarm_dir = savedir + "/alarms/" if not os.path.isdir(alarm_dir): @@ -974,7 +978,8 @@ def generate_monitoring_products(dataset, prdcfg): ) send_msg(sender, receiver_list, subject, alarm_fname) - return alarm_fname + output.append(alarm_fname) + return output if prdcfg["type"] == "SAVEVOL": field_name = get_fieldname_pyart(prdcfg["voltype"]) From 1196941ba796af0c167e3c6387a4778e04a14848 Mon Sep 17 00:00:00 2001 From: radarv Date: Thu, 19 Sep 2024 11:37:58 +0000 Subject: [PATCH 13/15] ENH: rewrite read_smn to use pandas + support for csv.gz --- src/pyrad_proc/pyrad/io/read_data_sensor.py | 82 +++++++++------------ 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/src/pyrad_proc/pyrad/io/read_data_sensor.py b/src/pyrad_proc/pyrad/io/read_data_sensor.py index 0dd1ab47b..92048c735 100755 --- a/src/pyrad_proc/pyrad/io/read_data_sensor.py +++ b/src/pyrad_proc/pyrad/io/read_data_sensor.py @@ -2317,10 +2317,9 @@ def get_sensor_data(date, datatype, cfg): return sensordate, sensorvalue, label, period - def read_smn(fname): """ - Reads SwissMetNet data contained in a csv file + Reads SwissMetNet data contained in a csv or gzipped csv file.
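+ The expected file layout is taken from the parsing code below: the file must provide a "StationID" column, a "DateTime" column in %Y%m%d%H%M%S format, and columns whose names contain "AirPressure", "2mTemperature", "RH", "Precipitation", "Windspeed" and "Winddirection" (a unit suffix in the column name, e.g. "AirPressure:degC", is tolerated).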
Parameters ---------- @@ -2329,54 +2328,45 @@ def read_smn(fname): Returns ------- - smn_id, date , pressure, temp, rh, precip, wspeed, wdir : tupple + smn_id, date, pressure, temp, rh, precip, wspeed, wdir : tuple The read values - """ fill_value = 10000000.0 - try: - with open(fname, "r", newline="") as csvfile: - # first count the lines - reader = csv.DictReader(csvfile) - nrows = sum(1 for row in reader) - smn_id = np.ma.empty(nrows, dtype="float32") - pressure = np.ma.empty(nrows, dtype="float32") - temp = np.ma.empty(nrows, dtype="float32") - rh = np.ma.empty(nrows, dtype="float32") - precip = np.ma.empty(nrows, dtype="float32") - wspeed = np.ma.empty(nrows, dtype="float32") - wdir = np.ma.empty(nrows, dtype="float32") - # now read the data - csvfile.seek(0) - reader = csv.DictReader(csvfile) - date = [] - for i, row in enumerate(reader): - smn_id[i] = float(row["StationID"]) - date.append(datetime.datetime.strptime(row["DateTime"], "%Y%m%d%H%M%S")) - pressure[i] = float(row["AirPressure"]) - temp[i] = float(row["2mTemperature"]) - rh[i] = float(row["RH"]) - precip[i] = float(row["Precipitation"]) - wspeed[i] = float(row["Windspeed"]) - wdir[i] = float(row["Winddirection"]) - - pressure = np.ma.masked_values(pressure, fill_value) - temp = np.ma.masked_values(temp, fill_value) - rh = np.ma.masked_values(rh, fill_value) - precip = np.ma.masked_values(precip, fill_value) - wspeed = np.ma.masked_values(wspeed, fill_value) - wdir = np.ma.masked_values(wdir, fill_value) - - # convert precip from mm/10min to mm/h - precip *= 6.0 - - csvfile.close() - - return smn_id, date, pressure, temp, rh, precip, wspeed, wdir - except EnvironmentError as ee: - warn(str(ee)) - warn("Unable to read file " + fname) + try: + # Use pandas to read the file (supports .gz files directly) + df = pd.read_csv(fname, compression='gzip' if fname.endswith('.gz') else None) + + # Convert date strings to datetime objects + df['DateTime'] = pd.to_datetime(df['DateTime'], format='%Y%m%d%H%M%S') + + # Identify columns by searching for keywords (handles cases like AirPressure:degC) + air_pressure_col = [col for col in df.columns if 'AirPressure' in col][0] + temp_col = [col for col in df.columns if '2mTemperature' in col][0] + rh_col = [col for col in df.columns if 'RH' in col][0] + precip_col = [col for col in df.columns if 'Precipitation' in col][0] + wspeed_col = [col for col in df.columns if 'Windspeed' in col][0] + wdir_col = [col for col in df.columns if 'Winddirection' in col][0] + + # Mask invalid data (fill_value) + pressure = np.ma.masked_values(df[air_pressure_col].astype('float32'), fill_value) + temp = np.ma.masked_values(df[temp_col].astype('float32'), fill_value) + rh = np.ma.masked_values(df[rh_col].astype('float32'), fill_value) + precip = np.ma.masked_values(df[precip_col].astype('float32'), fill_value) + wspeed = np.ma.masked_values(df[wspeed_col].astype('float32'), fill_value) + wdir = np.ma.masked_values(df[wdir_col].astype('float32'), fill_value) + + # Convert precip from mm/10min to mm/h + precip *= 6.0 + + # Extract smn_id and date + smn_id = df['StationID'].astype('float32').values + date = df['DateTime'].tolist() + + return smn_id, date, pressure, temp, rh, precip, wspeed, wdir + + except Exception as e: + warn(f"Unable to read file {fname}: {e}") return None, None, None, None, None, None, None, None From 78d1170c47bd3a963a1961eb4843d05c908ea885 Mon Sep 17 00:00:00 2001 From: radarv Date: Mon, 23 Sep 2024 15:30:05 +0000 Subject: [PATCH 14/15] ENH: rewrite read_sensor_data to allow radarV structure 
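get_sensor_data() so far only looked for rain gauge files in the flat monthly archive; it now also probes the per-day, per-station radarV tree and picks up gzipped csv files through the ".csv*" glob. As a minimal sketch of the lookup (the base path and station id below are made-up examples standing in for cfg["smnpath"] and cfg["sensorid"], not values from the patch):

    import datetime
    import glob
    import os

    date = datetime.datetime(2024, 9, 23)  # example date
    smnpath = "/path/to/smn/"              # hypothetical base path
    sensorid = "1234"                      # hypothetical station id
    day = date.strftime("%Y%m%d")
    # flat monthly layout: <smnpath>/YYYYMM/YYYYMMDD_<id>.csv[.gz]
    files1 = glob.glob(os.path.join(
        smnpath, date.strftime("%Y%m"), day + "_" + sensorid + ".csv*"))
    # radarV layout: <smnpath>/YYYY-MM-DD/<id>/YYYYMMDD_<id>.csv[.gz]
    files2 = glob.glob(os.path.join(
        smnpath, date.strftime("%Y-%m-%d"), sensorid, day + "_" + sensorid + ".csv*"))
    # when both layouts contain a file, the radarV one is the one read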
--- src/pyrad_proc/pyrad/io/read_data_sensor.py | 22 +++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/pyrad_proc/pyrad/io/read_data_sensor.py b/src/pyrad_proc/pyrad/io/read_data_sensor.py index 92048c735..b53ac6b2a 100755 --- a/src/pyrad_proc/pyrad/io/read_data_sensor.py +++ b/src/pyrad_proc/pyrad/io/read_data_sensor.py @@ -2218,12 +2218,26 @@ def get_sensor_data(date, datatype, cfg): """ if cfg["sensor"] == "rgage": - datapath = cfg["smnpath"] + date.strftime("%Y%m") + "/" - datafile = date.strftime("%Y%m%d") + "_" + cfg["sensorid"] + ".csv" + # Try several file formats + datapath1 = os.path.join(cfg["smnpath"], date.strftime("%Y%m")) + datapath2 = os.path.join(cfg["smnpath"], date.strftime("%Y-%m-%d"), + cfg["sensorid"]) + datafile1 = os.path.join(datapath1, date.strftime("%Y%m%d") + + "_" + cfg["sensorid"] + ".csv*") + datafile2 = os.path.join(datapath2, date.strftime("%Y%m%d") + + "_" + cfg["sensorid"] + ".csv*") + + files1 = glob.glob(datafile1) + files2 = glob.glob(datafile2) + if not files1 and not files2: + warn("No rain gauge file found in " + datapath1 + " or " + datapath2) + return None, None, None, None + if len(files1): + datafile = files1[0] + if len(files2): + datafile = files2[0] + try: - _, sensordate, _, _, _, sensorvalue, _, _ = read_smn(datapath + datafile) + _, sensordate, _, _, _, sensorvalue, _, _ = read_smn(datafile) except Exception: - _, sensordate, sensorvalue = read_smn2(datapath + datafile) + _, sensordate, sensorvalue = read_smn2(datafile) if sensordate is None: return None, None, None, None label = "RG" From efa652c119b43653a6d51bc228c83bda3660e90e Mon Sep 17 00:00:00 2001 From: Daniel Wolfensberger Date: Mon, 23 Sep 2024 20:56:28 +0200 Subject: [PATCH 15/15] ENH: added description of required datatypes in all dataset docstrings --- src/pyrad_proc/pyrad/proc/process_Doppler.py | 56 ++++-- src/pyrad_proc/pyrad/proc/process_calib.py | 54 +++-- src/pyrad_proc/pyrad/proc/process_dem.py | 19 +- .../pyrad/proc/process_echoclass.py | 135 +++++++++---- src/pyrad_proc/pyrad/proc/process_grid.py | 73 ++++--- src/pyrad_proc/pyrad/proc/process_icon.py | 39 ++-- .../pyrad/proc/process_intercomp.py | 61 ++++-- src/pyrad_proc/pyrad/proc/process_iq.py | 72 ++++--- .../pyrad/proc/process_monitoring.py | 76 +++++-- src/pyrad_proc/pyrad/proc/process_phase.py | 65 ++++-- src/pyrad_proc/pyrad/proc/process_retrieve.py | 104 +++++++--- src/pyrad_proc/pyrad/proc/process_spectra.py | 187 ++++++++++++++---- .../pyrad/proc/process_timeseries.py | 36 ++-- src/pyrad_proc/pyrad/proc/process_traj.py | 18 +- 14 files changed, 704 insertions(+), 291 deletions(-) diff --git a/src/pyrad_proc/pyrad/proc/process_Doppler.py b/src/pyrad_proc/pyrad/proc/process_Doppler.py index 241bc1dbb..bac1b9c2d 100644 --- a/src/pyrad_proc/pyrad/proc/process_Doppler.py +++ b/src/pyrad_proc/pyrad/proc/process_Doppler.py @@ -65,7 +65,9 @@ def process_turbulence(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "dBuZ" or "dBZ" or "dBZc" or "dBuZv" or "dBZv" or "dBZvc" or "CNRc", and, + "W" or "Wv" or "Wu" or "Wvu" or "WD" or "WDc" radius : float. Dataset keyword Search radius for calculating Eddy Dissipation Rate (EDR). Default 2 @@ -97,7 +99,7 @@ def process_turbulence(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "EDR" ind_rad : int radar index @@ -235,7 +237,8 @@ def process_dealias_fourdd(procstatus, dscfg, radar_list=None): data set configuration.
Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain + "V" or "Vc" filt : int. Dataset keyword Flag controlling Bergen and Albers filter, 1 = yes, 0 = no. sign : int. Dataset keyword @@ -252,7 +255,7 @@ def process_dealias_fourdd(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dealV" or "dealVc" (if Vc was provided) ind_rad : int radar index @@ -357,7 +360,8 @@ def process_dealias_region_based(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "V" or "Vc" interval_splits : int, optional Number of segments to split the nyquist interval into when finding regions of similar velocity. More splits creates a larger number @@ -387,7 +391,7 @@ def process_dealias_region_based(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dealV" or "dealVc" (if Vc was provided) ind_rad : int radar index @@ -458,7 +462,8 @@ def process_dealias_unwrap_phase(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "V" or "Vc" unwrap_unit : {'ray', 'sweep', 'volume'}, optional Unit to unwrap independently. 'ray' will unwrap each ray individually, 'sweep' each sweep, and 'volume' will unwrap the @@ -473,7 +478,7 @@ def process_dealias_unwrap_phase(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dealV" or "dealVc" (if Vc was provided) ind_rad : int radar index @@ -534,7 +539,10 @@ def process_radial_velocity(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain + WIND_SPEED, and, + WIND_DIRECTION, and, + wind_vel_v latitude, longitude : float arbitrary coordinates [deg] from where to compute the radial velocity. If any of them is None it will be the radar position @@ -547,7 +555,7 @@ def process_radial_velocity(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "V" ind_rad : int radar index @@ -665,7 +673,8 @@ def process_wind_vel(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain + "V" or "Vc" vert_proj : Boolean If true the vertical projection is computed. 
Otherwise the horizontal projection is computed @@ -675,7 +684,9 @@ def process_wind_vel(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "wind_vel_h_az", (if vert_proj is False), or, + "wind_vel_v" (if vert_proj is True) ind_rad : int radar index @@ -737,7 +748,7 @@ def process_windshear(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "windshear_v" ind_rad : int radar index @@ -786,7 +797,8 @@ def process_windshear_lidar(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain + "V" or "Vc" az_tol : float The tolerance in azimuth when looking for gates on top of the gate when computation is performed @@ -797,7 +809,7 @@ def process_windshear_lidar(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "windshear_v" ind_rad : int radar index @@ -847,14 +859,17 @@ def process_vad(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain + "V" or "Vc" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "wind_vel_h_u", "wind_vel_h_v", "wind_vel_v", + "estV", "stdV", and "diffV" ind_rad : int radar index @@ -923,7 +938,9 @@ def process_dda(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain + "V" or "Vc", and, + "dBuZ", "dBZ", or "dBZc" gridconfig : dictionary. Dataset keyword Dictionary containing some or all of this keywords: @@ -997,7 +1014,8 @@ def process_dda(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "wind_vel_h_u", "wind_vel_h_v" and "wind_vel_v" ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_calib.py b/src/pyrad_proc/pyrad/proc/process_calib.py index 3765d5c08..d2f2fbc13 100755 --- a/src/pyrad_proc/pyrad/proc/process_calib.py +++ b/src/pyrad_proc/pyrad/proc/process_calib.py @@ -53,7 +53,7 @@ def process_correct_bias(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The data type to correct for bias + The data type to correct for bias, can be any datatype supported by pyrad bias : float. Dataset keyword The bias to be corrected [dB]. Default 0 radar_list : list of Radar objects @@ -62,7 +62,9 @@ def process_correct_bias(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -118,14 +120,19 @@ def process_correct_noise_rhohv(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. 
Dataset keyword - The data types used in the correction + The data types used in the correction, must contain + "uRhoHV", and, + "SNRh", and, + "ZDRc", and, + "Nh", and, + "Nv" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "RhoHV" ind_rad : int radar index @@ -200,7 +207,9 @@ def process_gc_monitoring(procstatus, dscfg, radar_list=None): excessgates_fname : str. Dataset keyword The name of the gates in excess of quantile file datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "echoID" (Optional allows filter_prec), + as well as any other fields supported by pyrad step : float. Dataset keyword The width of the histogram bin. Default is None. In that case the default step in function get_histogram_bins is used @@ -221,7 +230,8 @@ def process_gc_monitoring(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : Radar - radar object containing histogram data + radar object containing histogram data with fields corresponding + to specified datatypes ind_rad : int radar index @@ -429,7 +439,9 @@ def process_occurrence(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "echoID" (Optional allows filter_prec), + as well as any other fields supported by pyrad regular_grid : Boolean. Dataset keyword Whether the radar has a Boolean grid or not. Default False rmin, rmax : float. Dataset keyword @@ -450,7 +462,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + radar object containing frequency of occurrence data with fields corresponding + to specified datatypes ind_rad : int radar index @@ -652,7 +665,10 @@ def process_time_avg_std(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "echoID" (Optional allows filter_prec), + "dBZ" or "dBZc" or "dBZv" or "dBZvc" or "dBuZ" or "dBuZc" (Optional, allows val_min) + as well as any other fields supported by pyrad regular_grid : Boolean. Dataset keyword Whether the radar has a Boolean grid or not. Default False rmin, rmax : float. Dataset keyword @@ -677,7 +693,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the average and standard deviation for every field + specified as datatype ind_rad : int radar index @@ -918,7 +935,9 @@ def process_occurrence_period(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "occurence" and, + "nsamples" regular_grid : Boolean. Dataset keyword Whether the radar has a Boolean grid or not. Default False rmin, rmax : float. Dataset keyword minimum and maximum ranges where the data is used [m]. If -1 the whole range is used. Default -1 radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "occurence" and + "nsamples" ind_rad : int radar index @@ -1075,7 +1095,9 @@ def process_sun_hits(procstatus, dscfg, radar_list=None): data set configuration.
Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBm", and, + "dBmv", and, + "ZDR", or "ZDRu" delev_max : float. Dataset keyword maximum elevation distance from nominal radar elevation where to look for a sun hit signal [deg]. Default 1.5 @@ -1635,7 +1658,10 @@ def process_sunscan(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBm", and, + "dBmv", and, + "ZDR", or "ZDRu" delev_max : float. Dataset keyword maximum elevation distance from nominal radar elevation where to look for a sun hit signal [deg]. Default 1.5 diff --git a/src/pyrad_proc/pyrad/proc/process_dem.py b/src/pyrad_proc/pyrad/proc/process_dem.py index a8c2729e2..5a31aee41 100644 --- a/src/pyrad_proc/pyrad/proc/process_dem.py +++ b/src/pyrad_proc/pyrad/proc/process_dem.py @@ -39,7 +39,7 @@ def process_dem(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad keep_in_memory : int. Dataset keyword if set keeps the COSMO data dict, the COSMO coordinates dict and the COSMO field in radar coordinates in memory. Default False @@ -57,7 +57,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field with name corresponding + to dem_field ind_rad : int radar index @@ -136,7 +137,7 @@ def process_visibility(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad offset : float. Dataset keyword The offset above the minimum visibility that must be filtered radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "visibility" ind_rad : int radar index @@ -204,7 +206,8 @@ def process_gecsx(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + arbitrary data type supported by pyrad + range_discretization : float.
Dataset keyword Range discretization used when computing the Cartesian visibility field the larger the better but the slower the processing will be @@ -245,6 +248,12 @@ def process_gecsx(procstatus, dscfg, radar_list=None): new_dataset : list of dict list of dictionaries containing the polar data output and the Cartesian data output in this order + The first dictionary (polar) contains the following fields: + "rcs_clutter", "dBm_clutter", "dBZ_clutter" and "visibility_polar" + The second dictionary (cart) contains the following fields: + "bent_terrain_altitude", "terrain_slope", "terrain_aspect", + "elevation_angle", "min_vis_elevation", "min_vis_altitude", + "incident_angle", "sigma_0", "effective_area" ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_echoclass.py b/src/pyrad_proc/pyrad/proc/process_echoclass.py index 65d8a468e..cba95a0da 100755 --- a/src/pyrad_proc/pyrad/proc/process_echoclass.py +++ b/src/pyrad_proc/pyrad/proc/process_echoclass.py @@ -65,7 +65,11 @@ def process_echo_id(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must be + "dBZ" or "dBuZ", and, + "ZDR" or "ZDRu", and, + "RhoHV" or "uRhoHV", and, + "PhiDP" or "uPhiDP" wind_size : int Size of the moving window used to compute the ray texture (number of gates). Default 7 @@ -81,7 +85,7 @@ def process_echo_id(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "echoID" ind_rad : int radar index @@ -180,14 +184,17 @@ def process_birds_id(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must be + "dBZ" or "dBuZ", and, + "ZDR" or "ZDRu", and, + "RhoHV" or "uRhoHV" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "echoID" ind_rad : int radar index @@ -208,7 +215,9 @@ def process_birds_id(procstatus, dscfg, radar_list=None): zdr_field = get_fieldname_pyart(datatype) if datatype == "RhoHV": rhv_field = get_fieldname_pyart(datatype) - + if datatype == "uRhoHV": + rhv_field = get_fieldname_pyart(datatype) + ind_rad = int(radarnr[5:8]) - 1 if radar_list[ind_rad] is None: warn("No valid radar") @@ -279,14 +288,14 @@ def process_clt_to_echo_id(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must be "CLT" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "echoID" ind_rad : int radar index @@ -340,14 +349,14 @@ def process_vstatus_to_echo_id(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must be "wind_vel_rad_status" radar_list : list of Radar objects Optional. 
list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "echoID" ind_rad : int radar index @@ -400,14 +409,14 @@ def process_hydro_mf_to_echo_id(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types must be "hydroMF" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "echoID" ind_rad : int radar index @@ -464,14 +473,14 @@ def process_hydro_mf_to_hydro(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types must be "hydroMF" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "hydro" ind_rad : int radar index @@ -543,7 +552,9 @@ def process_echo_filter(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must be + "echoID" at minimum, as well as any other fields + that will be echo filtered (e.g. dBZ, ZDR) echo_type : int or list of ints The type of echoes to keep: 1 noise, 2 clutter, 3 precipitation. Default 3 radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -635,7 +648,11 @@ def process_cdf(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must be + "echoID" (if not provided, no clutter filtering is possible), and, + "hydro" (if not provided, no hydro filtering is possible), and, + "VIS" (if not provided no blocked gate filtering is possible), and, + any other field that will be used to compute CDF radar_list : list of Radar objects Optional. list of radar objects @@ -716,7 +733,9 @@ def process_gatefilter(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, can be any data type supported by + pyrad, the number of datatypes must match the lower and upper bounds + dimensions lower_bounds : list of float The list of lower bounds for every input data type upper_bounds : list of float The list of upper bounds for every input data type radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -807,7 +828,10 @@ def process_filter_snr(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string.
Dataset keyword - The input data types + The input data types, must contain + "SNRh", "SNRv", "SNR" or "CNR" as well + as any other datatype supported by pyrad that + will be SNR filtered. SNRmin : float. Dataset keyword The minimum SNR to keep the data. radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -914,7 +940,9 @@ def process_filter_vel_diff(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "diffV", as well + as any other datatype supported by pyrad that + will be filtered where no Doppler velocity could be estimated. radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -1000,7 +1031,10 @@ def process_filter_visibility(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "VIS" or "visibility_polar", as well + as any other datatype supported by pyrad that + will be filtered where the visibility is poor. VISmin : float. Dataset keyword The minimum visibility to keep the data. radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -1100,7 +1136,7 @@ def process_outlier_filter(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, can be any data type supported by pyrad threshold : float. Dataset keyword The distance between the value of the examined range gate and the median of the surrounding gates to consider the gate an outlier @@ -1119,7 +1155,9 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output, it will contain + the corrected version of the provided datatypes + For example dBZ -> dBZc, ZDR -> ZDRc, RhoHV -> RhoHVc ind_rad : int radar index @@ -1225,7 +1263,10 @@ def process_filter_vol2bird(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "VOL2BIRD_CLASS", as well + as any other datatype supported by pyrad that + will be filtered where vol2bird detected non-biological echoes radar_list : list of Radar objects Optional. list of radar objects @@ -1309,7 +1350,10 @@ def process_gate_filter_vol2bird(procstatus, dscfg, radar_list=None): data set configuration.
Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "VOL2BIRD_CLASS", and, + "dBZ" or "dBZc", and, + "V" or "Vc" dBZ_max : float Maximum reflectivity of biological scatterers V_min : float @@ -1403,7 +1447,12 @@ def process_hydroclass(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "RhoHV", or "uRhoHV", or "RhoHVc", and, + "KDP", or "KDPc", and, + "TEMP" or "H_ISO0" (optional) HYDRO_METHOD : string. Dataset keyword The hydrometeor classification method. One of the following: SEMISUPERVISED, UKMO @@ -1473,7 +1522,9 @@ def process_hydroclass(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "hydro", "entropy" (if compute_entropy is 1), + and "propAG", "propCR", "propLR", "propRP", "propRN", "propVI", "propWS", "propMH", + "propIH" (if output_distances is 1) ind_rad : int radar index @@ -1803,7 +1854,12 @@ def process_centroids(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "RhoHV", or "uRhoHV", or "RhoHVc", and, + "KDP", or "KDPc", and, + "TEMP" or "H_ISO0" (optional) samples_per_vol : int. Dataset keyword Maximum number of samples per volume kept for further analysis. Default 20000 @@ -1905,7 +1961,7 @@ def process_centroids(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output centroids ind_rad : int radar index @@ -2188,14 +2244,18 @@ def process_melting_layer(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "RhoHV" or "RhoHVc", and, + "TEMP" or "H_ISO0" (optional) radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ml" ind_rad : int radar index @@ -2666,14 +2726,17 @@ def process_zdr_column(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ZDR" or "ZDRc", and, + "RhoHV" or "RhoHVc", and, + "TEMP" or "H_ISO0" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ZDR_col" ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_grid.py b/src/pyrad_proc/pyrad/proc/process_grid.py index 6d2da1ce8..be1e2d141 100644 --- a/src/pyrad_proc/pyrad/proc/process_grid.py +++ b/src/pyrad_proc/pyrad/proc/process_grid.py @@ -46,14 +46,17 @@ def process_raw_grid(procstatus, dscfg, radar_list=None): Processing status: 0 initializing, 1 processing volume, 2 post-processing dscfg : dictionary of dictionaries - data set configuration + data set configuration. Accepted Configuration Keywords: + + datatype : string. 
Dataset keyword + arbitrary data type supported by pyrad and contained in the grid data radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output with field corresponding to datatype ind_rad : int radar index @@ -130,7 +133,8 @@ Returns ------- new_dataset : dict - dictionary containing the gridded data + dictionary containing the gridded data with fields corresponding to + datatype ind_rad : int radar index @@ -611,7 +615,7 @@ def process_grid_time_stats(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, can be any datatype supported by pyrad period : float. Dataset keyword the period to average [s]. If -1 the statistics are going to be performed over the entire data. Default 3600. @@ -632,7 +636,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields corresponding to datatypes ind_rad : int radar index @@ -953,7 +957,7 @@ def process_grid_time_stats2(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data type, can be any datatype supported by pyrad period : float. Dataset keyword the period to average [s]. If -1 the statistics are going to be performed over the entire data. Default 3600. @@ -971,7 +975,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields corresponding to + datatypes ind_rad : int radar index @@ -1202,7 +1207,8 @@ def process_grid_rainfall_accumulation(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data type, can be any data type supported by pyrad + but typically RR is used period : float. Dataset keyword the period to average [s]. If -1 the statistics are going to be performed over the entire data. Default 3600. @@ -1218,7 +1224,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field corresponding to datatype ind_rad : int radar index @@ -1401,7 +1407,8 @@ def process_grid_fields_diff(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The two input data types to compare. + Can be any two datatypes supported by pyrad radar_list : list of Radar objects Optional. list of radar objects @@ -1471,7 +1478,7 @@ def process_grid_texture(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data type, can be any datatype supported by pyrad xwind, ywind : int The size of the local window in the x and y axis.
Default 7 fill_value : float @@ -1482,7 +1489,8 @@ def process_grid_texture(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing a radar object containing the field differences + dictionary containing a radar object containing the field + "texture" ind_rad : int radar index @@ -1544,23 +1552,28 @@ def process_grid_mask(procstatus, dscfg, radar_list=None): Processing status: 0 initializing, 1 processing volume, 2 post-processing dscfg : dictionary of dictionaries - data set configuration + data set configuration. Accepted Configuration Keywords:: + + datatype : list of string. Dataset keyword + The input data type, can be any datatype supported by pyrad + threshold_min : float or None + Threshold used for the mask. Values below threshold are set to False. + Above threshold are set to True. Default None. + threshold_max : float or None + Threshold used for the mask. Values above threshold are set to False. + Below threshold are set to True. Default None. + x_dir_ext, y_dir_ext : int + Number of pixels by which to extend the mask on each side of the + west-east direction and south-north direction + radar_list : list of Radar objects Optional. list of radar objects - threshold_min : float or None - Threshold used for the mask. Values below threshold are set to False. - Above threshold are set to True. Default None. - threshold_max : float or None - Threshold used for the mask. Values above threshold are set to False. - Below threshold are set to True. Default None. - x_dir_ext, y_dir_ext : int - Number of pixels by which to extend the mask on each side of the - west-east direction and south-north direction + Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "mask" ind_rad : int radar index @@ -1648,14 +1661,18 @@ def process_normalize_luminosity(procstatus, dscfg, radar_list=None): Processing status: 0 initializing, 1 processing volume, 2 post-processing dscfg : dictionary of dictionaries - data set configuration + data set configuration. Accepted Configuration Keywords:: + + datatype : list of string. Dataset keyword + The input data type, can be any datatype supported by pyrad radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the normalized field, the name + of the field is datatype_norm ind_rad : int radar index @@ -1714,6 +1731,10 @@ def process_pixel_filter(procstatus, dscfg, radar_list=None): dscfg : dictionary of dictionaries data set configuration. Accepted Configuration Keywords:: + datatype : list of string. Dataset keyword + The input data types, must contain + "mask", as well as + any datatypes supported by pyrad pixel_type : int or list of ints The type of pixels to keep: 0 No data, 1 Below threshold, 2 Above threshold. Default 2 @@ -1723,7 +1744,7 @@ def process_pixel_filter(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output datatypes masked ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_icon.py b/src/pyrad_proc/pyrad/proc/process_icon.py index 21c7d3c2e..e518adf87 100755 --- a/src/pyrad_proc/pyrad/proc/process_icon.py +++ b/src/pyrad_proc/pyrad/proc/process_icon.py @@ -57,7 +57,7 @@ def process_icon(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. 
Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad keep_in_memory : int. Dataset keyword if set keeps the icon data dict, the icon coordinates dict and the icon field in radar coordinates in memory @@ -75,7 +75,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field corresponding to + icon_variables ind_rad : int radar index @@ -230,7 +231,7 @@ def process_hzt(procstatus, dscfg, radar_list=None): Type of METRANET reader library used to read the data. Can be 'C' or 'python' datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad keep_in_memory : int. Dataset keyword if set keeps the icon data dict, the icon coordinates dict and the icon field in radar coordinates in memory @@ -248,7 +249,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields corresponding to + icon_variables ind_rad : int radar index @@ -360,7 +362,7 @@ def process_iso0_mf(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad iso0_statistic : str. Dataset keyword The statistic used to weight the iso0 points. Can be avg_by_dist, avg, min, max @@ -371,7 +373,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "H_ISO0" ind_rad : int radar index @@ -442,7 +444,7 @@ def process_iso0_grib(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad time_interp : bool. Dataset keyword whether to perform an interpolation in time between consecutive model outputs. Default True @@ -455,7 +457,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field H_ISO0 ind_rad : int radar index @@ -524,7 +526,7 @@ def process_icon_lookup_table(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad lookup_table : int. Dataset keyword if set a pre-computed look up table for the icon coordinates is loaded. Otherwise the look up table is computed taking the first @@ -543,7 +545,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields corresponding to icon_variables ind_rad : int radar index @@ -714,7 +716,7 @@ def process_hzt_lookup_table(procstatus, dscfg, radar_list=None): Type of METRANET reader library used to read the data. Can be 'C' or 'python' datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad lookup_table : int. Dataset keyword if set a pre-computed look up table for the icon coordinates is loaded.
Otherwise the look up table is computed taking the first @@ -729,7 +731,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "H_ISO0" ind_rad : int radar index @@ -862,7 +864,7 @@ def process_icon_to_radar(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad icon_type : str. Dataset keyword name of the icon field to process. Default TEMP icon_variables : list of strings. Dataset keyword @@ -878,7 +880,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields corresponding to icon_variables ind_rad : int radar index @@ -996,7 +998,7 @@ def process_icon_coord(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad iconpath : string. General keyword path where to store the look up table model : string. Dataset keyword @@ -1007,7 +1009,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "icon_index" ind_rad : int radar index @@ -1070,7 +1073,7 @@ def process_hzt_coord(procstatus, dscfg, radar_list=None): Type of METRANET reader library used to read the data. Can be 'C' or 'python' datatype : string. Dataset keyword - arbitrary data type + arbitrary data type supported by pyrad iconpath : string. General keyword path where to store the look up table radar_list : list of Radar objects @@ -1079,7 +1082,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "icon_index" ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_intercomp.py index 2d6dcbb83..b5860f738 100755 --- a/src/pyrad_proc/pyrad/proc/process_intercomp.py +++ b/src/pyrad_proc/pyrad/proc/process_intercomp.py @@ -52,7 +52,7 @@ def process_time_stats(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + Arbitrary data type supported by pyrad and contained in the radar data period : float. Dataset keyword the period to average [s]. If -1 the statistics are going to be performed over the entire data. Default 3600. @@ -73,7 +76,10 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the statistic computed on the input field, as well as + "nsamples", and + "sum2" (sum of squared values) if stat is cov or std + ind_rad : int radar index @@ -417,7 +420,7 @@ def process_time_stats2(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + Arbitrary data type supported by pyrad and contained in the radar data period : float. Dataset keyword the period to average [s]. If -1 the statistics are going to be performed over the entire data. Default 3600.
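As a rough picture of the bookkeeping described above ("nsamples" plus "sum2" for the std/cov statistics), a period mean and standard deviation can be recovered from running first and second moments; this is only a sketch with made-up per-volume values, not the pyrad implementation:

    import numpy as np

    # Running accumulators, mirroring the documented output fields
    acc = {"sum": 0.0, "sum2": 0.0, "nsamples": 0}

    for value in [31.0, 28.5, 30.2, 29.8]:  # hypothetical values of one gate
        acc["sum"] += value
        acc["sum2"] += value ** 2
        acc["nsamples"] += 1

    mean = acc["sum"] / acc["nsamples"]
    # std recovered from the accumulated first and second moments
    std = np.sqrt(acc["sum2"] / acc["nsamples"] - mean ** 2)
    print(mean, std)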
@@ -435,7 +438,9 @@ def process_time_stats2(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the statistic computed on the input field, as well as + "nsamples" + ind_rad : int radar index @@ -681,7 +686,7 @@ def process_time_avg(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + Arbitrary data type supported by pyrad and contained in the radar data period : float. Dataset keyword the period to average [s]. Default 3600. start_average : float. Dataset keyword @@ -694,7 +699,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the statistic computed on the input field, as well as + "nsamples" ind_rad : int radar index @@ -873,7 +879,8 @@ def process_weighted_time_avg(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + Arbitrary data type supported by pyrad and contained in the radar data, as well as + "dBZ" or "dBZc" or "dBuZ" or "dBZv" or "dBZvc" or "dBuZv" (refl. weighting) period : float. Dataset keyword the period to average [s]. Default 3600. start_average : float. Dataset keyword @@ -883,8 +890,8 @@ Returns ------- - new_dataset : Radar - radar object + new_dataset : dict + dictionary containing the statistic computed on the input field ind_rad : int radar index @@ -1053,7 +1060,12 @@ def process_time_avg_flag(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, may contain + "PhiDP" or "PhiDPc" (Optional, for PhiDP flagging), and, + "echoID" (Optional, for echoID flagging), and, + "hydro" (Optional, for no rain flagging), and, + "TEMP" (Optional, for solid precip flagging), and, + "H_ISO0" (Optional, also for solid precip flagging) period : float. Dataset keyword the period to average [s]. Default 3600. start_average : float. Dataset keyword @@ -1070,8 +1082,8 @@ Returns ------- - new_dataset : Radar - radar object + new_dataset : dict + dictionary containing the field "time_avg_flag" ind_rad : int radar index @@ -1315,7 +1327,10 @@ def process_colocated_gates(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types to use to check colocated gates (one for every radar). + Any datatype supported by pyrad and available in both radars is accepted. + If visibility filtering is desired, the fields + "visibility" or "visibility_polar" must be specified for both radars. h_tol : float. Dataset keyword Tolerance in altitude difference between radar gates [m]. Default 100. @@ -1352,8 +1367,8 @@ Returns ------- - new_dataset : radar object - radar object containing the flag field + new_dataset : dict + dictionary containing the field "colocated_gates" ind_rad : int radar index @@ -1532,7 +1547,9 @@ def process_intercomp(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords::
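The gate-matching tolerances documented for process_colocated_gates (h_tol and friends) boil down to a nearest-neighbour search under distance thresholds. A brute-force sketch with synthetic gate coordinates follows; it is purely illustrative, the horizontal tolerance name is an assumption, and pyrad's actual search is more refined:

    import numpy as np

    rng = np.random.default_rng(0)
    # Hypothetical gate positions (x, y, z) in metres for two radars
    gates1 = rng.uniform(0, 50000, size=(200, 3))
    gates2 = rng.uniform(0, 50000, size=(200, 3))

    h_tol = 100.0    # altitude tolerance [m], cf. the h_tol keyword
    hor_tol = 500.0  # assumed horizontal distance tolerance [m]

    pairs = []
    for i, g1 in enumerate(gates1):
        d_hor = np.hypot(gates2[:, 0] - g1[0], gates2[:, 1] - g1[1])
        d_ver = np.abs(gates2[:, 2] - g1[2])
        cand = np.where((d_hor < hor_tol) & (d_ver < h_tol))[0]
        if cand.size:
            pairs.append((i, cand[np.argmin(d_hor[cand])]))
    print(len(pairs), "colocated gate pairs")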
datatype : list of string. Dataset keyword - The input data types + The input data types (one for every radar). + Any arbitrary datatype supported by pyrad and available + in both radars is accepted. colocgatespath : string. base path to the file containing the coordinates of the co-located gates @@ -1891,7 +1908,11 @@ def process_intercomp_time_avg(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc" or "dBuZ" or "dBZv" or "dBZvc" or "dBuZv", and, + "PhiDP" or "PhiDPc", and, + "time_avg_flag" + for the two radars colocgatespath : string. base path to the file containing the coordinates of the co-located gates @@ -2419,7 +2440,8 @@ def process_fields_diff(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types for each radar, + Any datatype supported by pyrad is accepted radar_list : list of Radar objects Optional. list of radar objects @@ -2500,7 +2522,8 @@ def process_intercomp_fields(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types for each radar, + Any datatype supported by pyrad and available in both radars is accepted radar_list : list of Radar objects Optional. list of radar objects diff --git a/src/pyrad_proc/pyrad/proc/process_iq.py index 161ce5175..1853bc6d5 100644 --- a/src/pyrad_proc/pyrad/proc/process_iq.py +++ b/src/pyrad_proc/pyrad/proc/process_iq.py @@ -82,7 +82,8 @@ def process_pol_variables_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU", "IQvvADU", "IQNADUh" and "IQNADUv" subtract_noise : Bool If True noise will be subtracted from the signal lag : int @@ -100,7 +101,9 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields corresponding to the specified + "variables" ind_rad : int radar index @@ -171,7 +174,9 @@ def process_reflectivity_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU", and, + "IQNADUh" or "IQNADUv" subtract_noise : Bool If True noise will be subtracted from the signal radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "dBZ" if "IQhhADU" and "IQNADUh" are specified + "dBZv" if "IQvvADU" and "IQNADUv" are specified ind_rad : int radar index @@ -246,14 +253,15 @@ def process_st1_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU" radar_list : list of spectra objects Optional.
list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ST1" (stat_test_lag1) ind_rad : int radar index @@ -302,14 +310,15 @@ def process_st2_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ST2" ind_rad : int radar index @@ -357,14 +366,15 @@ def process_wbn_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "WBN" (wide-band noise) ind_rad : int radar index @@ -413,7 +423,8 @@ def process_differential_reflectivity_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU", "IQvvADU", "IQNADUh" and "IQNADUv" subtract_noise : Bool If True noise will be subtracted from the signal lag : int @@ -424,7 +435,7 @@ def process_differential_reflectivity_iq(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ZDR" ind_rad : int radar index @@ -491,14 +502,15 @@ def process_mean_phase_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "MPH" (mean_phase) ind_rad : int radar index @@ -546,7 +558,8 @@ def process_differential_phase_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" and "IQvvADU" phase_offset : float. Dataset keyword The system differential phase offset to remove radar_list : list of spectra objects @@ -555,7 +568,7 @@ def process_differential_phase_iq(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field uPhiDP ind_rad : int radar index @@ -612,7 +625,8 @@ def process_rhohv_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. 
Dataset keyword - The input data types + The input data types, must contain + "IQhhADU", "IQvvADU", "IQNADUh" and "IQNADUv" subtract_noise : Bool If True noise will be subtracted from the signal lag : int @@ -623,7 +637,7 @@ def process_rhohv_iq(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "RhoHV" ind_rad : int radar index @@ -689,7 +703,8 @@ def process_Doppler_velocity_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU" direction : str The convention used in the Doppler mean field. Can be negative_away or negative_towards @@ -699,7 +714,8 @@ def process_Doppler_velocity_iq(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "V" + (if IQhhADU was provided) or "Vv" (if IQvvADU was provided) ind_rad : int radar index @@ -753,7 +769,8 @@ def process_Doppler_width_iq(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "IQhhADU" or "IQvvADU" subtract_noise : Bool If True noise will be subtracted from the signals lag : int @@ -764,7 +781,8 @@ def process_Doppler_width_iq(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "W" (if IQhhADU was provided), + or "Wv" (if IQvvADU was provided) ind_rad : int radar index @@ -830,7 +848,9 @@ def process_fft(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "IQNdBADUh" and/or "IQNdBADUv" and/or + "IQNADUh" and/or "IQNADUv" (see new_dataset below) window : list of str Parameters of the window used to obtain the spectra. The parameters are the ones corresponding to function @@ -841,7 +861,11 @@ def process_fft(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "ShhADUu" (unfiltered_complex_spectra_hh_ADU) if IQNdBADUh was provided, + "SvvADUu" (unfiltered_complex_spectra_vv_ADU) if IQNdBADUv was provided, + "sNADUh" (spectral_noise_power_hh_ADU) if IQNADUh was provided, + "sNADUv" (spectral_noise_power_vv_ADU) if IQNADUv was provided, ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_monitoring.py b/src/pyrad_proc/pyrad/proc/process_monitoring.py index 202cd649d..6af79e4cf 100755 --- a/src/pyrad_proc/pyrad/proc/process_monitoring.py +++ b/src/pyrad_proc/pyrad/proc/process_monitoring.py @@ -47,7 +47,14 @@ def process_selfconsistency_kdp_phidp(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of strings. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "PhiDP" or "PhiDPc", and, + "uRhoHV" or "RhoHV" or "RhoHVc", and, + "TEMP" (Optional), and, + "H_ISO0" (Optional), and, + "hydro" (Optional, only used if filter_rain) parametrization : str The type of parametrization for the self-consistency curves. 
Can be 'None', 'Gourley', 'Wolfensberger', 'Louf', 'Gorgucci' or @@ -84,7 +91,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields KDP and PhiDP ind_rad : int radar index @@ -291,7 +299,14 @@ def process_selfconsistency_bias(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "PhiDP" or "PhiDPc", and, + "uRhoHV" or "RhoHV" or "RhoHVc", and, + "TEMP" (Optional), and, + "H_ISO0" (Optional), and, + "hydro" (Optional, only used if filter_rain) parametrization : str The type of parametrization for the self-consistency curves. Can be 'None', 'Gourley', 'Wolfensberger', 'Louf', 'Gorgucci' or @@ -359,7 +374,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dBZ_bias" (refl. bias) ind_rad : int radar index @@ -622,7 +637,14 @@ def process_selfconsistency_bias2(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "PhiDP" or "PhiDPc", and, + "uRhoHV" or "RhoHV" or "RhoHVc", and, + "TEMP" (Optional), and, + "H_ISO0" (Optional), and, + "hydro" (Optional, only used if filter_rain) parametrization : str The type of parametrization for the self-consistency curves. Can be 'None', 'Gourley', 'Wolfensberger', 'Louf', 'Gorgucci' or @@ -680,7 +702,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dBZ_bias" (refl. bias) ind_rad : int radar index @@ -1022,7 +1044,9 @@ def process_estimate_phidp0(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc" or "uPhiDP" rmin : float. Dataset keyword The minimum range where to look for valid data [m] rmax : float. Dataset keyword @@ -1039,7 +1063,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "PhiDP0" (system diff. phase) and + "PhiDP0_bin" (first gate diff. phase) ind_rad : int radar index @@ -1110,7 +1135,11 @@ def process_rhohv_rain(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "RhoHV" or "RhoHVc" or "uRhoHV", and, + "dBZ" or "dBZc", and + "TEMP" (Optional), or + "H_ISO0" (Optional) rmin : float. Dataset keyword minimum range where to look for rain [m]. Default 1000. rmax : float.
Dataset keyword @@ -1138,7 +1167,7 @@ def process_rhohv_rain(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "RhoHV_rain" (RhoHV in rain) ind_rad : int radar index @@ -1267,8 +1296,14 @@ def process_zdr_precip(procstatus, dscfg, radar_list=None): dscfg : dictionary of dictionaries data set configuration. Accepted Configuration Keywords:: - datatype : list of string. Dataset keyword - The input data types + datatype : list of strings. Dataset keyword + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "PhiDP" or "PhiDPc", and, + "uRhoHV" or "RhoHV" or "RhoHVc", and, + "TEMP" (Optional), and, + "H_ISO0" (Optional) ml_filter : boolean. Dataset keyword indicates if a filter on data in and above the melting layer is applied. Default True. @@ -1308,7 +1343,7 @@ def process_zdr_precip(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ZDR_prec" ind_rad : int radar index @@ -1480,8 +1515,15 @@ def process_zdr_snow(procstatus, dscfg, radar_list=None): dscfg : dictionary of dictionaries data set configuration. Accepted Configuration Keywords:: - datatype : list of string. Dataset keyword - The input data types + datatype : list of strings. Dataset keyword + The input data types, must contain + "dBZ" or "dBZc", and, + "ZDR" or "ZDRc", and, + "PhiDP" or "PhiDPc", and, + "uRhoHV" or "RhoHV" or "RhoHVc", and, + "hydro", and, + "TEMP" (Optional), and, + "SNRh" or "SNRv" (Optional, used to filter with SNRmin and SNRmax) rmin : float. Dataset keyword minimum range where to look for rain [m]. Default 1000. rmax : float. Dataset keyword @@ -1525,7 +1567,7 @@ def process_zdr_snow(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "ZDR_snow" ind_rad : int radar index """ @@ -1649,7 +1691,7 @@ def process_monitoring(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + Arbitrary datatype supported by pyrad step : float. Dataset keyword The width of the histogram bin. Default is None. In that case the default step in function get_histogram_bins is used diff --git a/src/pyrad_proc/pyrad/proc/process_phase.py b/src/pyrad_proc/pyrad/proc/process_phase.py index 565da5a7c..67dd57211 100755 --- a/src/pyrad_proc/pyrad/proc/process_phase.py +++ b/src/pyrad_proc/pyrad/proc/process_phase.py @@ -55,7 +55,9 @@ def process_correct_phidp0(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc" or "uPhiDP" rmin : float. Dataset keyword The minimum range where to look for valid data [m]. Default 1000. rmax : float. Dataset keyword @@ -73,7 +75,7 @@ def process_correct_phidp0(procstatus, dscfg, radar_list=None): Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "PhiDPc" ind_rad : int radar index @@ -163,7 +165,9 @@ def process_smooth_phidp_single_window(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. 
Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc" or "uPhiDP" rmin : float. Dataset keyword The minimum range where to look for valid data [m]. Default 1000. rmax : float. Dataset keyword @@ -183,7 +187,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "PhiDPc" ind_rad : int radar index @@ -272,7 +276,9 @@ def process_smooth_phidp_double_window(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc" or "uPhiDP" rmin : float. Dataset keyword The minimum range where to look for valid data [m] rmax : float. Dataset keyword @@ -295,7 +301,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "PhiDPc" ind_rad : int radar index @@ -392,7 +398,10 @@ def process_phidp_kdp_Maesaka(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc" or "uPhiDP", and + "TEMP" or "H_ISO0" (Optional) rmin : float. Dataset keyword The minimum range where to look for valid data [m]. Default 1000. rmax : float. Dataset keyword @@ -424,7 +433,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "PhiDPc" and "KDPc" ind_rad : int radar index @@ -629,7 +638,12 @@ def process_phidp_kdp_lp(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc" or "uPhiDP", and, + "RhoHV" or "RhoHVc" (Optional, used when min_rhv is specified), and, + "SNRh" (Optional, used when min_snr is specified), and + "TEMP" or "H_ISO0" (Optional) fzl : float. Dataset keyword The freezing level height [m]. Default 2000. sounding : str. Dataset keyword @@ -699,7 +713,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "PhiDPc" and "KDPc" ind_rad : int radar index @@ -930,7 +944,8 @@ def process_kdp_leastsquare_single_window(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "PhiDP" or "PhiDPc" or "uPhiDP" rwind : float. Dataset keyword The length of the segment for the least square method [m]. Default 6000. @@ -942,7 +957,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "KDPc" ind_rad : int radar index @@ -1007,8 +1022,9 @@ def process_kdp_leastsquare_double_window(procstatus, dscfg, radar_list=None): dscfg : dictionary of dictionaries data set configuration.
Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "PhiDP" or "PhiDPc" or "uPhiDP", and, + "dBZ" or "dBZc" rwinds : float. Dataset keyword The length of the short segment for the least square method [m]. Default 2000. @@ -1025,7 +1041,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "KDPc" ind_rad : int radar index @@ -1106,7 +1122,8 @@ def process_phidp_kdp_Vulpiani(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "PhiDP" or "PhiDPc" or "uPhiDP" rwind : float. Dataset keyword The length of the segment [m]. Default 2000. n_iter : int. Dataset keyword @@ -1130,7 +1147,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "PhiDPc" and "KDPc" ind_rad : int radar index @@ -1239,7 +1256,8 @@ def process_phidp_kdp_Kalman(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "PhiDP" or "PhiDPc" or "uPhiDP" parallel : boolean. Dataset keyword if set use parallel computing get_phidp : boolean. Dataset keyword @@ -1256,7 +1274,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "PhiDPc" and "KDPc" ind_rad : int radar index @@ -1354,7 +1372,11 @@ def process_attenuation(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZc", and, + "PhiDP" or "PhiDPc", and, + "ZDR" or "ZDRc", and + "TEMP" or "H_ISO0" (Optional) ATT_METHOD : float. Dataset keyword The attenuation estimation method used. One of the following: ZPhi, Philin. Default ZPhi @@ -1373,7 +1395,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "Ah" (spec. attenuation), + "PIA" (path-integrated attenuation) and "dBZc" (corr. refl.) ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_retrieve.py index 6bc904536..c77df1752 100755 --- a/src/pyrad_proc/pyrad/proc/process_retrieve.py +++ b/src/pyrad_proc/pyrad/proc/process_retrieve.py @@ -55,14 +55,17 @@ def process_ccor(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBZv", and, + "dBuZ" or "dBuZv" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "CCORh" or + "CCORv" (if vertical reflectivities were provided) ind_rad : int radar index
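The attenuation output documented above (Ah, PIA, corrected dBZ) can be illustrated with the linear-PhiDP flavour of the correction, where the two-way path-integrated attenuation is taken proportional to differential phase; the coefficient below is a typical C-band value assumed purely for illustration:

    import numpy as np

    # Hypothetical ray: measured reflectivity [dBZ] and diff. phase [deg]
    dbz = np.array([35.0, 38.0, 40.0, 42.0, 41.0])
    phidp = np.array([0.0, 4.0, 10.0, 18.0, 26.0])

    gamma = 0.08              # assumed PIA/PhiDP ratio [dB/deg]
    pia = gamma * phidp       # path-integrated attenuation per gate
    dbz_corr = dbz + pia      # attenuation-corrected reflectivity
    print(dbz_corr)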
@@ -116,7 +119,8 @@ def process_signal_power(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBuZ" or "dBZc" or "dBuZc" or "dBZv" or "dBuZv" or "dBuZvc" mflossh, mflossv : float. Dataset keyword The matching filter losses of the horizontal (vertical) channel [dB]. If None it will be obtained from the attribute @@ -141,7 +145,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dBm" or "dBmv" (if + vert. refl. was provided) ind_rad : int radar index @@ -222,7 +227,8 @@ def process_rcs_pr(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBuZ" or "dBZc" or "dBuZc" or "dBZv" or "dBuZv" or "dBuZvc" AntennaGainH, AntennaGainV : float. Dataset keyword The horizontal (vertical) polarization antenna gain [dB]. If None it will be obtained from the attribute instrument_parameters of @@ -259,7 +265,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "rcs_h" or "rcs_v" (if vert. refl. were + provided) ind_rad : int radar index @@ -351,7 +358,8 @@ def process_rcs(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBuZ" or "dBZc" or "dBuZc" or "dBZv" or "dBuZv" or "dBuZvc" kw2 : float. Dataset keyword The water constant pulse_width : float. Dataset keyword @@ -368,7 +376,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "rcs_h" or "rcs_v" (if vert. refl. were + provided) ind_rad : int radar index @@ -423,7 +432,7 @@ def process_vol_refl(procstatus, dscfg, radar_list=None): """ - Computes the volumetric reflectivity in 10log10(cm^2 km^-3) + Computes the volumetric reflectivity eta in 10log10(cm^2 km^-3) Parameters ---------- @@ -434,7 +443,8 @@ data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "dBZ" or "dBuZ" or "dBZc" or "dBuZc" or "dBZv" or "dBuZv" or "dBuZvc" freq : float. Dataset keyword The radar frequency kw : float. Dataset keyword @@ -445,7 +455,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "eta_h" or "eta_v" (if vert. refl. were + provided) ind_rad : int radar index
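The received-power bookkeeping behind process_signal_power follows the weather radar equation; a toy version in which the radar constant and losses are assumed numbers, the gas-attenuation term is omitted for brevity, and sign conventions may differ between systems:

    import numpy as np

    dbz = np.array([20.0, 30.0, 25.0])      # reflectivity along a ray
    rng_km = np.array([10.0, 50.0, 100.0])  # gate ranges [km]

    # dBZ = dBm + radconst + 20*log10(r) + losses  =>  solve for dBm
    radconst = 75.0  # assumed radar constant [dB]
    mfloss = 1.0     # assumed matched-filter loss [dB], cf. mflossh
    dbm = dbz - radconst - 20.0 * np.log10(rng_km) - mfloss
    print(dbm)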
@@ -501,7 +512,9 @@ def process_snr(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "dBZ" or "dBuZ" or "dBZv" or "dBuZv", and, + "Nh" or "Nv" output_type : string. Dataset keyword The output data type. Either SNRh or SNRv radar_list : list of Radar objects @@ -510,7 +523,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "SNRh" or "SNRv" (if vert. + refl. were provided) ind_rad : int radar index @@ -568,7 +582,8 @@ def process_radial_noise_hs(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "dBm" or "dBmv" rmin : float. Dataset keyword The minimum range from which to start the computation nbins_min : int. Dataset keyword @@ -585,7 +600,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "NdBmh" and "noise_pos_h" or + "NdBmv" and "noise_pos_v" (if vert. refl. were provided) ind_rad : int radar index @@ -660,7 +676,8 @@ def process_radial_noise_ivic(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "dBm" or "dBmv" npulses_ray : int Default number of pulses used in the computation of the ray. If the number of pulses is not in radar.instrument_parameters this @@ -679,7 +696,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "NdBmh" and "noise_pos_h" or + "NdBmv" and "noise_pos_v" (if vert. refl. were provided) ind_rad : int radar index @@ -736,7 +754,7 @@ def process_l(procstatus, dscfg, radar_list=None): """ - Computes L parameter + Computes L parameter (logarithmic cross-correlation ratio) Parameters ---------- @@ -747,14 +765,15 @@ data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "RhoHV" or "RhoHVc" or "uRhoHV" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "L" ind_rad : int radar index @@ -790,7 +809,7 @@ def process_cdr(procstatus, dscfg, radar_list=None): """ - Computes Circular Depolarization Ratio + Computes approximation of Circular Depolarization Ratio Parameters ---------- @@ -801,14 +820,16 @@ data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "RhoHV" or "uRhoHV" or "RhoHVu", and, + "ZDR" or "ZDRc" radar_list : list of Radar objects Optional. list of radar objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "CDR" ind_rad : int radar index
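A common definition of the L parameter computed here is L = -log10(1 - RhoHV), which stretches the RhoHV scale near one; a minimal sketch (masking the degenerate RhoHV = 1 case), not necessarily the exact pyrad estimator:

    import numpy as np

    rhohv = np.ma.masked_invalid([0.99, 0.995, 0.9, 1.0])
    rhohv = np.ma.masked_values(rhohv, 1.0)  # avoid log10(0)
    L = -np.ma.log10(1.0 - rhohv)
    print(L)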
@@ -863,7 +884,9 @@ def process_vpr(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + "dBZ" or "dBZc", and, + "H_ISO0" or "H_ISO0c" or "TEMP" or "TEMPc" nvalid_min : int Minimum number of rays with data to consider the azimuthal average valid. Default 20. @@ -938,7 +961,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields "dBZc" and "VPRcorr" ind_rad : int radar index @@ -1220,7 +1243,24 @@ def process_rainrate(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The input data type + The input data type, must contain, + If RR_METHOD == "Z" or "ZPoly": + "dBZ" or "dBZc" + If RR_METHOD == "KDP": + "KDP" or "KDPc" + If RR_METHOD == "A": + "Ah" or "Ahc" + If RR_METHOD == "ZKDP": + "dBZ" or "dBZc", and, + "KDP" or "KDPc" + If RR_METHOD == "ZA": + "dBZ" or "dBZc", and, + "Ah" or "Ahc" + If RR_METHOD == "hydro": + "dBZ" or "dBZc", and, + "Ah" or "Ahc", and, + "hydro" + RR_METHOD : string. Dataset keyword The rainfall rate estimation method. One of the following: Z, ZPoly, KDP, A, ZKDP, ZA, hydro @@ -1256,7 +1296,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "RR" (rain rate) ind_rad : int radar index @@ -1520,7 +1560,8 @@ def process_rainfall_accumulation(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "RR" period : float. Dataset keyword the period to average [s]. If -1 the statistics are going to be performed over the entire data. Default 3600. @@ -1536,7 +1577,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "Raccu" ind_rad : int radar index @@ -1725,7 +1766,8 @@ def process_bird_density(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "eta_h" or "eta_v" (volumetric reflectivities) sigma_bird : float. Dataset keyword The bird radar cross section radar_list : list of Radar objects @@ -1734,7 +1776,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "bird_density" ind_rad : int radar index
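For RR_METHOD == "Z" (documented above) the rate follows from inverting a Z = a*R**b power law; a toy version with commonly used coefficients, which are assumed here rather than taken from pyrad:

    import numpy as np

    dbz = np.array([20.0, 35.0, 45.0])
    z = 10.0 ** (dbz / 10.0)       # linear reflectivity [mm^6 m^-3]

    a, b = 316.0, 1.5              # assumed Z-R coefficients
    rr = (z / a) ** (1.0 / b)      # rain rate [mm/h]
    print(rr)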
diff --git a/src/pyrad_proc/pyrad/proc/process_spectra.py index d19f698b2..6be8148c5 100644 --- a/src/pyrad_proc/pyrad/proc/process_spectra.py +++ b/src/pyrad_proc/pyrad/proc/process_spectra.py @@ -94,14 +94,21 @@ def process_ifft(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ShhADU" or "ShhADUu", or, + "SvvADU" or "SvvADUu", or, + "sNADUh" or "sNADUv" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "IQhhADU" if "ShhADU" or "ShhADUu" were provided, + "IQvvADU" if "SvvADU" or "SvvADUu" were provided, + "IQNADUh" if "sNADUh" was provided, + "IQNADUv" if "sNADUv" was provided. ind_rad : int radar index @@ -583,7 +590,8 @@ def process_filter_0Doppler(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, can be any of the spectral fields supported by pyrad + filter_width : float The Doppler filter width. Default 0. filter_units : str @@ -594,7 +602,9 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields; the names of the output fields + are the same as the provided datatypes, except for unfiltered fields, + which are renamed (e.g. "dBuZ" => "dBZ") ind_rad : int radar index @@ -663,7 +673,9 @@ def process_filter_srhohv(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "sRhoHV" or "sRhoHVu", + as well as any spectral field supported by pyrad sRhoHV_threshold : float Data with sRhoHV module above this threshold will be filtered. Default 1. @@ -673,7 +685,9 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields; the names of the output fields + are the same as the provided datatypes, except for unfiltered fields, + which are renamed (e.g. "dBuZ" => "dBZ") ind_rad : int radar index
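The clipping_level logic documented for process_filter_spectra_noise below amounts to masking spectral bins that sit less than a given margin above the noise floor; a sketch on a synthetic spectrum (all values made up):

    import numpy as np

    rng = np.random.default_rng(1)
    # Hypothetical Doppler spectrum (linear power) with an injected peak
    spectrum = rng.exponential(1.0, 64)
    spectrum[28:36] += 30.0 * np.hanning(8)
    noise = 1.0  # estimated noise power per bin

    clipping_level = 10.0  # dB above noise, cf. the clipping_level keyword
    threshold = noise * 10.0 ** (clipping_level / 10.0)
    filtered = np.ma.masked_less(spectrum, threshold)
    print(filtered.count(), "bins kept")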
@@ -744,7 +758,9 @@ def process_filter_spectra_noise(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu", and, + "sNADUh" or "sNADUv" clipping_level : float The clipping level [dB above noise level]. Default 10. radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields; the names of the output fields + are the same as the provided datatypes ind_rad : int radar index @@ -840,7 +857,9 @@ def process_dealias_spectra(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu", and, + "sNADUh" or "sNADUv" radar_list : list of spectra objects Optional. list of spectra objects @@ -848,7 +867,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields. The output fields are the same as the original + ones but are dealiased. ind_rad : int radar index @@ -916,7 +936,8 @@ def process_spectra_ang_avg(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, + any spectral datatype supported by pyrad navg : int Number of spectra to average. If -1 all spectra will be averaged. Default -1. @@ -926,7 +947,7 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the same output fields as the provided datatypes ind_rad : int radar index @@ -1018,7 +1039,9 @@ def process_spectral_power(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu", and, + "sNADUh" or "sNADUv" units : str The units of the returned signal. Can be 'ADU', 'dBADU' or 'dBm' subtract_noise : Bool @@ -1032,7 +1055,14 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "sPhhADU" or "sPhhADUu", or + "sPvvADU" or "sPvvADUu", or + "sPhhdBADU" or "sPhhdBADUu", or + "sPvvdBADU" or "sPvvdBADUu", or + "sPhhdBm" or "sPhhdBmu", or + "sPvvdBm" or "sPvvdBmu", + depending on which input datatype and units were provided ind_rad : int radar index @@ -1093,7 +1123,8 @@ def process_spectral_noise(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu" units : str The units of the returned signal. Can be 'ADU', 'dBADU' or 'dBm' navg : int @@ -1109,7 +1140,14 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "sNADUh" or + "sNADUv" or + "sNdBADUh" or + "sNdBADUv" or + "sNdBmh" or + "sNdBmv" + depending on which input datatype and units were provided ind_rad : int radar index @@ -1168,14 +1206,20 @@ def process_spectral_phase(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "SPhasehh" if "ShhADU" was provided as input + "SPhasehhu" if "ShhADUu" was provided as input + "SPhasevv" if "SvvADU" was provided as input + "SPhasevvu" if "SvvADUu" was provided as input + ind_rad : int radar index
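process_spectral_reflectivity, documented next, boils down to noise subtraction plus a dB conversion with a range correction; a toy version in which the dBADU-to-dBZ calibration offset is an assumed number, not a pyrad constant:

    import numpy as np

    # Hypothetical spectral power [ADU] for one gate, plus its noise level
    s_power = np.array([2.0, 8.0, 40.0, 9.0, 2.5])
    noise = 2.0
    r_km = 20.0  # gate range [km]

    signal = np.maximum(s_power - noise, 1e-10)  # subtract_noise behaviour
    dbadu_to_dbz = -40.0                         # assumed calibration offset
    sdbz = 10.0 * np.log10(signal) + 20.0 * np.log10(r_km) + dbadu_to_dbz
    print(sdbz)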
@@ -1222,7 +1266,12 @@ def process_spectral_reflectivity(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + either a combination of signal and noise + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu", and, + "sNADUh" or "sNADUv", or + the power signal + "sPhhADU" or "sPvvADU" or "sPhhADUu" or "sPvvADUu" subtract_noise : Bool If True noise will be subtracted from the signal smooth_window : int or None @@ -1234,7 +1283,11 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "sdBZ" if "ShhADU" (or "sPhhADU") was provided as input + "sdBuZ" if "ShhADUu" (or "sPhhADUu") was provided as input + "sdBZv" if "SvvADU" (or "sPvvADU") was provided as input + "sdBuZv" if "SvvADUu" (or "sPvvADUu") was provided as input ind_rad : int radar index @@ -1310,7 +1363,12 @@ def process_spectral_differential_reflectivity(procstatus, dscfg, radar_list=Non data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + either a combination of signal and noise + ("ShhADU" and "SvvADU") or ("ShhADUu" and "SvvADUu"), and, + ("sNADUh" and "sNADUv"), or + the power signal + ("sPhhADU" and "sPvvADU") or ("sPhhADUu" and "sPvvADUu") subtract_noise : Bool If True noise will be subtracted from the signal smooth_window : int or None @@ -1322,7 +1380,9 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "sZDR" if "ShhADU" and "SvvADU" were provided as input + "sZDRu" if "ShhADUu" and "SvvADUu" were provided as input ind_rad : int radar index @@ -1410,14 +1470,19 @@ def process_spectral_differential_phase(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + ("ShhADU" and "SvvADU") or ("ShhADUu" and "SvvADUu"), and, + "sRhoHV" or "sRhoHVu" (Optional) radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "sPhiDP" if "ShhADU" and "SvvADU" were provided + "sPhiDPu" if "ShhADUu" and "SvvADUu" were provided ind_rad : int radar index @@ -1486,7 +1551,9 @@ def process_spectral_rhohv(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + ("ShhADU" and "SvvADU") or ("ShhADUu" and "SvvADUu"), and, + ("sNADUh" and "sNADUv") subtract_noise : Bool If True noise will be subtracted from the signal radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "sRhoHV" if "ShhADU" and "SvvADU" were provided + "sRhoHVu" if "ShhADUu" and "SvvADUu" were provided ind_rad : int radar index
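The estimator behind the spectral co-polar correlation can be written directly from its definition, sRhoHV = <Shh Svv*> / sqrt(<|Shh|^2><|Svv|^2>); in this synthetic sketch the averaging runs over a hypothetical pulse dimension purely for illustration:

    import numpy as np

    rng = np.random.default_rng(2)
    npulses, nbins = 32, 16
    # Hypothetical complex spectra for the H and V channels
    shh = rng.normal(size=(npulses, nbins)) + 1j * rng.normal(size=(npulses, nbins))
    svv = 0.95 * shh + 0.1 * rng.normal(size=(npulses, nbins))

    num = np.mean(shh * np.conj(svv), axis=0)
    den = np.sqrt(np.mean(np.abs(shh) ** 2, axis=0) *
                  np.mean(np.abs(svv) ** 2, axis=0))
    srhohv = np.abs(num) / den
    print(srhohv.round(3))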
@@ -1559,7 +1628,14 @@ def process_pol_variables(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + either a combination of signal and noise + ("ShhADU" and "SvvADU") or ("ShhADUu" and "SvvADUu"), and, + ("sNADUh" and "sNADUv"), or + the power signal + ("sPhhADU" and "sPvvADU") or ("sPhhADUu" and "sPvvADUu"), and + ("sRhoHV" or "sRhoHVu") + subtract_noise : Bool If True noise will be subtracted from the signal. Default False smooth_window : int or None @@ -1573,7 +1649,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing all the output fields that correspond to the + specified "variables" keyword ind_rad : int radar index @@ -1671,7 +1748,8 @@ def process_noise_power(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "ShhADU" or "SvvADU" or "ShhADUu" or "SvvADUu" units : str The units of the returned signal. Can be 'ADU', 'dBADU' or 'dBm' navg : int @@ -1687,7 +1765,11 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "sNADUh" or "sNADUv", or + "sNdBmh" or "sNdBmv", or + "sNdBADUh" or "sNdBADUv", + depending on which input datatype and units were provided ind_rad : int radar index @@ -1745,14 +1827,16 @@ def process_reflectivity(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain + "sdBZ" or "sdBZv" or "sdBuZ" or "sdBuZv" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "dBZ", "dBZv", + "dBuZ" or "dBuZv" depending on the provided input datatype ind_rad : int radar index @@ -1806,14 +1890,17 @@ def process_differential_reflectivity(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "sdBZ" and "sdBZv", or + "sdBuZ" and "sdBuZv" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output fields + "ZDR" or "ZDRu" depending on the specified input datatype ind_rad : int radar index @@ -1875,7 +1962,8 @@ Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "uPhiDPu" or + "uPhiDP" depending on the provided datatypes ind_rad : int radar index
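process_Doppler_velocity and process_Doppler_width, described further down, are spectral-moment computations; the first and second moments of a synthetic Gaussian spectrum give the mean velocity and spectrum width (a sketch, not the pyrad code):

    import numpy as np

    vel = np.linspace(-8.0, 8.0, 64)                  # velocity axis [m/s]
    power = np.exp(-0.5 * ((vel - 2.0) / 1.5) ** 2)   # toy Gaussian spectrum

    p_tot = power.sum()
    v_mean = (vel * power).sum() / p_tot              # first moment -> V
    width = np.sqrt(((vel - v_mean) ** 2 * power).sum() / p_tot)  # -> W
    print(v_mean, width)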
@@ -1929,7 +2017,14 @@ def process_rhohv(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + either a combination of signal and noise + ("ShhADU" and "SvvADU") or ("ShhADUu" and "SvvADUu"), and, + ("sNADUh" and "sNADUv"), or + the power signal + ("sPhhADU" and "sPvvADU") or ("sPhhADUu" and "sPvvADUu"), and + optionally ("sRhoHV" or "sRhoHVu") + subtract_noise : Bool If True noise will be subtracted from the signal radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field "RhoHV" + or "uRhoHV" depending on the provided datatypes + ind_rad : int radar index @@ -2036,14 +2133,18 @@ def process_Doppler_velocity(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "sdBZ" or "sdBZv" or "sdBuZ" or "sdBuZv" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "V" if "sdBZ" was provided, + "Vv" if "sdBZv" was provided, + "Vu" if "sdBuZ" was provided ind_rad : int radar index @@ -2095,14 +2196,18 @@ def process_Doppler_width(procstatus, dscfg, radar_list=None): data set configuration. Accepted configuration keywords:: datatype : list of string. Dataset keyword - The input data types + The input data types, must contain, + "sdBZ" or "sdBZv" or "sdBuZ" or "sdBuZv" radar_list : list of spectra objects Optional. list of spectra objects Returns ------- new_dataset : dict - dictionary containing the output + dictionary containing the output field + "W" if "sdBZ" was provided, + "Wv" if "sdBZv" was provided, + "Wu" if "sdBuZ" was provided ind_rad : int radar index diff --git a/src/pyrad_proc/pyrad/proc/process_timeseries.py index cdd861087..47f21344c 100644 --- a/src/pyrad_proc/pyrad/proc/process_timeseries.py +++ b/src/pyrad_proc/pyrad/proc/process_timeseries.py @@ -42,7 +42,8 @@ def process_point_measurement(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The data type where we want to extract the point measurement + The data type where we want to extract the point measurement, + can be any datatype supported by pyrad and available in the data single_point : boolean. Dataset keyword if True only one gate per radar volume is going to be kept. Otherwise all gates within the azimuth and elevation tolerance @@ -283,7 +284,8 @@ def process_multiple_points(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string. Dataset keyword - The data type where we want to extract the point measurement + The data type where we want to extract the point measurement, + can be any datatype supported by pyrad and available in the data truealt : boolean. Dataset keyword if True the user input altitude is used to determine the point of interest. @@ -470,7 +472,8 @@ def process_qvp(procstatus, dscfg, radar_list=None): data set configuration. Accepted Configuration Keywords:: datatype : string.
@@ -470,7 +472,8 @@ def process_qvp(procstatus, dscfg, radar_list=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : string. Dataset keyword
-            The data type where we want to extract the point measurement
+            The data type where we want to extract the point measurement,
+            can be any datatype supported by pyrad and available in the data
         angle : int or float
             If the radar object contains a PPI volume, the sweep number to
             use, if it contains an RHI volume the elevation angle.
@@ -504,7 +507,7 @@
     Returns
     -------
     new_dataset : dict
-        dictionary containing the QVP and a keyboard stating whether the
+        dictionary containing the QVP and a keyword stating whether the
         processing has finished or not.
     ind_rad : int
         radar index
@@ -629,7 +632,8 @@ def process_rqvp(procstatus, dscfg, radar_list=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : string. Dataset keyword
-            The data type where we want to extract the point measurement
+            The data type where we want to extract the point measurement,
+            can be any datatype supported by pyrad and available in the data
         hmax : float
             The maximum height to plot [m]. Default 10000.
         hres : float
@@ -663,7 +667,7 @@
     Returns
     -------
     new_dataset : dict
-        dictionary containing the QVP and a keyboard stating whether the
+        dictionary containing the QVP and a keyword stating whether the
         processing has finished or not.
     ind_rad : int
         radar index
@@ -788,7 +792,8 @@ def process_evp(procstatus, dscfg, radar_list=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : string. Dataset keyword
-            The data type where we want to extract the point measurement
+            The data type where we want to extract the point measurement,
+            can be any datatype supported by pyrad and available in the data
         lat, lon : float
             latitude and longitude of the point of interest [deg]
         latlon_tol : float
@@ -824,7 +829,7 @@
     Returns
     -------
     new_dataset : dict
-        dictionary containing the EVP and a keyboard stating whether the
+        dictionary containing the EVP and a keyword stating whether the
         processing has finished or not.
     ind_rad : int
         radar index
@@ -958,7 +963,8 @@ def process_svp(procstatus, dscfg, radar_list=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : string. Dataset keyword
-            The data type where we want to extract the point measurement
+            The data type where we want to extract the point measurement,
+            can be any datatype supported by pyrad and available in the data
         angle : int or float
             If the radar object contains a PPI volume, the sweep number to
             use, if it contains an RHI volume the elevation angle.
@@ -1001,7 +1007,7 @@
     Returns
     -------
     new_dataset : dict
-        dictionary containing the svp and a keyboard stating whether the
+        dictionary containing the svp and a keyword stating whether the
         processing has finished or not.
     ind_rad : int
         radar index
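The QVP-type functions above accumulate data over successive radar volumes, which is why their returned dict flags whether processing has finished. A sketch of the implied procstatus life cycle (assumed workflow; in practice pyrad's flow control drives these calls, and all names below are placeholders, not pyrad API)::

    # Sketch of the 0/1/2 procstatus cycle (0 initializing, 1 processing
    # volume, 2 post-processing), reusing the same dscfg dict in every call.
    from pyrad.proc import process_qvp

    dscfg = {
        "dsname": "qvp",
        "datatype": ["CFRADIAL:dBZ"],  # any datatype available in the data
        "angle": 2,                    # sweep number (PPI) or elevation (RHI)
    }
    process_qvp(0, dscfg)  # initialize
    for radar in radar_volumes:  # placeholder: iterable of radar volumes
        new_dataset, ind_rad = process_qvp(1, dscfg, radar_list=[radar])
    new_dataset, ind_rad = process_qvp(2, dscfg)  # finalize the time series
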
@@ -1143,7 +1149,8 @@ def process_time_height(procstatus, dscfg, radar_list=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : string. Dataset keyword
-            The data type where we want to extract the point measurement
+            The data type where we want to extract the point measurement,
+            can be any datatype supported by pyrad and available in the data
         lat, lon : float
             latitude and longitude of the point of interest [deg]
         latlon_tol : float
@@ -1169,7 +1176,7 @@
     Returns
     -------
     new_dataset : dict
-        dictionary containing the QVP and a keyboard stating whether the
+        dictionary containing the QVP and a keyword stating whether the
         processing has finished or not.
     ind_rad : int
         radar index
@@ -1284,7 +1291,8 @@ def process_ts_along_coord(procstatus, dscfg, radar_list=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : string. Dataset keyword
-            The data type where we want to extract the time series
+            The data type where we want to extract the time series,
+            can be any datatype supported by pyrad and available in the data
         mode : str
             coordinate to extract data along. Can be ALONG_AZI, ALONG_ELE
             or ALONG_RNG
@@ -1305,7 +1313,7 @@
     Returns
     -------
     new_dataset : dict
-        dictionary containing the data and a keyboard stating whether the
+        dictionary containing the data and a keyword stating whether the
         processing has finished or not.
     ind_rad : int
         radar index
diff --git a/src/pyrad_proc/pyrad/proc/process_traj.py b/src/pyrad_proc/pyrad/proc/process_traj.py
index c5128207f..1e34b7307 100755
--- a/src/pyrad_proc/pyrad/proc/process_traj.py
+++ b/src/pyrad_proc/pyrad/proc/process_traj.py
@@ -64,7 +64,8 @@ def process_trajectory(procstatus, dscfg, radar_list=None, trajectory=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : list of string. Dataset keyword
-            The input data types
+            The input data types, can be any datatype supported by pyrad
+            and available in the radar data
     radar_list : list of Radar objects
         Optional. list of radar objects
     trajectory : Trajectory object
@@ -115,7 +116,8 @@ def process_traj_trt(procstatus, dscfg, radar_list=None, trajectory=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : list of string. Dataset keyword
-            The input data types
+            The input data types, can be any datatype supported by pyrad
+            and available in the radar data
         time_tol : float. Dataset keyword
             tolerance between reference time of the radar volume and that of
             the TRT cell [s]. Default 100.
@@ -279,7 +281,8 @@ def process_traj_trt_contour(procstatus, dscfg, radar_list=None, trajectory=None
         data set configuration. Accepted Configuration Keywords::

         datatype : list of string. Dataset keyword
-            The input data types
+            The input data types, can be any datatype supported by pyrad
+            and available in the radar data
         time_tol : float. Dataset keyword
             tolerance between reference time of the radar volume and that of
             the TRT cell [s]. Default 100.
@@ -355,7 +358,8 @@ def process_traj_lightning(procstatus, dscfg, radar_list=None, trajectory=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : list of string. Dataset keyword
-            The input data types
+            The input data types, can be any datatype supported by pyrad
+            and available in the radar data
         data_is_log : dict. Dataset keyword
             Dictionary specifying for each field if it is in log (True)
             or linear units (False). Default False
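For illustration (a sketch, not from the patch): the trajectory functions differ from the other dataset functions only in the extra Trajectory object passed via the trajectory keyword visible in the signatures above::

    # Sketch only; radar and traj are placeholders for a radar volume and a
    # pyrad Trajectory object read beforehand, and the "CFRADIAL:dBZ"
    # datatype descriptor format is an assumption.
    from pyrad.proc import process_trajectory

    dscfg = {"dsname": "traj", "datatype": ["CFRADIAL:dBZ"]}
    new_dataset, ind_rad = process_trajectory(
        1, dscfg, radar_list=[radar], trajectory=traj)
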
@@ -619,7 +623,8 @@ def process_traj_atplane(procstatus, dscfg, radar_list=None, trajectory=None):
         data set configuration. Accepted Configuration Keywords::

         datatype : list of string. Dataset keyword
-            The input data types
+            The input data types, can be any datatype supported by pyrad
+            and available in the radar data
         data_is_log : dict. Dataset keyword
             Dictionary specifying for each field if it is in log (True)
             or linear units (False). Default False
@@ -886,7 +891,8 @@ def process_traj_antenna_pattern(procstatus, dscfg, radar_list=None, trajectory=
         2 post-processing
     dscfg : dictionary of dictionaries
         datatype : list of string. Dataset keyword
-            The input data types
+            The input data types, can be any datatype supported by pyrad
+            and available in the radar data
         antennaType : str. Dataset keyword
             Type of antenna of the radar we want to get the view from. Can
             be AZIMUTH, ELEVATION, LOWBEAM, HIGHBEAM
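To make the data_is_log keyword concrete (an assumed example, not from the patch): it maps each requested field to a boolean, True for logarithmic units such as dBZ::

    # Hypothetical dscfg fragment for process_traj_atplane; the datatype
    # descriptors and field names are assumptions for illustration.
    dscfg = {
        "dsname": "traj_atplane",
        "datatype": ["CFRADIAL:dBZ", "CFRADIAL:ZDR"],
        "data_is_log": {"dBZ": True, "ZDR": True},  # per-field log/linear
    }
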