Source code for pycaps.derive.derive_functions

import numpy as np

from pycaps.derive.dropsizedist import RainPSD, SnowPSD, HailPSD, GraupelPSD, PSDCollection

# Some more or less universal variables
density_rain = 1000.     # Density of rain water in kg/m^3
dielectric_water = 0.93  # Dielectric factor |K|^2 for liquid water
dielectric_ice = 0.176   # Dielectric factor |K|^2 for ice
Rd = 287.        # Gas constant for dry air
Rv = 461.5       # Gas constant for water vapor
epsilon = Rd / Rv  # Ratio of the gas constants (~0.622)
Cp = 1004        # Specific heat of dry air at constant pressure
L = 2.5e6        # Latent heat of vaporization for water
kappa = Rd / Cp  # Poisson constant
p0 = 100000.0    # Base state pressure in Pa
g = 9.806        # Gravitational acceleration in m/s^2


def reflectivity_lin(**kwargs):
    """
    Compute reflectivity from a Lin microphysics scheme

    Args:
        **kwargs: Must contain 'qv' (water vapor mixing ratio in kg/kg), 'qr' (rain mixing ratio
            in kg/kg), 'qs' (snow mixing ratio in kg/kg), 'qh' (hail mixing ratio in kg/kg),
            'n0rain' (n0 for the rain distribution in 1/m^4), 'n0snow' (n0 for the snow
            distribution in 1/m^4), 'n0hail' (n0 for the hail distribution in 1/m^4),
            'rhosnow' (assumed density for snow in kg/m^3), 'rhoice' (assumed density for ice
            in kg/m^3), 'rhohail' (assumed density for hail in kg/m^3), 'p' (air pressure in Pa),
            and 'pt' or 't' (potential temperature or temperature, either in K). Any other
            keyword arguments are ignored.

    Returns:
        Radar reflectivity in dBZ
    """
    if 't' in kwargs:
        temp = kwargs['t']
    else:
        temp = temp_from_theta(**kwargs)

    density = air_density(t=temp, **kwargs)
    is_wet = np.where(temp > 273.15, 1, 0)

    Zr_factor = 720 * 1e18 / ((np.pi * density_rain) ** 1.75 * kwargs['n0rain'] ** 0.75)
    Zs_wet_factor = 720 * 1e18 / ((np.pi * kwargs['rhosnow']) ** 1.75 * kwargs['n0snow'] ** 0.75)
    Zs_dry_factor = 720 * 1e18 * dielectric_ice * kwargs['rhosnow'] ** 0.25 / \
        (np.pi ** 1.75 * dielectric_water * kwargs['n0snow'] ** 0.75 * kwargs['rhoice'] ** 2)
    Zh_factor = 720 * 1e18 / ((np.pi * kwargs['rhohail']) ** 1.75 * kwargs['n0hail'] ** 0.75)

    Zr = Zr_factor * (density * kwargs['qr']) ** 1.75
    Zs = Zs_wet_factor * is_wet * (density * kwargs['qs']) ** 1.75 + \
        Zs_dry_factor * (1 - is_wet) * (density * kwargs['qs']) ** 1.75
    Zh = (Zh_factor * (density * kwargs['qh']) ** 1.75) ** 0.95

    dBZ = 10 * np.log10(Zr + Zs + Zh)

    if len(dBZ.shape) == 0:
        if np.isnan(dBZ):
            dBZ = np.float64(0.)
    else:
        dBZ[np.isnan(dBZ)] = 0.

    dBZ = np.maximum(dBZ, 0.)
    return dBZ

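# Illustrative usage sketch (not part of pycaps itself): build a single-point set of
# Lin-scheme fields and pass them to reflectivity_lin(). The intercept parameters and
# particle densities below are typical ARPS-style defaults and should be treated as
# assumptions, not values mandated by this module.
def _example_reflectivity_lin():
    fields = {
        'p': np.array(85000.),    # air pressure in Pa
        'pt': np.array(300.),     # potential temperature in K
        'qv': np.array(8.0e-3),   # water vapor mixing ratio in kg/kg
        'qr': np.array(1.0e-3),   # rain mixing ratio in kg/kg
        'qs': np.array(0.),       # snow mixing ratio in kg/kg
        'qh': np.array(0.),       # hail mixing ratio in kg/kg
        'n0rain': 8.0e6,          # rain intercept parameter in 1/m^4
        'n0snow': 3.0e6,          # snow intercept parameter in 1/m^4
        'n0hail': 4.0e4,          # hail intercept parameter in 1/m^4
        'rhosnow': 100.,          # assumed snow density in kg/m^3
        'rhoice': 917.,           # assumed ice density in kg/m^3
        'rhohail': 913.,          # assumed hail density in kg/m^3
    }
    return reflectivity_lin(**fields)
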
def reflectivity_dualmom(**kwargs):
    """
    Compute reflectivity from a dual-moment microphysics scheme.

    Args:
        **kwargs: Must contain 'qv' (water vapor mixing ratio in kg/kg), 'qr' (rain mixing ratio
            in kg/kg), 'qs' (snow mixing ratio in kg/kg), 'qh' (hail mixing ratio in kg/kg),
            'qg' (graupel mixing ratio in kg/kg), 'nr' (total number concentration of rain in
            #/m^3), 'ns' (total number concentration of snow in #/m^3), 'nh' (total number
            concentration of hail in #/m^3), 'ng' (total number concentration of graupel in
            #/m^3), 'p' (air pressure in Pa), and 'pt' or 't' (potential temperature or
            temperature, either in K). Any other keyword arguments are ignored.

    Returns:
        Radar reflectivity in dBZ
    """
    rp = RainPSD(kwargs['qr'], num_concentration=kwargs['nr'])
    sp = SnowPSD(kwargs['qs'], num_concentration=kwargs['ns'])
    hp = HailPSD(kwargs['qh'], num_concentration=kwargs['nh'])
    gp = GraupelPSD(kwargs['qg'], num_concentration=kwargs['ng'])

    pc = PSDCollection(rain=rp, snow=sp, hail=hp, graupel=gp)

    density = air_density(**kwargs)
    return pc.reflectivity(density, method='jzx')

def updraft_helicity(z_bottom=2000., z_top=5000., **kwargs):
    """
    Calculates updraft helicity (m^2/s^2) given 3d winds and physical height.
    u, v, and w should have dimensions of [nz, ny, nx]; dx and dy are scalars;
    zp is an array with dimensions of [nz, ny, nx].

    Args:
        u: u-wind (east-west wind component)
        v: v-wind (north-south wind component)
        w: w-wind (vertical wind component)
        dx: horizontal (east-west) grid spacing in meters
        dy: horizontal (north-south) grid spacing in meters
        zp: physical height
        z_bottom: Bottom of layer over which to calculate updraft helicity (OPTIONAL: default 2000 m)
        z_top: Top of layer over which to calculate updraft helicity (OPTIONAL: default 5000 m)

    Returns:
        The updraft helicity in m^2/s^2 in the layer z_bottom to z_top.
    """
    # Initializing arrays
    nz, ny, nx = kwargs['u'].shape

    # Calculate vertical vorticity
    vort = vert_vorticity(**kwargs)

    # Correct zp --> zp_scalar for ARPS data
    zps = zp_to_scalar(kwargs['zp'])

    dz = np.zeros(zps.shape)
    for kz in xrange(0, nz - 1, 1):
        dz[kz, :, :] = zps[kz + 1, :, :] - zps[kz, :, :]
    # dz = zp[1:] - zp[:(nz - 1)]

    # Integrate ONLY over the desired layer by creating a mask array (1 = in layer, 0 = not in layer)
    in_layer = np.zeros(kwargs['zp'].shape)
    in_layer[(zps > z_bottom) & (zps < z_top)] = 1.0

    # Integrate ONLY where w > 0.0:
    w = np.where(kwargs['w'] > 0.0, kwargs['w'], 0.0)
    # w[w < 0] = 0.

    uh_pointwise = w * vort * dz * in_layer
    UH = np.sum(uh_pointwise, axis=0)

    return UH

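# Illustrative usage sketch (not part of pycaps itself): a uniform 1 m/s updraft in
# solid-body rotation, u = -omega*y and v = omega*x, has vertical vorticity 2*omega.
# With omega = 1e-4 s^-1 and roughly 2.5 km of the 2-5 km layer resolved on this coarse
# synthetic grid, the integral works out to about 0.5 m^2/s^2 everywhere.
def _example_updraft_helicity():
    nz, ny, nx = 6, 5, 5
    dx = dy = 1000.                      # grid spacing in m
    omega = 1.0e-4                       # rotation rate in 1/s
    y = np.arange(ny)[:, np.newaxis] * dy
    x = np.arange(nx)[np.newaxis, :] * dx
    u = np.tile(-omega * y * np.ones((ny, nx)), (nz, 1, 1))
    v = np.tile(omega * x * np.ones((ny, nx)), (nz, 1, 1))
    w = np.ones((nz, ny, nx))            # uniform 1 m/s updraft
    zp = np.arange(nz)[:, np.newaxis, np.newaxis] * 1000. * np.ones((nz, ny, nx))
    return updraft_helicity(u=u, v=v, w=w, dx=dx, dy=dy, zp=zp)
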
#------------------------------------------------------------------------#
# Calculate hydrometeor classification
# [Park et al. algorithm, modified & ported to ARPS by Bryan Putnam]
# Updated by Bryan Putnam -- 9 Mar. 2015: Added additional classifiers
#   to prevent speckling and erroneous classification of near-surface
#   snow in situations where snow should not be there (e.g. summer).
#
# Updated 9 Jun. 2015 -- Excluded boundary points from snow level calculation.
#
# Required arguments:  Name [Contents]
#   (From grdbas file)
#     x   [nx]
#     y   [ny]
#   (From gridtilt radar data)
#     ref [Z]
#     zdr [Zdr]
#     rhv [Rho-hv]
#     vel [Vr]
#     hgt [Radar height ('outgridtilthgt')]
#   (From ARPS history file)
#     pt  [3D Potential temperature ('pt')]
#     p   [3D Pressure in Pa ('p')]
#     qs  [3D Snow mixing ratio ('qs')]
#     zp  [3D gridpoint physical height in meters ('zp')]
#
# The hydrometeor classifications are:
#   (0) ground clutter/AP; (1) biological scatterers [birds, bugs, etc.];
#   (2) snow; (3) melting snow; (4) ice crystals; (5) graupel;
#   (6) large raindrops; (7) rain; (8) heavy rain; (9) rain/hail mixture;
#   (10) three-body scatter spike [hail spike]
#
#-------------------------------------------------------------------------#
# Note that x, y may apply to the model grid or azim/gate; the variables are
# just placeholders for a two-dimensional data structure.
#-------------------------------------------------------------------------#
def hydrometeor_classify(qc_opt, x, y, ref, zdr, rhv, vel, pt, p, qs, hgt, zp):
    # import np
    # from ns_modules import create_progress_bar, update_progress_bar

    # Define membership function tables
    Z_table = np.array([[15.0, 5.0, 5.0, 25.0, 0.0, 25.0, 20.0, 5.0, 40.0, 45.0, -5.0],
                        [20.0, 10.0, 10.0, 30.0, 5.0, 35.0, 25.0, 10.0, 45.0, 50.0, 0.0],
                        [70.0, 20.0, 35.0, 40.0, 20.0, 50.0, 45.0, 45.0, 55.0, 75.0, 10.0],
                        [80.0, 30.0, 40.0, 50.0, 25.0, 55.0, 50.0, 50.0, 60.0, 80.0, 25.0]])

    rhv_table = np.array([[0.5, 0.3, 0.95, 0.88, 0.95, 0.9, 0.92, 0.95, 0.92, 0.85, 0.0],
                          [0.6, 0.5, 0.98, 0.92, 0.98, 0.97, 0.95, 0.97, 0.95, 0.9, 0.28],
                          [0.9, 0.8, 1.0, 0.95, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.77],
                          [0.95, 0.83, 1.01, 0.985, 1.01, 1.01, 1.01, 1.01, 1.01, 1.01, 0.92]])

    # Zdr table is made on the fly, after data is read in

    weights = np.array([[0.2, 0.4, 1.0],
                        [0.4, 0.6, 1.0],
                        [1.0, 0.8, 0.6],
                        [0.6, 0.8, 1.0],
                        [1.0, 0.6, 0.4],
                        [0.8, 1.0, 0.4],
                        [0.8, 1.0, 0.6],
                        [1.0, 0.8, 0.6],
                        [1.0, 0.8, 0.6],
                        [1.0, 0.8, 0.6],
                        [1.0, 0.2, 1.0]])

    nx = x
    ny = y

    refdata = ref
    zdrdata = zdr
    rhvdata = rhv
    veldata = vel
    heightdata = hgt
    thetadata = pt
    pdata = p
    phys_height = zp

    nz = len(phys_height[:, 0, 0])

    snow_lev_mid = 0.0
    # snow_lev_low = 100000000.0

    found_snow_lev = 0
    found_frz_lev = 0

    hca = np.zeros((nx, ny))

    # Find freezing level and lowest level of snow
    frz_lev = np.zeros((nx, ny)) - 999.
    snow_lev = np.zeros((nx, ny)) - 999.

    air_temp_C = temp_from_theta(pt=pt, p=p) - 273.15

    # pbar1 = create_progress_bar(nz, steps=20, pbtext='Finding freezing/snow levels: ')
    for k in xrange(nz):
        # update_progress_bar(pbar1, k)
        snow_lev = np.where((qs[k] > 0) & (snow_lev == -999.), phys_height[k], snow_lev)
        frz_lev = np.where((air_temp_C[k] < 0.) & (frz_lev == -999.), phys_height[k], frz_lev)

    # pbar2 = create_progress_bar(nx, steps=20, pbtext='Calculating physical height: ')
    # for i in xrange(nx):
    #     update_progress_bar(pbar2, i)
    #     for j in xrange(ny):
    #         frz_lev[i, j] = phys_height[frz_lev_2D[i, j], i, j]
    #         snow_lev_2D[i, j] = np.where(snow_lev_2D[i, j] > -1, phys_height[snow_lev_2D[i, j], i, j], 100000.0)
    # snow_lev_low = np.amin(snow_lev_2D[1:-1, 1:-1])  # Exclude boundary points to avoid potential error.
    #                                                  # This fix is unnecessary in most cases.

    # pbar3 = create_progress_bar(nx, steps=20, pbtext='Running HCA: ')

    # Produce f1, f2, and f3 (Zdr membership functions) as arrays:
    f1_array = (-0.5 + 2.5e-3 * refdata + 7.5e-4 * (refdata ** 2))
    f2_array = (0.68 - 4.81e-2 * refdata + 2.92e-3 * (refdata ** 2))
    f3_array = (1.42 + 6.67e-2 * refdata + 4.85e-4 * (refdata ** 2))

    # Reflectivity (Zhh) weights:
    prob_array = np.zeros((11, nx, ny))
    for s in xrange(11):
        prob_array[s, :, :] = ((-1 / (Z_table[3, s] - Z_table[2, s])) * (refdata - Z_table[3, s]) * weights[s, 0])
        prob_array[s, :, :] = np.where(((refdata <= Z_table[0, s]) | (refdata >= Z_table[3, s])),
                                       weights[s, 0] * 0.0, prob_array[s, :, :])
        prob_array[s, :, :] = np.where(((refdata >= Z_table[1, s]) & (refdata <= Z_table[2, s])),
                                       weights[s, 0] * 1.0, prob_array[s, :, :])
        prob_array[s, :, :] = np.where(((refdata > Z_table[0, s]) & (refdata < Z_table[1, s])),
                                       ((1 / (Z_table[1, s] - Z_table[0, s])) * (refdata - Z_table[0, s]) * weights[s, 0]),
                                       prob_array[s, :, :])

    # Cross-pol correlation coefficient (rhv) weights:
    prob_rhv_array = np.zeros((11, nx, ny))
    for s in xrange(11):
        prob_rhv_array[s, :, :] = (-1 / (rhv_table[3, s] - rhv_table[2, s])) * (rhvdata - rhv_table[3, s]) * weights[s, 2]
        prob_rhv_array[s, :, :] = np.where(((rhvdata <= rhv_table[0, s]) | (rhvdata >= rhv_table[3, s])),
                                           weights[s, 2] * 0.0, prob_rhv_array[s, :, :])
        prob_rhv_array[s, :, :] = np.where(((rhvdata >= rhv_table[1, s]) & (rhvdata <= rhv_table[2, s])),
                                           weights[s, 2] * 1.0, prob_rhv_array[s, :, :])
        prob_rhv_array[s, :, :] = np.where(((rhvdata > rhv_table[0, s]) & (rhvdata < rhv_table[1, s])),
                                           (1 / (rhv_table[1, s] - rhv_table[0, s])) * (rhvdata - rhv_table[0, s]) * weights[s, 2],
                                           prob_rhv_array[s, :, :])

    # Sum up Zhh and rhv contributions:
    prob_array = prob_array + prob_rhv_array

    for i in xrange(nx):
        # update_progress_bar(pbar3, i)
        for j in xrange(ny):
            # Handle the clear-air case first -- saves computing time.
            if refdata[i, j] <= 0.0:
                hca[i, j] = 0.
            else:
                # Probability array -- contains probabilities for 11 different hydrometeor classes
                prob = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

                # Bring in Zhh and rhv weights (from outside loop)
                for s in xrange(11):
                    prob[s] = prob_array[s, i, j]

                f1 = f1_array[i, j]
                f2 = f2_array[i, j]
                f3 = f3_array[i, j]

                # Zdr table calculation:
                Zdr_table = np.array([[-1.0, 0.0, -0.3, 0.5, 0.0, -0.3, f2 - 0.3, f1 - 0.3, f1 - 0.3, -0.3, -5.9],
                                      [-2.0, 2.0, 0.0, 1.0, 0.3, 0.0, f2, f1, f1, 0.0, -2.2],
                                      [1.0, 10.0, 0.3, 2.0, 3.0, 0.3, f3, f2, f2, f1, 8.0],
                                      [2.0, 12.0, 0.6, 3.0, 3.3, 0.6, f3 + 1, f2 + 0.5, f2 + 0.5, f1 + 0.5, 12.0]])

                for s in xrange(11):  # For each hydrometeor class
                    if zdrdata[i, j] <= Zdr_table[0, s] or zdrdata[i, j] >= Zdr_table[3, s]:
                        prob[s] = prob[s] + 0.0 * weights[s, 1]
                    elif zdrdata[i, j] >= Zdr_table[1, s] and zdrdata[i, j] <= Zdr_table[2, s]:
                        prob[s] = prob[s] + 1.0 * weights[s, 1]
                    elif zdrdata[i, j] > Zdr_table[0, s] and zdrdata[i, j] < Zdr_table[1, s]:
                        prob[s] = (prob[s] + (1 / (Zdr_table[1, s] - Zdr_table[0, s])) *
                                   (zdrdata[i, j] - Zdr_table[0, s]) * weights[s, 1])
                    else:
                        prob[s] = (prob[s] + (-1 / (Zdr_table[3, s] - Zdr_table[2, s])) *
                                   (zdrdata[i, j] - Zdr_table[3, s]) * weights[s, 1])

                # Normalize each probability by the sum of its weights
                for s in xrange(11):
                    prob[s] = prob[s] / sum(weights[s])

                prob_array[:, i, j] = prob

                # Apply Zdr constraint for large raindrops from Park et al. (2009) within the loop:
                if zdrdata[i, j] < (f2 - 0.3):
                    prob_array[6, i, j] = 0.0

    # Additional constraints from Park et al. (2009) implemented using np.where (for speed)
    prob_array[0, :, :] = np.where((veldata > 1.0), 0.0, prob_array[0, :, :])
    prob_array[1, :, :] = np.where((rhvdata > 0.97), 0.0, prob_array[1, :, :])
    prob_array[2, :, :] = np.where((zdrdata > 2.0), 0.0, prob_array[2, :, :])
    prob_array[3, :, :] = np.where(((refdata < 20.0) | (zdrdata < 0.0)), 0.0, prob_array[3, :, :])
    prob_array[4, :, :] = np.where((refdata > 40.0), 0.0, prob_array[4, :, :])
    prob_array[5, :, :] = np.where(((refdata < 10.0) | (refdata > 60.0)), 0.0, prob_array[5, :, :])
    # Zdr condition for class 6 already handled in the earlier loop.
    prob_array[7, :, :] = np.where((refdata > 50.0), 0.0, prob_array[7, :, :])
    prob_array[8, :, :] = np.where((refdata < 30.0), 0.0, prob_array[8, :, :])
    prob_array[9, :, :] = np.where((refdata < 40.0), 0.0, prob_array[9, :, :])

    # Calculate the level midway between the freezing level and the lowest snow (as an array)
    # to help differentiate areas of likely dry/wet snow as it falls and melts.
    snow_lev_mid = (frz_lev + snow_lev) / 2.0

    # If qc_opt is true (> 0), QC has already been done on the data to remove ground
    # clutter, biological scatterers, and three-body scatter spikes.
    prob_array[0, :, :] = np.where(qc_opt > 0, 0.0, prob_array[0, :, :])
    prob_array[1, :, :] = np.where(qc_opt > 0, 0.0, prob_array[1, :, :])
    prob_array[10, :, :] = np.where(qc_opt > 0, 0.0, prob_array[10, :, :])

    # Series of checks to remove categories that likely do/don't exist based
    # on temperature and snow profiles:
    prob_array[2, :, :] = np.where((heightdata < snow_lev), 0.0, prob_array[2, :, :])
    prob_array[3, :, :] = np.where((heightdata < snow_lev), 0.0, prob_array[3, :, :])
    prob_array[4, :, :] = np.where((heightdata < snow_lev), 0.0, prob_array[4, :, :])
    prob_array[5, :, :] = np.where((heightdata < snow_lev), 0.0, prob_array[5, :, :])

    prob_array[0, :, :] = np.where(((heightdata > snow_lev) & (heightdata < snow_lev_mid)), 0.0, prob_array[0, :, :])
    prob_array[1, :, :] = np.where(((heightdata > snow_lev) & (heightdata < snow_lev_mid)), 0.0, prob_array[1, :, :])
    prob_array[2, :, :] = np.where(((heightdata > snow_lev) & (heightdata < snow_lev_mid)), 0.0, prob_array[2, :, :])
    prob_array[4, :, :] = np.where(((heightdata > snow_lev) & (heightdata < snow_lev_mid)), 0.0, prob_array[4, :, :])
    prob_array[8, :, :] = np.where(((heightdata > snow_lev) & (heightdata < snow_lev_mid)), 0.0, prob_array[8, :, :])

    prob_array[0, :, :] = np.where(((heightdata > snow_lev_mid) & (heightdata < frz_lev)), 0.0, prob_array[0, :, :])
    prob_array[1, :, :] = np.where(((heightdata > snow_lev_mid) & (heightdata < frz_lev)), 0.0, prob_array[1, :, :])
    prob_array[4, :, :] = np.where(((heightdata > snow_lev_mid) & (heightdata < frz_lev)), 0.0, prob_array[4, :, :])
    prob_array[7, :, :] = np.where(((heightdata > snow_lev_mid) & (heightdata < frz_lev)), 0.0, prob_array[7, :, :])
    prob_array[8, :, :] = np.where(((heightdata > snow_lev_mid) & (heightdata < frz_lev)), 0.0, prob_array[8, :, :])

    prob_array[0, :, :] = np.where((heightdata > frz_lev), 0.0, prob_array[0, :, :])
    prob_array[1, :, :] = np.where((heightdata > frz_lev), 0.0, prob_array[1, :, :])
    prob_array[3, :, :] = np.where((heightdata > frz_lev), 0.0, prob_array[3, :, :])
    prob_array[6, :, :] = np.where((heightdata > frz_lev), 0.0, prob_array[6, :, :])
    prob_array[7, :, :] = np.where((heightdata > frz_lev), 0.0, prob_array[7, :, :])
    prob_array[8, :, :] = np.where((heightdata > frz_lev), 0.0, prob_array[8, :, :])

    # print prob_array

    hca = np.argmax(prob_array, axis=0) + 1
    hca[refdata <= 0.0] = 0.0

    # for i in range(0, nx, 1):
    #     for j in range(0, ny, 1):
    #         if refdata[i, j] <= 0.0:
    #             pass
    #         else:
    #             hca[i, j] = np.argmax(prob_array[:, i, j]) + 1  # have to add one since python
    #                                                             # has an index start of 0

    return hca

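# Convenience lookup (illustrative, not part of the original module): class names for the
# HCA categories listed in the header comment above. Note that hydrometeor_classify()
# returns 0 for clear air (reflectivity <= 0 dBZ) and otherwise the winning class index
# plus one, so an output value of k corresponds to HCA_CLASS_NAMES[k - 1].
HCA_CLASS_NAMES = {
    0: 'ground clutter/AP',
    1: 'biological scatterers',
    2: 'snow',
    3: 'melting snow',
    4: 'ice crystals',
    5: 'graupel',
    6: 'large raindrops',
    7: 'rain',
    8: 'heavy rain',
    9: 'rain/hail mixture',
    10: 'three-body scatter spike',
}
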
#------------------------------------------------------------------------#
def calc_mesh(**kwargs):
    """
    Calculates the Maximum Estimated Size of Hail (MESH) following Witt et al. (1998).

    Args:
        **kwargs: 'dBZ' (the reflectivity), 'pt' (potential temperature), 'p' (the pressure),
            'zp' (the physical height). dBZ, pt, p, and zp must be numpy arrays with 3
            dimensions, and the axes are assumed to be in (NZ, NY, NX) order. Any kwargs not
            'dBZ', 'pt', 'p', or 'zp' will be ignored.

    Returns:
        The numpy array of MESH (mm). The array has two dimensions (NY, NX).
    """
    dBZ = kwargs['dBZ']
    zp = zp_to_scalar(kwargs['zp'])
    dz = np.zeros(dBZ.shape)
    dz[1:] = zp[1:] - zp[:-1]

    # --- Computing the reflectivity weighting factor
    W_Z = np.where(dBZ >= 40, (dBZ - 40.0) / 10.0, 0)
    W_Z[dBZ >= 50.] = 1.

    # --- Hail kinetic energy (HKE) term
    hke = np.where(dBZ >= 40, 0.000005 * (10.0 ** (0.084 * dBZ)) * W_Z, 0)

    # --- Calculating the freezing-level height
    h_0 = isotherm_hgt(isotherm=0, convert_scalar=False, pt=kwargs['pt'], p=kwargs['p'], zp=zp)

    # --- Calculating the -20 C isotherm height
    h_neg20 = isotherm_hgt(isotherm=-20, convert_scalar=False, pt=kwargs['pt'], p=kwargs['p'], zp=zp)

    # --- Temperature-based weighting function
    W_T = np.where(zp[:] >= np.amax(h_0, axis=0), (zp - h_0) / (h_neg20 - h_0), 0)
    W_T = np.where(zp[:] >= h_neg20, 1, W_T)

    # --- SHI and MESH
    shi_2D = np.sum(0.1 * (W_T * hke * dz), axis=0)
    shi_2D[shi_2D < 0] = 0
    mesh_2D = 2.54 * (shi_2D ** 0.5)

    return mesh_2D

#------------------------------------------------------------------------#
def air_density(**kwargs):
    """
    Calculates air density for given pressure and potential temperature arrays of arbitrary shape.

    Args:
        **kwargs: Must contain 'p' (pressure in Pa) and 'pt' or 't' (potential temperature or
            temperature, either in K). May also contain 'qv' (water vapor mixing ratio in kg/kg).
            Any arguments other than these are ignored.

    Returns:
        Air density in kg/m^3 as an array of the same shape as p and pt
    """
    if 'qv' in kwargs:
        Tv = virt_temp(**kwargs)
    elif 't' in kwargs:
        Tv = kwargs['t']
    else:
        Tv = temp_from_theta(**kwargs)

    # Calculate air density using the ideal gas law
    rho_air = kwargs['p'] / (Rd * Tv)
    return rho_air

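# Quick sanity check (illustrative, not part of pycaps): dry air at 300 K and 1000 hPa has a
# density of p / (Rd * T) = 100000 / (287 * 300), i.e. roughly 1.16 kg/m^3. Adding moisture
# raises the virtual temperature and therefore lowers the density slightly.
def _example_air_density():
    rho_dry = air_density(p=100000., t=300.)
    rho_moist = air_density(p=100000., t=300., qv=0.010)
    return rho_dry, rho_moist
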
def zp_to_scalar(zp):
    """
    Converts ARPS physical height (zp) to height of scalar values (zps).

    Args:
        zp: ARPS physical height array (dimensions: (nz, ny, nx))

    Returns:
        zp_scalar: zp converted to the height of scalar values (similar to ARPS variable zps)
    """
    zp_scalar = np.zeros(zp.shape)

    # Top layer is non-physical and cannot be corrected; leave it the same.
    zp_scalar[(zp.shape[0] - 1)] = zp[(zp.shape[0] - 1)]

    for index in range(0, zp.shape[0] - 1, 1):
        zp_scalar[index] = ((zp[index] + zp[index + 1]) / 2.0)

    return zp_scalar

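# Illustrative check (not part of pycaps): for a column of evenly spaced zp levels,
# zp_to_scalar() returns the layer midpoints, with the (non-physical) top level left unchanged.
def _example_zp_to_scalar():
    zp = np.arange(0., 2500., 500.).reshape(5, 1, 1)
    # zp levels 0, 500, ..., 2000 m map to scalar heights 250, 750, 1250, 1750, 2000 m.
    return zp_to_scalar(zp)
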
def vert_vorticity(**kwargs):
    """
    Computes vertical vorticity.

    Args:
        **kwargs: Must contain 'u' (u wind component), 'v' (v wind component), 'dx' (grid
            spacing in the x direction), and 'dy' (grid spacing in the y direction). u and v
            must be numpy arrays with 2 or more dimensions, and the axes are assumed to be in
            (..., NY, NX) order. dx and dy must be scalar floats. Any kwargs not 'u', 'v',
            'dx', or 'dy' are ignored.

    Returns:
        A numpy array containing vertical vorticity in 1/s. The array is the same shape as
        the input arrays.
    """
    if 'u' in kwargs:
        u = kwargs['u']
    if 'v' in kwargs:
        v = kwargs['v']

    if 'grid_spacing' in kwargs:
        dx = kwargs['grid_spacing']
        dy = kwargs['grid_spacing']
    elif 'dx' in kwargs and 'dy' in kwargs:
        dx = kwargs['dx']
        dy = kwargs['dy']

    ndim = len(u.shape)
    if len(u.shape) < 2 or len(v.shape) < 2:
        raise ValueError("u and v arrays in vert_vorticity() must have at least two dimensions.")

    spacings = [dy, dx]
    for idx in xrange(ndim - 3, -1, -1):
        if u.shape[idx] > 1:
            spacings = [1] + spacings

    nsqdim = len(u.squeeze().shape)
    dvdx = np.gradient(v.squeeze(), *spacings)[nsqdim - 1]
    dudy = np.gradient(u.squeeze(), *spacings)[nsqdim - 2]

    if len(dvdx.shape) == 2:
        dvdx = dvdx[np.newaxis]
        dudy = dudy[np.newaxis]

    return dvdx - dudy

def horiz_convergence(**kwargs):
    """
    Computes horizontal convergence.

    Args:
        **kwargs: Must contain 'u' (u wind component), 'v' (v wind component), 'dx' (grid
            spacing in the x direction), and 'dy' (grid spacing in the y direction). u and v
            must be numpy arrays with 2 or more dimensions, and the axes are assumed to be in
            (..., NY, NX) order. dx and dy must be scalar floats. Any kwargs not 'u', 'v',
            'dx', or 'dy' are ignored.

    Returns:
        A numpy array containing horizontal convergence in 1/s. The array is the same shape
        as the input arrays.
    """
    if 'u' in kwargs:
        u = kwargs['u']
    if 'v' in kwargs:
        v = kwargs['v']

    if 'grid_spacing' in kwargs:
        dx = kwargs['grid_spacing']
        dy = kwargs['grid_spacing']
    elif 'dx' in kwargs and 'dy' in kwargs:
        dx = kwargs['dx']
        dy = kwargs['dy']

    ndim = len(u.shape)
    if ndim < 2:
        return

    spacings = [dy, dx]
    for idx in xrange(ndim - 3, -1, -1):
        if u.shape[idx] > 1:
            spacings = [1] + spacings

    nsqdim = len(u.squeeze().shape)
    dudx = np.gradient(u.squeeze(), *spacings)[nsqdim - 1]
    dvdy = np.gradient(v.squeeze(), *spacings)[nsqdim - 2]

    return -(dudx + dvdy)

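# Illustrative check (not part of pycaps): for purely divergent flow u = a*x, v = a*y,
# du/dx + dv/dy = 2a, so horiz_convergence() returns approximately -2a everywhere.
def _example_horiz_convergence():
    a, dx, dy = 1.0e-4, 1000., 1000.
    ny, nx = 5, 5
    x = np.arange(nx)[np.newaxis, :] * dx
    y = np.arange(ny)[:, np.newaxis] * dy
    u = a * x * np.ones((ny, nx))
    v = a * y * np.ones((ny, nx))
    return horiz_convergence(u=u, v=v, dx=dx, dy=dy)   # ~ -2.0e-4 1/s
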
def finite_diff(data, coords, axis=0):
    """
    Compute a finite difference along a given axis.

    Args:
        data: A numpy array of which to take the finite difference.
        coords: The coordinates for the axis along which to take the derivative, specified as
            a numpy array.
        axis: The axis along which to take the finite difference; optional, defaults to 0.

    Returns:
        An array of the same shape as the data array containing the finite difference.
    """
    def cta(coord):
        # Build an index tuple that applies `coord` along `axis` and takes everything else.
        return tuple(([slice(None)] * axis) + [coord])

    l_slc = slice(0, -2)
    c_slc = slice(1, -1)
    r_slc = slice(2, None)

    derivative = np.zeros(data.shape)

    # Centered difference on the (possibly nonuniform) interior points
    alpha = (coords[cta(r_slc)] - coords[cta(c_slc)]) / (coords[cta(c_slc)] - coords[cta(l_slc)])
    derivative[cta(c_slc)] = (data[cta(r_slc)] + (alpha - 1) * data[cta(c_slc)] - alpha * data[cta(l_slc)]) / \
        (2 * alpha * (coords[cta(c_slc)] - coords[cta(l_slc)]))

    # One-sided differences at the boundaries
    derivative[cta(0)] = (data[cta(1)] - data[cta(0)]) / (coords[cta(1)] - coords[cta(0)])
    derivative[cta(-1)] = (data[cta(-1)] - data[cta(-2)]) / (coords[cta(-1)] - coords[cta(-2)])

    return derivative

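# Illustrative check (not part of pycaps): differentiate f(x) = x**2 on a uniform grid.
# The interior points use a centered difference and recover df/dx = 2x exactly for a
# quadratic; the two endpoints fall back to one-sided differences and are only first-order
# accurate there.
def _example_finite_diff():
    x = np.linspace(0., 10., 21)
    f = x ** 2
    return finite_diff(f, x, axis=0)   # interior values equal 2*x
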
def vort_components(**kwargs):
    """
    Compute all components of vorticity on a 3-dimensional grid.

    Args:
        **kwargs: Must contain 'u', 'v', 'w', 'x', 'y', and 'z'. u, v, and w are the zonal,
            meridional, and vertical components of wind, respectively, and must be 3-dimensional
            numpy arrays. The order of the axes is assumed to be (NZ, NY, NX). x and y should be
            1-dimensional arrays containing the x and y coordinates for the domain, and z should
            be a 3-dimensional array containing the vertical coordinates for the domain (e.g.
            'zp' from an ARPS history file). All keywords that are not the aforementioned are
            ignored.

    Returns:
        The 3 components of vorticity as a numpy record array. The records are 'xvort'
        (vorticity in the x direction), 'yvort' (vorticity in the y direction), and 'zvort'
        (vertical vorticity). The returned array is the same shape as the input arrays.
    """
    if 'vg_tensor' in kwargs:
        vg_tens = kwargs['vg_tensor']
    else:
        vg_tens = vg_tensor(**kwargs)

    vort3d = np.empty(vg_tens.shape, dtype=[('zvort', np.float32), ('yvort', np.float32), ('xvort', np.float32)])

    vort3d['zvort'] = vg_tens['dvdx'] - vg_tens['dudy']
    vort3d['yvort'] = vg_tens['dudz'] - vg_tens['dwdx']
    vort3d['xvort'] = vg_tens['dwdy'] - vg_tens['dvdz']

    return vort3d

def vg_tensor(**kwargs):
    """
    Compute the components of the velocity gradient tensor.

    Args:
        **kwargs: Must contain 'u', 'v', 'w', 'x', 'y', and 'z'. u, v, and w are the zonal,
            meridional, and vertical components of wind, respectively, and must be 3-dimensional
            numpy arrays. The order of the axes is assumed to be (NZ, NY, NX). x and y should be
            1-dimensional arrays containing the x and y coordinates for the domain, and z should
            be a 3-dimensional array containing the vertical coordinates for the domain (e.g.
            'zp' from an ARPS history file). All keywords that are not the aforementioned are
            ignored.

    Returns:
        The 9 components of the velocity gradient tensor as a numpy record array. The records
        are 'dudx', 'dudy', 'dudz' (change in u wind in the x, y, and z directions), 'dvdx',
        'dvdy', 'dvdz' (change in v wind in the x, y, and z directions), 'dwdx', 'dwdy', and
        'dwdz' (change in w wind in the x, y, and z directions). The returned array is the same
        shape as the input arrays.
    """
    dtype = []
    for wc in ['u', 'v', 'w']:
        for ax in ['x', 'y', 'z']:
            dtype.append(("d%sd%s" % (wc, ax), np.float32))

    vg_tensor = np.empty(kwargs['u'].shape, dtype=dtype)

    nz, ny, nx = kwargs['z'].shape
    kwargs['x'] = np.atleast_3d(kwargs['x']).reshape((1, 1, nx))
    kwargs['y'] = np.atleast_3d(kwargs['y']).reshape((1, ny, 1))

    for wc in ['u', 'v', 'w']:
        for ax_no, ax in enumerate(['z', 'y', 'x']):
            vg_tensor["d%sd%s" % (wc, ax)] = finite_diff(kwargs[wc], kwargs[ax], axis=ax_no)

    return vg_tensor

def pmsl(**kwargs):
    """
    Compute pressure reduced to mean sea level, using the virtual temperature at each point.

    Args:
        **kwargs: Must contain 'z' (height in meters of each point above sea level), 'p'
            (pressure in Pa at each point), 'pt' or 't' (potential temperature or temperature,
            either in K), and 'qv' (water vapor mixing ratio in kg/kg).

    Returns:
        Pressure at mean sea level in Pa as a numpy array of the same shape as the input arrays.
    """
    p = kwargs['p']
    z = kwargs['z']
    T_v = virt_temp(**kwargs)

    return p * np.exp((g * z) / (Rd * T_v))

def temp_from_theta(**kwargs):
    """
    Get temperature from potential temperature.

    Args:
        **kwargs: Must contain 'p' (pressure in Pa) and 'pt' (potential temperature in K),
            either as scalars or numpy arrays. Any other keyword arguments are ignored.

    Returns:
        Temperature in K
    """
    pres = kwargs['p']
    pt = kwargs['pt']
    return pt * (pres / 100000.) ** (2. / 7.)

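# Worked example (illustrative, not part of pycaps): Poisson's equation T = theta * (p / p0)**kappa,
# with kappa approximated as 2/7 as in the function above. For theta = 300 K at p = 850 hPa,
# T = 300 * 0.85**(2/7), which is roughly 286.4 K.
def _example_temp_from_theta():
    return temp_from_theta(pt=300., p=85000.)
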
def temp_from_vapr(**kwargs):
    """
    Get temperature from vapor pressure, using the Clausius-Clapeyron equation.

    Args:
        **kwargs: Must contain one of two sets of arguments. The first is 'e' (vapor pressure
            in Pa). The second is both 'qv' (water vapor mixing ratio in kg/kg) and 'p'
            (pressure in Pa). Any arguments other than these are ignored.

    Returns:
        Temperature in K.
    """
    if 'e' in kwargs:
        vapor_pres = kwargs['e']
    else:
        vapor_pres = vapr_from_qv(**kwargs)

    return 1. / (1. / 273.15 - Rv / L * np.log(vapor_pres / 611.))

def dewp_from_qv(**kwargs):
    """
    Get dewpoint temperature from water vapor mixing ratio, using the Clausius-Clapeyron equation.

    Args:
        **kwargs: Must contain 'qv' (water vapor mixing ratio in kg/kg) and 'p' (pressure in Pa).
            Any other keyword arguments are ignored.

    Returns:
        Dewpoint temperature in K.
    """
    return temp_from_vapr(**kwargs)

def dewp_from_rh(**kwargs):
    """
    Get dewpoint temperature from relative humidity.

    Args:
        **kwargs: Must contain 'rh' (relative humidity as a fraction from 0-1), 'p' (pressure
            in Pa), and 'pt' or 't' (potential temperature or temperature, either in K). Any
            other keyword arguments are ignored.

    Returns:
        Dewpoint temperature in K.
    """
    rh = kwargs['rh']
    pres = kwargs['p']
    sat_qv = qv_from_vapr(**kwargs)
    return dewp_from_qv(qv=(rh * sat_qv), p=pres)

def vapr_from_temp(**kwargs):
    """
    Get vapor pressure from temperature, using the Clausius-Clapeyron equation.

    Args:
        **kwargs: Must contain 't' (temperature in K) or 'pt' and 'p' (potential temperature
            in K and pressure in Pa). Any other keyword arguments are ignored.

    Returns:
        Vapor pressure in Pa.
    """
    if 't' in kwargs:
        temperature = kwargs['t']
    else:
        temperature = temp_from_theta(**kwargs)

    return 611 * np.exp(L / Rv * (1 / 273.15 - 1 / temperature))

def vapr_from_qv(**kwargs):
    """
    Get vapor pressure from water vapor mixing ratio.

    Args:
        **kwargs: Must contain 'qv' (water vapor mixing ratio in kg/kg) and 'p' (pressure in Pa).
            Any other keyword arguments are ignored.

    Returns:
        Vapor pressure in Pa
    """
    mixing_ratio = kwargs['qv']
    pressure = kwargs['p']
    return pressure * mixing_ratio / (mixing_ratio + epsilon)

def qv_from_vapr(**kwargs):
    """
    Get water vapor mixing ratio from vapor pressure.

    Args:
        **kwargs: Must contain 'p' (pressure in Pa) and either 'e' (vapor pressure in Pa),
            't' (temperature in K), or 'pt' (potential temperature in K). Any other keyword
            arguments are ignored.

    Returns:
        Water vapor mixing ratio in kg/kg
    """
    if 'e' in kwargs:
        vapor_pres = kwargs['e']
    else:
        vapor_pres = vapr_from_temp(**kwargs)

    pressure = kwargs['p']
    return (epsilon * vapor_pres) / (pressure - vapor_pres)

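# Illustrative check (not part of pycaps): vapr_from_qv() and qv_from_vapr() are exact inverses
# of one another, since e = p*qv/(qv + epsilon) rearranges to qv = epsilon*e/(p - e).
def _example_qv_vapr_roundtrip():
    qv, p = 0.012, 90000.
    e = vapr_from_qv(qv=qv, p=p)
    return qv_from_vapr(e=e, p=p)   # recovers 0.012 (up to floating-point round-off)
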
def virt_temp(**kwargs):
    """
    Compute virtual temperature.

    Args:
        **kwargs: Must contain two things. The first is either 't' (temperature in K) or 'pt'
            and 'p' (potential temperature in K and pressure in Pa). The second is either 'sph'
            (specific humidity in kg/kg) or 'qv' (water vapor mixing ratio in kg/kg). Any other
            keyword arguments are ignored.

    Returns:
        Virtual temperature in K.
    """
    if 't' in kwargs:
        t = kwargs['t']
    else:
        t = temp_from_theta(**kwargs)

    if 'sph' in kwargs:
        sph = kwargs['sph']
    else:
        sph = kwargs['qv'] / (1 + kwargs['qv'])

    return t * (1 + (1 / epsilon - 1) * sph)

def theta_e(**kwargs):
    """
    Compute equivalent potential temperature.

    Args:
        **kwargs: Must contain 'pt' (potential temperature in K), 'p' (pressure in Pa), and
            'qv' (water vapor mixing ratio in kg/kg).

    Returns:
        Equivalent potential temperature in K.
    """
    t = temp_from_theta(**kwargs)
    return kwargs['pt'] * np.exp((L * kwargs['qv']) / (Cp * t))

def virt_theta(**kwargs):
    """
    Compute virtual potential temperature.

    Args:
        **kwargs: Must contain 'pt' (potential temperature in K) and either 'qv' or 'sph'
            (water vapor mixing ratio or specific humidity, either in kg/kg). Any other keyword
            arguments are ignored.

    Returns:
        Virtual potential temperature in K
    """
    return virt_temp(t=kwargs['pt'], **kwargs)

def moist_lapse(**kwargs):
    """
    Compute the moist adiabatic lapse rate at a given temperature and pressure.

    Args:
        **kwargs: Must contain 'p' (pressure in Pa) and either 't' or 'pt' (temperature or
            potential temperature, either in K). Any other keyword arguments are ignored.

    Returns:
        The moist adiabatic lapse rate in K/Pa.
    """
    pressure = kwargs['p']
    if 'pt' in kwargs:
        temperature = temp_from_theta(**kwargs)
    else:
        temperature = kwargs['t']

    sat_qv = qv_from_vapr(**kwargs)

    moist_term1 = (sat_qv * L) / (Rd * temperature)
    moist_term2 = (sat_qv * L ** 2) / (Rv * Cp * temperature ** 2)

    return ((1 + moist_term1) * temperature * Rd) / ((1 + moist_term2) * pressure * Cp)

def pbl_depth(**kwargs):
    """
    ARPS planetary boundary layer depth calculation. Translated directly from pbldpth()
    in sfcphy3d.f90.

    Args:
        **kwargs: Must contain 'pt' (potential temperature in K), 'qv' (water vapor mixing
            ratio in kg/kg), and 'z' (height in m). The arguments must be arrays with the first
            dimension being height; axis order (NZ, ...). Any other keyword arguments are
            ignored.

    Returns:
        Depth of the planetary boundary layer in m.
    """
    thetav = virt_theta(**kwargs)
    z_coord = kwargs['z']

    z0 = (z_coord[1] + z_coord[0]) / 2

    pbl_height = np.zeros(thetav[0].shape, dtype=thetav.dtype)
    for z in xrange(2, thetav.shape[0]):
        idxs = np.where((thetav[z] > thetav[1]) & (pbl_height == 0))
        pbl_height[idxs] = (z_coord[z - 1] + (z_coord[z] - z_coord[z - 1]) *
                            (thetav[1] - thetav[z - 1]) / (thetav[z] - thetav[z - 1]) - z0)[idxs]

        if not np.any(pbl_height == 0):
            break

    pbl_height = np.maximum((z_coord[2] - z_coord[1]) / 2., pbl_height)
    return pbl_height

#---------------------------------------------------------------------------------#
def compute_srh(zOne=1000, zTwo=3000, direction='RM', **kwargs):
    """
    Computes storm relative helicity.

    Args:
        zOne: The bottom height at which the integral begins.
        zTwo: The top height at which the integral ends.
        direction: The direction of the storm motion, 'RM' or 'LM'.
        **kwargs: Must contain 'u' (u wind component), 'v' (v wind component), 'zp' (physical
            model height), and 'zpsoil' (soil height). u, v, zp, and zpsoil must be numpy arrays
            with 3 dimensions, and the axes are assumed to be in (NZ, NY, NX) order. Any kwargs
            not 'u', 'v', 'zp', or 'zpsoil' will be ignored.

    Returns:
        A numpy array containing storm relative helicity (m^2/s^2). The array has two
        dimensions (NY, NX).
    """
    bunkersU, bunkersV, uMean, vMean = bunkers_motion(direction, **kwargs)
    uShear, vShear = vert_windshear(zOne, zTwo, **kwargs)

    SRH = ((-uMean * vShear) + (vMean * uShear)) - ((-bunkersU * vShear) + (bunkersV * uShear))
    return SRH

#---------------------------------------------------------------------------------#
def bunkers_motion(direction='RM', **kwargs):
    """
    Defines the storm motion based upon a vertical sounding using the Bunkers et al. (2000)
    storm motion approximation.

    Args:
        direction: The direction of the storm motion, 'RM' or 'LM'.
        **kwargs: Must contain 'u' (u wind component), 'v' (v wind component), 'zp' (physical
            model height), and 'zpsoil' (soil height). u, v, zp, and zpsoil must be numpy arrays
            with 3 dimensions, and the axes are assumed to be in (NZ, NY, NX) order. Any kwargs
            not 'u', 'v', 'zp', or 'zpsoil' will be ignored.

    Returns:
        bunkersU, bunkersV, umean, vmean: numpy arrays containing the estimated storm motion
        and the 0-6 km mean wind components (m/s). Each array has two dimensions (NY, NX).
    """
    # --- Establishing matrices
    AGL = kwargs['zp'] - kwargs['zpsoil'][0]
    AGL = zp_to_scalar(AGL)
    dz = AGL[1:] - AGL[:-1]
    AGL = AGL[:-1]
    [nz, ny, nx] = AGL.shape

    u = kwargs['u'][:-1]
    v = kwargs['v'][:-1]

    grid_points = np.tile(np.arange(0, nz), (nx, ny, 1))
    grid_points = np.transpose(grid_points)

    # --- Weighted 0-6 km wind
    lowHgt = closest_grdpt(convert_scalar=False, z=0, zp=AGL)
    hiHgt = closest_grdpt(convert_scalar=False, z=6000, zp=AGL)
    totDZ = np.amax(np.where((hiHgt == grid_points[:]), AGL, 0), axis=0) - \
        np.amax(np.where((lowHgt == grid_points[:]), AGL, 0), axis=0)
    weight = dz / totDZ
    weight = np.where((grid_points[:] >= lowHgt) & (grid_points[:] <= hiHgt), weight, 0)
    umean = np.average(u, weights=weight, axis=0)
    vmean = np.average(v, weights=weight, axis=0)

    # --- Weighted 0-0.5 km wind
    lowHgt = closest_grdpt(convert_scalar=False, z=0, zp=AGL)
    hiHgt = closest_grdpt(convert_scalar=False, z=500, zp=AGL)
    totDZ = np.amax(np.where((hiHgt == grid_points[:]), AGL, 0), axis=0) - \
        np.amax(np.where((lowHgt == grid_points[:]), AGL, 0), axis=0)
    weight = dz / totDZ
    weight = np.where((grid_points[:] >= lowHgt) & (grid_points[:] <= hiHgt), weight, 0)
    uBot = np.average(u, axis=0, weights=weight)
    vBot = np.average(v, axis=0, weights=weight)

    # --- Weighted 5.5-6.0 km wind
    lowHgt = closest_grdpt(convert_scalar=False, z=5500, zp=AGL)
    hiHgt = closest_grdpt(convert_scalar=False, z=6000, zp=AGL)
    totDZ = np.amax(np.where((hiHgt == grid_points[:]), AGL, 0), axis=0) - \
        np.amax(np.where((lowHgt == grid_points[:]), AGL, 0), axis=0)
    weight = dz / totDZ
    weight = np.where((grid_points[:] >= lowHgt) & (grid_points[:] <= hiHgt), weight, 0)
    uTop = np.average(u, axis=0, weights=weight)
    vTop = np.average(v, axis=0, weights=weight)

    # --- Shifted storm motion
    slope = (vTop - vBot) / (uTop - uBot)
    du = abs(7.5 * np.cos(np.arctan2(-1, slope)))
    dv = abs(7.5 * np.sin(np.arctan2(-1, slope)))

    if direction == 'LM':
        # --- Left moving
        bunkersU = np.where(slope < 0, umean + du, 0)
        bunkersU = np.where(slope > 0, umean - du, bunkersU)
        bunkersV = np.where(slope < 0, vmean + dv, 0)
        bunkersV = np.where(slope > 0, vmean + dv, bunkersV)
    elif direction == 'RM':
        # --- Right moving
        bunkersU = np.where(slope < 0, umean - du, 0)
        bunkersU = np.where(slope > 0, umean + du, bunkersU)
        bunkersV = np.where(slope < 0, vmean - dv, 0)
        bunkersV = np.where(slope > 0, vmean - dv, bunkersV)

    return bunkersU, bunkersV, umean, vmean

#---------------------------------------------------------------------------------#
def vert_windshear(z_one=1000, z_two=3000, **kwargs):
    """
    Defines a vertical wind shear vector between two specified heights. Useful in calculating
    SRH, which is typically integrated between 1 and 3 km.

    Args:
        z_one: The bottom height which is considered.
        z_two: The top height which is considered.
        **kwargs: Must contain 'u' (u wind component), 'v' (v wind component), 'zp' (physical
            model height), and 'zpsoil' (soil height). u, v, zp, and zpsoil must be numpy arrays
            with 3 dimensions, and the axes are assumed to be in (NZ, NY, NX) order. Any kwargs
            not 'u', 'v', 'zp', or 'zpsoil' will be ignored.

    Returns:
        Two arrays of the u and v components of vertical wind shear (m/s). The arrays are
        two-dimensional (NY, NX).
    """
    AGL = kwargs['zp'] - kwargs['zpsoil'][0]
    AGL = zp_to_scalar(AGL)
    [nz, ny, nx] = AGL.shape

    # --- Calculating in-between heights
    grid_points = np.tile(np.arange(0, nz), (nx, ny, 1))
    grid_points = np.transpose(grid_points)
    lowHgts = closest_grdpt(convert_scalar=False, z=z_one, zp=AGL)
    hiHgts = closest_grdpt(convert_scalar=False, z=z_two, zp=AGL)

    # --- Sum wind shear over relevant heights
    uShear = np.sum(np.where((grid_points[:] == hiHgts), kwargs['u'], 0), axis=0) - \
        np.sum(np.where((grid_points[:] == lowHgts), kwargs['u'], 0), axis=0)
    vShear = np.sum(np.where((grid_points[:] == hiHgts), kwargs['v'], 0), axis=0) - \
        np.sum(np.where((grid_points[:] == lowHgts), kwargs['v'], 0), axis=0)

    return uShear, vShear

#---------------------------------------------------------------------------------#
def closest_grdpt(convert_scalar=True, **kwargs):
    """
    Find the closest grid point above the given height; fast when interpolation is not desired.

    Args:
        convert_scalar: If True, convert the physical heights (zp) to scalar-point heights first.
        **kwargs: 'z' (the vertical height), 'zp' (physical model height). z must be a scalar
            value in meters; zp must be a numpy array with 3 dimensions, and the axes are
            assumed to be in (NZ, NY, NX) order. Any kwargs not 'z' or 'zp' will be ignored.

    Returns:
        The numpy array with the vertical levels of said height. The array has two
        dimensions (NY, NX).
    """
    if convert_scalar == True:
        zp = zp_to_scalar(kwargs['zp'])
    else:
        zp = kwargs['zp']

    # --- List the grid points
    [nz, ny, nx] = zp.shape
    grid_points = np.tile(np.arange(0, nz), (nx, ny, 1))
    grid_points = np.transpose(grid_points)

    arbritrary_hgt = np.where(kwargs['z'] < zp, 0, zp)
    arbritrary_hgt[1:] = arbritrary_hgt[1:] - arbritrary_hgt[:-1]
    grid_points = np.amax(np.where(arbritrary_hgt[:] == np.amin(arbritrary_hgt, axis=0), grid_points, 0), axis=0)
    grid_points = np.where((grid_points == nz - 1) & (kwargs['z'] < zp[1]), 1, grid_points)

    return grid_points

#---------------------------------------------------------------------------------#
def isotherm_hgt(isotherm=0, convert_scalar=True, **kwargs):
    """
    Calculate the physical height of the grid point one grid level above the given isotherm.

    Args:
        isotherm: The isotherm (in degrees C) whose height is to be found.
        convert_scalar: If True, convert the physical heights (zp) to scalar-point heights first.
        **kwargs: 'p' (the pressure), 'pt' (the potential temperature), 'zp' (physical model
            height). p, pt, and zp must be numpy arrays with 3 dimensions, and the axes are
            assumed to be in (NZ, NY, NX) order. Any kwargs not 'p', 'pt', or 'zp' will be
            ignored.

    Returns:
        The numpy array with the heights of said isotherm. The array has two dimensions (NY, NX).
    """
    if convert_scalar == True:
        zp = zp_to_scalar(kwargs['zp'])
    else:
        zp = kwargs['zp']

    temp = temp_from_theta(**kwargs) - 273.15

    arbritrary_hgt = np.where(temp < isotherm, zp, 0)
    arbritrary_hgt[1:] = arbritrary_hgt[1:] - arbritrary_hgt[:-1]  # Change in heights is largest at the isotherm
    height = np.amax(np.where(arbritrary_hgt[:] == np.amax(arbritrary_hgt, axis=0), zp, 0), axis=0)

    return height

#------------------------------------------------------------------------#
def recarray_fm_dict(**dct):
    """
    Convert a Python dictionary to a numpy record array.

    Args:
        **dct: Dictionary of arrays to convert.

    Returns:
        A numpy array with a record data type.
    """
    dct_shape = dct.values()[0].shape
    dtype = [(k, np.float32) for k in sorted(dct.keys())]
    data = zip(*[dct[k].ravel() for k in sorted(dct.keys())])
    return np.array(data, dtype=dtype).reshape(dct_shape)

def dict_fm_recarray(rec):
    """
    Convert a numpy record array to a Python dictionary.

    Args:
        rec: The record array to convert

    Returns:
        A Python dictionary with keys taken from the array data type.
    """
    return dict((f, rec[f]) for f in rec.dtype.fields.iterkeys())

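# Illustrative round trip (not part of pycaps): pack two 2-D fields into a record array and
# unpack them again. Field order in the record dtype follows sorted key names.
def _example_recarray_roundtrip():
    fields = {'u': np.zeros((2, 3)), 'v': np.ones((2, 3))}
    rec = recarray_fm_dict(**fields)
    return dict_fm_recarray(rec)   # {'u': ..., 'v': ...}, each with shape (2, 3)
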