Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

impdar

Package Overview
Dependencies
Maintainers
1
Versions
26
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

impdar - npm Package Compare versions

Comparing version
1.1.5
to
1.1.6
impdar/lib/ApresData/_coherence.c

Sorry, the diff of this file is too big to display

+389
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Process quad-polarized ApRES data
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
Oct 13 2020
"""
import sys
import time
import warnings

import numpy as np
from scipy.signal import butter, filtfilt

from ._TimeDiffProcessing import coherence
from ..ImpdarError import ImpdarError
try:
USE_C = True
from .coherence import coherence2d_loop
except ImportError:
USE_C = False
def rotational_transform(self, theta_start=0, theta_end=np.pi, n_thetas=100,
                         cross_pol_exception=False,
                         cross_pol_flip=False,
                         flip_force=False):
    """
    Azimuthal (rotational) shift of principal axes
    at the transmitting and receiving antennas
    Mott, 2006

    Parameters
    ---------
    self: class
        ApresQuadPol object
    theta_start: float
        Starting point for array of azimuths
    theta_end: float
        Ending point for array of azimuths
    n_thetas: int
        number of thetas to rotate through
    cross_pol_exception: bool
        continue even if the cross-polarized terms do not look correct
    cross_pol_flip: bool or string
        Which cross-polarized term to flip ('HV' or 'VH') if they look inverted
    flip_force: bool
        force the flipped-antenna handling even if the sign check passes
    """
    # Antennas are not symmetric on 180 degree rotation.
    # Issues have come up with flipped antennas, so check: for a reciprocal
    # medium shv and svh should have the same sign, so the summed terms
    # should dominate the differenced terms.
    if abs(np.sum(np.imag(self.shv)+np.imag(self.svh))) < \
            abs(np.sum(np.imag(self.shv)-np.imag(self.svh))) or \
            abs(np.sum(np.real(self.shv)+np.real(self.svh))) < \
            abs(np.sum(np.real(self.shv)-np.real(self.svh))) or \
            flip_force:
        if cross_pol_exception:
            pass
        elif cross_pol_flip == 'HV':
            # BUGFIX: Warning(...) only constructed an exception object and
            # never emitted it; warnings.warn actually reports the message.
            warnings.warn('Flipping sign of cross-polarized term HV')
            self.shv *= -1.
        elif cross_pol_flip == 'VH':
            warnings.warn('Flipping sign of cross-polarized term VH')
            self.svh *= -1.
        else:
            raise ValueError('Cross-polarized terms are of the opposite sign, check and update.')
    # Synthesize the four polarizations at each azimuth (Mott, 2006)
    self.thetas = np.linspace(theta_start, theta_end, n_thetas)
    self.HH = np.empty((len(self.range), len(self.thetas))).astype(np.cdouble)
    self.HV = np.empty((len(self.range), len(self.thetas))).astype(np.cdouble)
    self.VH = np.empty((len(self.range), len(self.thetas))).astype(np.cdouble)
    self.VV = np.empty((len(self.range), len(self.thetas))).astype(np.cdouble)
    for i, theta in enumerate(self.thetas):
        self.HH[:, i] = self.shh*np.cos(theta)**2. + \
            (self.svh + self.shv)*np.sin(theta)*np.cos(theta) + \
            self.svv*np.sin(theta)**2
        self.HV[:, i] = self.shv*np.cos(theta)**2. + \
            (self.svv - self.shh)*np.sin(theta)*np.cos(theta) - \
            self.svh*np.sin(theta)**2
        self.VH[:, i] = self.svh*np.cos(theta)**2. + \
            (self.svv - self.shh)*np.sin(theta)*np.cos(theta) - \
            self.shv*np.sin(theta)**2
        self.VV[:, i] = self.svv*np.cos(theta)**2. - \
            (self.svh + self.shv)*np.sin(theta)*np.cos(theta) + \
            self.shh*np.sin(theta)**2
    # Record that the rotation has been done (and with how many azimuths)
    self.flags.rotation = np.array([1, n_thetas])
def coherence2d(self, delta_theta=20.0*np.pi/180., delta_range=100.,
                force_python=False):
    """
    Coherence between two 2-d images (e.g. c_hhvv).
    Jordan et al. (2019) eq. 19

    Parameters
    ---------
    self: class
        ApresQuadPol object
    delta_theta : float
        window size in the azimuthal dimension; default: 20 degrees
    delta_range: float
        window size in the vertical; default: 100.
    force_python: bool
        if True, use the (slower) pure-python loop even when the
        compiled C extension is available
    """
    if self.flags.rotation[0] != 1:
        raise ImpdarError('Rotate the quad-pol acquisition before \
calling this function.')
    # Convert the physical window sizes to numbers of bins using the
    # sample spacing of the range and azimuth axes
    nrange = int(delta_range//abs(self.range[0]-self.range[1]))
    ntheta = int(delta_theta//abs(self.thetas[0]-self.thetas[1]))
    # Pad the azimuth axis periodically on both sides so the moving
    # window wraps around 0/pi instead of truncating at the edges
    HH_start = self.HH[:, :ntheta]
    HH_end = self.HH[:, -ntheta:]
    HH_ = np.hstack((HH_end, self.HH, HH_start))
    VV_start = self.VV[:, :ntheta]
    VV_end = self.VV[:, -ntheta:]
    VV_ = np.hstack((VV_end, self.VV, VV_start))
    # Output image, NaN where no coherence is computed (the padding)
    chhvv = np.nan*np.ones_like(HH_).astype(np.cdouble)
    range_bins, azimuth_bins = HH_.shape[0], HH_.shape[1]
    t0 = time.time()
    if USE_C and not force_python:
        # Compiled loop (impdar.lib.ApresData.coherence); requires
        # C-contiguous complex-double arrays
        chhvv = coherence2d_loop(np.ascontiguousarray(chhvv, dtype=np.cdouble),
                                 np.ascontiguousarray(HH_, dtype=np.cdouble),
                                 np.ascontiguousarray(VV_, dtype=np.cdouble),
                                 nrange,
                                 ntheta,
                                 range_bins,
                                 azimuth_bins)
        t1 = time.time()
        print('Execution with c code={:s} took {:6.2f}'.format(str(USE_C), t1 - t0))
    else:
        # Pure-python fallback: windowed coherence at every interior bin
        print('Beginning iteration through {:d} azimuths'.format(azimuth_bins - 2 * ntheta))
        print('Azimuth bin: ', end='')
        sys.stdout.flush()
        for i in range(azimuth_bins):
            # Skip the periodic padding columns
            if (i < ntheta) or (i > azimuth_bins - ntheta - 1):
                continue
            # Progress indicator: a number every 10th azimuth, dots between
            if (i - ntheta) % 10 == 0:
                print('{:d}'.format(i - ntheta), end='')
            else:
                print('.', end='')
            sys.stdout.flush()
            for j in range(range_bins):
                # Window bounds; the range window is clipped at the
                # image edges, the azimuth window relies on the padding
                imin, imax = i - ntheta, i + ntheta
                jmin, jmax = max(0, j - nrange), min(range_bins-1, j + nrange)
                chhvv[j, i] = coherence(HH_[jmin:jmax,imin:imax].flatten(),
                                        VV_[jmin:jmax,imin:imax].flatten())
        print('coherence calculation done')
        t1 = time.time()
        print('Execution with c code={:s} took {:6.2f}'.format(str(False), t1 - t0))
    # Strip the periodic padding before storing
    self.chhvv = chhvv[:, ntheta:-ntheta]
    # If the cpe axis has already been identified, get coherence along it
    if self.flags.cpe is True:
        self.chhvv_cpe = self.chhvv[np.arange(self.snum), self.cpe_idxs]
    self.flags.coherence = np.array([1, delta_theta, delta_range])
# --------------------------------------------------------------------------------------------
def phase_gradient2d(self, filt=None, Wn=0):
    """
    Depth-gradient of hhvv coherence image.
    Jordan et al. (2019) eq 23

    Parameters
    ---------
    self: class
        ApresQuadPol object
    filt : string
        filter type; default lowpass
    Wn : float
        filter frequency
    """
    if self.flags.coherence[0] != 1:
        raise ImpdarError('Calculate coherence before calling this function.')
    # Work on copies of the real and imaginary components of chhvv
    real_part = np.real(self.chhvv).copy()
    imag_part = np.imag(self.chhvv).copy()
    # Optionally smooth both components before differentiating
    if filt is not None:
        if filt != 'lowpass':
            raise TypeError('Filter: %s has not been implemented yet.' % filt)
        real_part = lowpass(real_part, Wn, 1./self.dt)
        imag_part = lowpass(imag_part, Wn, 1./self.dt)
    # Differentiate each component along the depth axis
    dR = np.gradient(real_part, self.range, axis=0)
    dI = np.gradient(imag_part, self.range, axis=0)
    # Phase-depth gradient, Jordan et al. (2019) eq. 23:
    # d(phi)/dz = (R dI/dz - I dR/dz) / (R^2 + I^2)
    self.dphi_dz = (real_part*dI - imag_part*dR)/(real_part**2. + imag_part**2.)
    # If the cpe axis has already been identified, sample the gradient along it
    if self.flags.cpe is True:
        self.dphi_dz_cpe = self.dphi_dz[np.arange(self.snum), self.cpe_idxs]
    self.flags.phasegradient = True
def find_cpe(self, Wn=50, rad_start=np.pi/4., rad_end=3.*np.pi/4.,
             *args, **kwargs):
    """
    Find the cross-polarized extinction axis.
    Ershadi et al. (2022)

    Parameters
    ---------
    self: class
        ApresQuadPol object
    Wn: float
        Filter frequency
    rad_start: float
        Starting point for the azimuthal window to look within for cpe axis
    rad_end: float
        Ending point for the azimuthal window to look within for cpe axis
    """
    if self.flags.rotation[0] != 1:
        raise ImpdarError('Rotate the quad-pol acquisition before \
calling this function.')
    # Power anomaly of the cross-polarized image, smoothed in depth
    hv_anomaly = lowpass(power_anomaly(self.HV.copy()), Wn, 1./self.dt)
    # Azimuth indices bracketing the search window
    idx_start = np.argmin(abs(self.thetas - rad_start))
    idx_stop = np.argmin(abs(self.thetas - rad_end))
    # The CPE axis minimizes the cross-polarized power at each depth
    cpe_bins = np.empty_like(self.range).astype(int)
    for row in range(len(cpe_bins)):
        cpe_bins[row] = np.argmin(hv_anomaly[row, idx_start:idx_stop])
    cpe_bins += idx_start
    self.cpe_idxs = cpe_bins
    self.cpe = np.array([self.thetas[b] for b in cpe_bins]).astype(float)
    # If the coherence calculation has already been done,
    # sample it along the cpe axis
    if self.flags.coherence[0] == 1.:
        self.chhvv_cpe = self.chhvv[np.arange(self.snum), self.cpe_idxs]
    # Likewise for the phase gradient
    if self.flags.phasegradient:
        self.dphi_dz_cpe = self.dphi_dz[np.arange(self.snum), self.cpe_idxs]
    self.flags.cpe = True
# --------------------------------------------------------------------------------------------
def phase_gradient_to_fabric(self, c=300e6, fc=300e6, delta_eps=0.035, eps=3.12):
    """
    Calculate the fabric strength from phase gradient
    Jordan et al. (2019)

    Parameters
    ---------
    self: class
        ApresQuadPol object
    c: float
        speed of light
    fc: float
        center frequency
    delta_eps: float
        ice crystal permittivity anisotropy
    eps: float
        permittivity of ice
    """
    if not hasattr(self, 'dphi_dz_cpe'):
        raise AttributeError("Get the phase gradient along CPE axis \
before calling this function.")
    # E2 - E1 eigenvalue difference, linear in the phase gradient
    scale = (c/(4.*np.pi*fc)) * (2.*np.sqrt(eps)/delta_eps)
    self.e2e1 = scale * self.dphi_dz_cpe
# --------------------------------------------------------------------------------------------
def power_anomaly(data):
    """
    Calculate the power anomaly from mean for a given image.
    Ershadi et al. (2021) eq 21

    Parameters
    --------
    data : array
        2-d array of azimuth-depth return

    Output
    --------
    Pa : array
        power (dB) with the row-wise (per-depth) mean removed
    """
    # Power in dB
    power_db = 10.*np.log10((data)**2.)
    # Subtract each row's mean so the anomaly is relative to that depth
    return power_db - np.nanmean(power_db, axis=1, keepdims=True)
# --------------------------------------------------------------------------------------------
def lowpass(data, Wn, fs, order=3):
    """
    lowpass (Butterworth) filter along the depth axis.

    Parameters
    --------
    data: array
        2-d array of azimuth-depth return
    Wn: float
        filter frequency
    fs: float
        sample frequency
    order: int
        order of filter

    Output
    --------
    array
        filtered data, same shape as the input; NaN-padded rows at the
        top/bottom are left untouched
    """
    # Find the first non-NaN row (coherence images are NaN at the edges);
    # assumes the NaN padding is symmetric top/bottom — TODO confirm
    nan_idx = next(k for k, value in enumerate(data[:, 1]) if ~np.isnan(value))
    if nan_idx != 0:
        # Trim the same number of rows from both ends.
        # BUGFIX: was data[nan_idx:-nan_idx+1], which is an EMPTY slice
        # when nan_idx == 1 (data[1:0]) and left a trailing NaN row
        # inside the filtered span otherwise.
        data_sub = data[nan_idx:-nan_idx]
    else:
        data_sub = data.copy()
    # Get the filter coefficients
    b, a = butter(order, Wn, btype='low', fs=fs)
    # Zero-phase filter along the depth axis
    data_filtered = filtfilt(b, a, data_sub, axis=0)
    # Re-insert the filtered span into the original array
    if nan_idx != 0:
        data[nan_idx:-nan_idx] = data_filtered
        return data
    else:
        return data_filtered
# --------------------------------------------------------------------------------------------
def azimuthal_rotation(data, thetas, azi):
    """
    Rotate a quad-pol image based on some known antenna orientation.

    Parameters
    --------
    data : array
        2-d array of azimuth-depth return
    thetas : array
        series of azimuths for the image
    azi : array
        azimuth of the antenna orientation on measurement

    Output
    --------
    data : array
        rotated image
    """
    # Temporarily shift the azimuth axis by the antenna orientation
    thetas += azi
    # Pick the column where the shifted axis crosses the wrap point
    if azi < 0:
        split = np.argwhere(thetas > 0)[0][0]
    elif azi > 0:
        split = np.argwhere(thetas > np.pi)[0][0]
    else:
        split = None
    # Cyclically permute the columns around the wrap point
    if split is not None:
        data = np.append(data[:, split:], data[:, :split], axis=1)
    # Restore the caller's azimuth array
    thetas -= azi
    return data
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2022 Benjamin Hills <bhills@uw.edu>
#
# Distributed under terms of the GNU GPL3 license.
"""
Differencing between two ApRES data objects
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
May 20 2022
"""
from ._ApresDataProcessing import phase2range
import numpy as np
from scipy.stats import linregress
from scipy.signal import medfilt, find_peaks
def coherence(s1, s2):
    """
    Phase correlation between two elements of the scattering matrix
    Jordan et al. (2019) eq. 13

    Parameters
    ---------
    s1: array
        first acquisition
    s2:
        second acquisition

    Output
    ---------
    c: array
        phase coherence
    """
    arraylike = hasattr(s1, '__len__') and hasattr(s2, '__len__')
    if arraylike:
        # Windowed estimate over the full vectors
        numerator = np.sum(np.dot(s1, np.conj(s2)))
        denominator = np.sqrt(np.sum(np.abs(s1)**2.) * np.sum(np.abs(s2)**2.))
    else:
        # Single-sample (scalar) version
        numerator = np.dot(s1, np.conj(s2))
        denominator = np.sqrt(np.abs(s1)**2. * np.abs(s2)**2.)
    return numerator / denominator
def phase_diff(self, win, step, range_ext=None):
    """
    Calculate the phase offset along the full ApRES acquisition
    using a correlation coefficient within a moving window.

    Parameters
    ---------
    self: class
        Apres differencing object
    win: int
        window size over which to do the correlation coefficient calculation
    step: int
        step size for the window to move between calculations
    range_ext: array; optional
        if an external depth array is desired, input here
    """
    half = win // 2
    # Window centers; the output depth vector is sparser than the full range
    idxs = np.arange(half, len(self.data) - half, step).astype(int)
    if range_ext is not None:
        self.ds = range_ext[idxs]
    else:
        self.ds = self.range[idxs]
    # One complex coherence value per window: amplitude is the coherence
    # between acquisitions and phase is the offset
    self.co = np.empty_like(self.ds).astype(np.cdouble)
    for i, center in enumerate(idxs):
        seg1 = self.data[center - half:center + half]
        seg2 = self.data2[center - half:center + half]
        self.co[i] = coherence(seg1, seg2)
    self.flags.phase_diff = np.array([win, step])
def phase_unwrap(self, win=10, thresh=0.9):
    """
    Unwrap the phase profile to get one that is
    either monotonically increasing or monotonically decreasing.

    Parameters
    ---------
    self: class
        ApresTimeDiff object
    win: int
        window; number of points to include in the wrap-finding window
    thresh: float (0-1)
        threshold; coherence threshold where to stop looking for phase wraps
    """
    # The phase difference must exist before it can be unwrapped
    if self.flags.phase_diff is None:
        raise ValueError('Need to do the phase difference calculation first.')
    self.phi = np.angle(self.co).astype(float)
    two_pi = 2.*np.pi
    for idx in range(1, len(self.co)):
        # Skip windows where the coherence is uniformly too low to trust
        if np.all(abs(self.co[idx-win:idx+win]) < thresh):
            continue
        # A jump larger than pi between neighbors is a wrap; shift the
        # remainder of the profile by one cycle to undo it
        jump = self.phi[idx] - self.phi[idx-1]
        if jump > np.pi:
            self.phi[idx:] -= two_pi
        elif jump < -np.pi:
            self.phi[idx:] += two_pi
def range_diff(self, uncertainty='noise_phasor'):
    """
    Convert the phase profile to range offset (vertical velocity).

    Parameters
    ---------
    self: class
        ApresTimeDiff object
    uncertainty: string;
        default 'noise_phasor' as in Kingslake et al. (2014);
        'CR' uses the Cramer-Rao bound instead
    """
    # Check for unwrap
    if not hasattr(self, 'phi'):
        raise ValueError('Should unwrap the phase profile before converting to range')
    # Window/step used by phase_diff, stored in the flags
    win, step = self.flags.phase_diff
    # convert the phase offset to a distance vector
    self.w = phase2range(self, self.phi,
                         self.header.lambdac,
                         self.ds,
                         self.header.chirp_grad,
                         self.header.ci)
    # If the individual acquisitions have had uncertainty calculations
    if self.unc1 is not None:
        if uncertainty == 'CR':
            # Error from Cramer-Rao bound, Jordan et al. (2020) Ann. Glac. eq. (5)
            sigma = (1./abs(self.co))*np.sqrt((1.-abs(self.co)**2.)/(2.*win))
            # convert the phase uncertainty to a distance uncertainty
            self.w_err = phase2range(self, sigma,
                                     self.header.lambdac,
                                     self.ds,
                                     self.header.chirp_grad,
                                     self.header.ci)
        elif uncertainty == 'noise_phasor':
            # Uncertainty from Noise Phasor as in Kingslake et al. (2014)
            # r_uncertainty should be calculated using the function
            # phase_uncertainty defined in this script
            r_uncertainty = phase2range(self, self.unc1, self.header.lambdac) +\
                phase2range(self, self.unc2, self.header.lambdac)
            # Average the per-sample range uncertainty over each window
            idxs = np.arange(win//2, len(self.data)-win//2, step)
            self.w_err = np.array([np.nanmean(r_uncertainty[i-win//2:i+win//2]) for i in idxs])
def strain_rate(self, strain_window=(200, 1200), w_surf=0.):
    """
    Estimate the average vertical strain rate within some
    range span provided.

    Parameters
    ---------
    self: class
        ApresTimeDiff object
    strain_window: tuple
        The range over which the vertical velocity profile is quasi linear
        and the average strain rate should be calculated.
    w_surf: float
        Vertical velocity at the surface (use ice equivalent accumulation rate)
        This is just to line up the profile, it is not really needed, can leave as 0.
    """
    if not hasattr(self, 'w'):
        raise ValueError("Get the vertical velocity profile first with 'range_diff()'.")
    print('Calculating vertical strain rate over range from %s to %s meters.' % strain_window)
    # Restrict the fit to the quasi-linear portion of the profile
    in_window = np.logical_and(self.ds > strain_window[0], self.ds < strain_window[1])
    fit = linregress(self.ds[in_window], self.w[in_window])
    # The slope of w(z) is the vertical strain rate
    self.eps_zz = fit.slope
    self.w0 = fit.intercept
    print('Vertical strain rate (yr-1):', self.eps_zz)
    print('r_squared:', fit.rvalue**2.)
    # Shift the profile so the fitted surface velocity matches w_surf
    self.w += w_surf - self.w0
def bed_pick(self, sample_threshold=50, coherence_threshold=0.9,
             filt_kernel=201, prominence=10, peak_width=300):
    """
    Estimate the location of the ice-bed interface.

    Parameters
    ---------
    self: class
        ApresTimeDiff object
    sample_threshold: int
        Number of samples to tolerate for difference between the pick from acquisition 1 and 2
    coherence_threshold: int
        Minimum coherence allowed for the picked bed
    filt_kernel: int
        Kernel for median filter
    prominence: int
        How high the bed power needs to be above neighboring power profile
    peak_width: int
        Width of the power peak
    """
    # Pick the deepest prominent, wide peak in each acquisition's
    # median-filtered power profile
    picks, powers = [], []
    for acq in (self.data, self.data2):
        power_db = 10.*np.log10(acq**2.)
        smoothed = medfilt(power_db.real, filt_kernel)
        peaks = find_peaks(smoothed, prominence=prominence, width=peak_width)[0]
        deepest = max(peaks)
        picks.append(deepest)
        powers.append(smoothed[deepest])
    bed_idx1, bed_idx2 = picks
    # The two acquisitions must agree on where the bed is
    if not abs(bed_idx1 - bed_idx2) < sample_threshold:
        raise ValueError('Bed pick from first and second acquisitions are too far apart.')
    # Average the two picks
    bed_samp = (bed_idx1 + bed_idx2)//2
    bed_power = (powers[0] + powers[1])/2.
    bed_range = self.range[bed_samp]
    # Coherence near the pick, sampled from the (sparser) differencing axis
    diff_idx = np.argmin(abs(self.ds - bed_range))
    bed_coherence = np.median(abs(self.co[diff_idx-10:diff_idx+10]))
    if not bed_coherence > coherence_threshold:
        raise ValueError('Bed pick has too low coherence.')
    self.bed = np.array([bed_samp, bed_range, bed_coherence, bed_power])
/*
* coherence.c
* Copyright (C) 2021 David Lilien <david.lilien@umanitoba.ca>
*
* Distributed under terms of the GNU GPL3.0 license.
*/
#include <complex.h>
#include "coherence.h"
#include <math.h>
#include <stdio.h>
#include <time.h>
#include <signal.h>
#include <stdlib.h>
#include <Python.h>
/* Windowed HHVV coherence over a 2-d (range x azimuth) image.
 * C twin of the pure-python loop in _QuadPolProcessing.coherence2d:
 * for each interior azimuth bin i and range bin j, computes
 * sum(HH * conj(VV)) / sqrt(sum(|HH|^2) * sum(|VV|^2)) over a
 * (2*nrange) x (2*ntheta) window and writes it into chhvv.
 * Arrays are row-major with azimuth_bins columns; the first/last
 * ntheta azimuth columns are periodic padding and are skipped. */
void coherence2d (double complex * chhvv, double complex * HH, double complex * VV, int nrange, int ntheta, int range_bins, int azimuth_bins){
    int i, j, ii, jj;
    int imin, imax, jmin, jmax;
    double complex numerator;
    double complex denominator1;
    double complex denominator2;
    /* Progress output goes through Python's stdout so it interleaves
     * correctly with prints from the calling interpreter */
    PySys_WriteStdout("Beginning iteration through %d azimuths...\nAzimuth bin: ", azimuth_bins);
    fflush(stdout);
    /* You can flip the loop order, but I think this is marginally faster
     * based on c row-major ordering. Could be wrong though... */
    for(i=ntheta;i<azimuth_bins - ntheta;i++){
        /* Progress: a number every 10th bin, dots in between.
         * NOTE(review): counts raw i here, while the python fallback
         * counts (i - ntheta) -- cosmetic difference only. */
        if (i % 10 == 0){
            if (i > 0){
                PySys_WriteStdout("\n%d", i);
                fflush(stdout);
            }
        } else {
            PySys_WriteStdout(".");
            fflush(stdout);
        }
        for(j=0;j<range_bins;j++){
            /* Window bounds: azimuth relies on the periodic padding,
             * range is clipped at the image edges.
             * max/min presumably come from coherence.h -- confirm. */
            imin = i - ntheta;
            imax = i + ntheta;
            jmin = max(0, j - nrange);
            jmax = min(range_bins - 1, j + nrange);
            numerator = 0.0 + 0.0 * _Complex_I;
            denominator1 = 0.0 + 0.0 * _Complex_I;
            denominator2 = 0.0 + 0.0 * _Complex_I;
            /* Accumulate the coherence sums over the window */
            for (ii=imin; ii<imax; ii++){
                for (jj=jmin; jj<jmax; jj++){
                    numerator += HH[jj * azimuth_bins + ii] * conj(VV[jj * azimuth_bins + ii]);
                    denominator1 += pow(cabs(HH[jj * azimuth_bins + ii]), 2);
                    denominator2 += pow(cabs(VV[jj * azimuth_bins + ii]), 2);
                }
            }
            chhvv[j * azimuth_bins + i] = numerator/(sqrt(denominator1) * sqrt(denominator2));
        }
    }
}
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Load quad-polarized data
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
Oct 13 2020
"""
import numpy as np
import glob
import datetime
from scipy.io import loadmat
from . import ApresQuadPol
from .ApresFlags import QuadPolFlags
from .load_apres import load_apres
from ..ImpdarError import ImpdarError
def load_quadpol(fn, ftype='mat', load_single_pol=True, *args, **kwargs):
    """Load processed apres profiles from all four polarizations: hh, hv, vh, vv
    into one data object for a quad polarized acquisition.

    Parameters
    ----------
    fn: either string of site name where fn = site name + polarization name
        or list of file names in the correct order - hh, hv, vh, vv
    ftype: str
        file type; not used in this body — presumably consumed upstream, confirm
    load_single_pol: bool
        if False, fn is taken to be an already-saved ApresQuadPol file

    Returns
    -------
    quadpol_data: class
        quad-polarized apres data object
    """
    if not load_single_pol:
        # fn points at an already-combined quad-pol file
        quadpol_data = ApresQuadPol(fn)
    else:
        # Load each of the individual polarizations as their own ApresData object
        polarizations = ['HH', 'HV', 'VH', 'VV']
        if isinstance(fn, str):
            # Treat fn as a site-name prefix and glob for one file per polarization
            fns = [glob.glob(fn + '_{:s}.*'.format(pol)) for pol in polarizations]
            for pol, f in zip(polarizations, fns):
                if len(f) != 1:
                    raise FileNotFoundError('Need exactly one file matching each polarization')
            fns = np.squeeze(fns)
        elif len(fn) == 4:
            fns = fn
        else:
            raise ValueError('fn must be a glob for files with _HH, _HV, etc., or a 4-tuple')
        single_acquisitions = [load_apres([f]) for f in fns]
        # Check that the data have gone through the initial processing steps
        # If they haven't do range conversion and stack to one trace
        for i, acquisition in enumerate(single_acquisitions):
            try:
                acquisition.stacking()
                print('Restacked acquisition #{:d} to a 1-d array.'.format(i + 1))
            except ImpdarError:
                # stacking() raises when the data are already stacked
                print('Acquisition #{:d} is already stacked to shape: {:s}'.format(i + 1, str(np.shape(acquisition.data))))
            if acquisition.flags.range == 0:
                print('Acquisition #',i+1,'has not been converted to range. Range conversion now...')
                acquisition.apres_range(2)
        # Check that all four acquisitions have the same attributes
        from copy import deepcopy
        hh = deepcopy(single_acquisitions[0])
        for xx in single_acquisitions[1:]:
            if hh.snum != xx.snum:
                raise ValueError('Need the same number of vertical samples in each file')
            if not np.all(hh.travel_time == xx.travel_time):
                raise ValueError('Need matching travel time vectors')
            if abs(hh.decday[0] - xx.decday[0]) > 1.:
                # TODO: ask to proceed
                # NOTE(review): Warning(...) constructs an exception but does
                # not display it; nothing is actually reported here.
                Warning('It looks like these acquisitions were not all taken on the same day.')
        # load into the ApresQuadPol object
        quadpol_data = ApresQuadPol(None)
        quadpol_data.snum = hh.snum
        quadpol_data.shh = hh.data.flatten().astype(np.cdouble)
        quadpol_data.shv = single_acquisitions[1].data.flatten().astype(np.cdouble)
        quadpol_data.svh = single_acquisitions[2].data.flatten().astype(np.cdouble)
        quadpol_data.svv = single_acquisitions[3].data.flatten().astype(np.cdouble)
        quadpol_data.decday = hh.decday
        quadpol_data.range = hh.Rcoarse
        quadpol_data.dt = hh.dt
        quadpol_data.travel_time = hh.travel_time
        # save a 'data' field to make this class play nice with the save scripts etc.
        quadpol_data.data = quadpol_data.shh.copy()
        quadpol_data.data_dtype = quadpol_data.data.dtype
        # take the flags and header from the first data object
        quadpol_data.flags = QuadPolFlags()
        quadpol_data.flags.file_read_code = single_acquisitions[0].flags.file_read_code
        quadpol_data.header = single_acquisitions[0].header
    return quadpol_data
# ------------------------------------------------
def load_quadpol_fujita(model_name):
    """
    Load processed modeled apres profiles from all four polarizations: hh, hv, vh, vv
    into one data object

    Parameters
    ----------
    model_name : str or class
        path to a .mat file from the effective medium model,
        or the model output object itself

    Returns
    -------
    quadpol_data: class
        quad-polarized apres data object
    """
    if type(model_name) is str:
        # Load the .mat output into an anonymous container object
        class empty:
            pass
        model = empty()
        data = loadmat(model_name)
        for attr in data.keys():
            setattr(model,attr,np.squeeze(data[attr]))
    else:
        model = model_name
    # load into the ApresQuadPol object
    quadpol_data = ApresQuadPol(None)
    quadpol_data.shh = model.shh
    quadpol_data.shv = model.shv
    quadpol_data.svh = model.svh
    quadpol_data.svv = model.svv
    quadpol_data.range = model.range
    # Stamp the load time as a Matlab-style decimal day
    now = datetime.datetime.now()
    timezero = datetime.datetime(1, 1, 1, 0, 0, 0)
    offset = now-timezero
    quadpol_data.decday = offset.days + offset.seconds/(3600.*24.) + 366.  # Matlab compatible
    quadpol_data.snum = len(model.shh)
    # Two-way travel time from the modeled permittivity
    v = model.c/np.sqrt(model.epsr)
    quadpol_data.travel_time = quadpol_data.range/v
    quadpol_data.dt = np.mean(np.gradient(quadpol_data.travel_time))
    quadpol_data.data_dtype = quadpol_data.shh.dtype
    return quadpol_data
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Load ApRES Diff data
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
July 11 2022
"""
import glob
import numpy as np
from .load_apres import load_apres
from . import ApresData, ApresTimeDiff
from .ApresFlags import TimeDiffFlags
from ..ImpdarError import ImpdarError
def load_time_diff(fn, load_single_acquisitions=True, *args, **kwargs):
    """
    Load two Apres acquisitions together into a single object for time differencing.

    Parameters
    ----------
    fn: str, 2-tuple of file names, or 2-tuple of loaded ApresData objects
        a glob prefix for files named *_time1* / *_time2*, or the two
        acquisitions themselves
    load_single_acquisitions: bool
        if False, fn is taken to be an already-saved ApresTimeDiff file

    Returns
    -------
    diff_data: class
        ApresTimeDiff object holding both acquisitions
    """
    if not load_single_acquisitions:
        diff_data = ApresTimeDiff(fn)
    else:
        # Make sure we have the correct fn format to load
        times = ['time1', 'time2']
        if isinstance(fn, str):
            fns = [glob.glob(fn + '_{:s}*'.format(t)) for t in times]
            for t, f in zip(times, fns):
                if len(f) != 1:
                    # BUGFIX: corrected typo 'acqusition' in the message
                    raise FileNotFoundError('Need exactly one file matching each time acquisition')
        elif len(fn) == 2:
            fns = fn
        else:
            raise ValueError('fn must be a glob for files with _time1, _time2, or a 2-tuple')
        # Load each of the individual acquisitions as their own ApresData
        # object (or accept already-loaded objects)
        if isinstance(fns[0], str):
            single_acquisitions = [load_apres([f]) for f in fns]
        else:
            single_acquisitions = [dat for dat in fns]
        # (removed leftover debug print of the acquisition objects)
        # Check that the data have gone through the initial processing steps
        # If they haven't do range conversion and stack to one trace
        for i, acquisition in enumerate(single_acquisitions):
            try:
                acquisition.stacking()
                print('Restacked acquisition #{:d} to a 1-d array.'.format(i + 1))
            except ImpdarError:
                # stacking() raises when the data are already stacked
                print('Acquisition #{:d} is already stacked to shape: {:s}'.format(i + 1,
                                                                                   str(np.shape(acquisition.data))))
            if acquisition.flags.range == 0:
                print('Acquisition #', i+1, 'has not been converted to range. Range conversion now...')
                acquisition.apres_range(2)
        # Check that both acquisitions have the same attributes
        from copy import deepcopy
        dat1 = deepcopy(single_acquisitions[0])
        dat2 = deepcopy(single_acquisitions[1])
        if dat1.snum != dat2.snum:
            raise ValueError('Need the same number of vertical samples in each file')
        if not np.all(dat1.travel_time == dat2.travel_time):
            raise ValueError('Need matching travel time vectors')
        # load into the ApresTimeDiff object
        diff_data = ApresTimeDiff(None)
        diff_data.snum = dat1.snum
        diff_data.data = dat1.data.flatten().astype(complex)
        diff_data.data2 = dat2.data.flatten().astype(complex)
        diff_data.decday = dat1.decday
        diff_data.range = dat1.Rcoarse
        diff_data.dt = dat1.dt
        diff_data.travel_time = dat1.travel_time
        diff_data.fn1 = dat1.header.fn
        diff_data.fn2 = dat2.header.fn
        diff_data.fn = diff_data.fn1+'_diff_'+diff_data.fn2
        # if uncertainties were calculated, bring those in too
        if hasattr(dat1, 'uncertainty'):
            diff_data.unc1 = dat1.uncertainty
        if hasattr(dat2, 'uncertainty'):
            diff_data.unc2 = dat2.uncertainty
        diff_data.data_dtype = diff_data.data.dtype
        # take the flags and header from the first data object
        diff_data.flags = TimeDiffFlags()
        diff_data.flags.file_read_code = dat1.flags.file_read_code
        diff_data.header = dat1.header
    return diff_data
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
#
# Distributed under terms of the GNU GPL3 license.
"""
Load ApRES data as a profile
Author:
Benjamin Hills
bhills@uw.edu
University of Washington
Earth and Space Sciences
July 11 2022
"""
import glob
import numpy as np
import numpy as np
from ..RadarData import RadarData
from ..RadarFlags import RadarFlags
from ..ApresData.load_apres import load_apres
from ..ImpdarError import ImpdarError
def load_apres_profile(fns_apres, *args, **kwargs):
    """
    Load Apres acquisition into a profile.

    Parameters
    ----------
    fns_apres: list of str
        file names for the apres acquisition

    Returns
    -------
    apres_data: class
        RadarData object reshaped so each chirp is a trace
    """
    apres_obj = load_apres(fns_apres)
    apres_obj.apres_range(2)
    # Setup a RadarData class to fill
    apres_data = RadarData(None)
    apres_data.fn = fns_apres[0]
    # Try to pull in as much as possible directly from the apres object
    for attr in vars(apres_obj):
        if attr == 'data':
            continue
        if attr in apres_data.attrs_guaranteed:
            setattr(apres_data, attr, getattr(apres_obj, attr))
        if attr in apres_data.attrs_optional:
            setattr(apres_data, attr, getattr(apres_obj, attr))
    # Reshape the data array into a profile (one column per chirp)
    apres_data.data = np.reshape(apres_obj.data, (apres_obj.bnum*apres_obj.cnum, apres_obj.snum))
    apres_data.data = np.transpose(apres_data.data)
    # NOTE(review): in-place dtype assignment reinterprets the underlying
    # buffer rather than converting values — presumably the loaded data are
    # already complex128 so this is a no-op; confirm (astype would convert).
    apres_data.data.dtype = complex
    apres_data.snum = apres_data.data.shape[0]
    apres_data.tnum = apres_data.data.shape[1]
    apres_data.trace_num = np.arange(apres_data.tnum)
    # travel time in the apres object is set up for fmcw data
    # convert using the range attr (two-way time in microseconds)
    apres_data.travel_time = apres_obj.Rcoarse/(apres_obj.header.ci/2.)
    apres_data.travel_time *= 1e6
    # reformat arrays for values that are only taken once per chirp
    apres_data.decday = apres_obj.chirp_time.flatten()
    apres_data.lat = np.transpose(np.tile(apres_obj.lat,
                                          (apres_obj.cnum, 1))).flatten()
    apres_data.long = np.transpose(np.tile(apres_obj.long,
                                           (apres_obj.cnum, 1))).flatten()
    if apres_obj.elev is None:
        apres_data.elev = np.zeros_like(apres_data.lat)
    elif np.shape(apres_obj.elev) == np.shape(apres_obj.lat):
        apres_data.elev = np.transpose(np.tile(apres_obj.elev,
                                               (apres_obj.cnum, 1))).flatten()
    # Set the along-track coordinates; fall back to zeros when the
    # projection fails (e.g. no/invalid coordinates)
    try:
        apres_data.get_projected_coords()
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit
        apres_data.dist = np.zeros(apres_data.tnum)
    apres_data.trace_int = np.gradient(apres_data.dist)
    # fill an empty pressure array and placeholder trigger info
    apres_data.pressure = np.zeros(apres_data.tnum)
    apres_data.trig = np.nan*np.zeros(apres_data.tnum)
    apres_data.trig_level = np.nan*np.zeros(apres_data.tnum)
    apres_data.chan = 0
    # Initialize the flags
    apres_data.flags = RadarFlags()
    # Check attributes in the RadarData class and return
    apres_data.check_attrs()
    return apres_data
+1
-0

@@ -7,2 +7,3 @@ [console_scripts]

impproc = impdar.bin.impproc:main
qpdar = impdar.bin.qpdar:main
+1
-1
Metadata-Version: 2.1
Name: impdar
Version: 1.1.5
Version: 1.1.6
Summary: Scripts for impulse radar

@@ -5,0 +5,0 @@ Home-page: http://github.com/dlilien/impdar

@@ -40,4 +40,10 @@ README.md

impdar/lib/ApresData/_ApresDataSaving.py
impdar/lib/ApresData/_QuadPolProcessing.py
impdar/lib/ApresData/_TimeDiffProcessing.py
impdar/lib/ApresData/__init__.py
impdar/lib/ApresData/_coherence.c
impdar/lib/ApresData/coherence.c
impdar/lib/ApresData/load_apres.py
impdar/lib/ApresData/load_quadpol.py
impdar/lib/ApresData/load_time_diff.py
impdar/lib/RadarData/_RadarDataFiltering.py

@@ -49,2 +55,3 @@ impdar/lib/RadarData/_RadarDataProcessing.py

impdar/lib/load/load_UoA.py
impdar/lib/load/load_apres_profile.py
impdar/lib/load/load_bsi.py

@@ -51,0 +58,0 @@ impdar/lib/load/load_delores.py

@@ -12,9 +12,15 @@ #! /usr/bin/env python

"""
import sys
import os.path
import argparse
import numpy as np
from impdar.lib.ApresData import load_apres
from impdar.lib.ApresData import load_apres, load_time_diff, load_quadpol
from impdar.lib.ApresData import FILETYPE_OPTIONS, ApresData, ApresTimeDiff, ApresQuadPol
from impdar.lib.ApresData.ApresFlags import ApresFlags, TimeDiffFlags, QuadPolFlags
from impdar.lib import plot
def _get_args():

@@ -24,9 +30,206 @@ parser = argparse.ArgumentParser()

# Horizontal window filter
# Loading functionality
parser_load = _add_procparser(subparsers,
'load',
'load apres data',
lambda x: x,
load,
defname='load')
parser_load.add_argument('-acq_type',
type=str,
help='Acquisition type',
default='single',
choices=['single', 'timediff', 'quadpol'])
_add_def_args(parser_load)
# Full processing flow for a single ApRES acquisition
parser_singleproc = _add_procparser(subparsers,
'proc',
'full processing flow on the apres data object',
single_processing,
'proc')
parser_singleproc.add_argument('-max_range',
type=float,
help='maximum range for the range conversion')
parser_singleproc.add_argument('-num_chirps',
type=int,
help='number of chirps to stack (default: stack all)')
parser_singleproc.add_argument('-noise_bed_range',
type=float,
help='bed range under which the noise phasor will be calculated')
parser_singleproc.set_defaults(max_range=4000., num_chirps=0, noise_bed_range=3000.)
_add_def_args(parser_singleproc)
# Full Difference Processing
parser_diffproc = _add_procparser(subparsers,
'diffproc',
'create an ApresDiff object and then execute the full differencing processing flow',
time_diff_processing,
'diffproc')
parser_diffproc.add_argument('-window',
type=int,
help='window size over which the cross correlation is done')
parser_diffproc.add_argument('-step',
type=int,
help='step size in samples for the moving window')
parser_diffproc.add_argument('-thresh',
type=int,
help='step size in samples for the moving window')
parser_diffproc.add_argument('-strain_window',
type=tuple,
help='step size in samples for the moving window')
parser_diffproc.add_argument('-w_surf',
type=float,
help='surface vertical velocity (ice equivalent accumulation rate)')
parser_diffproc.set_defaults(window=20, step=20,thresh=0.95,strain_window=(200,1000),w_surf=-0.15)
_add_def_args(parser_diffproc)
# Full quadpol processing
parser_qpproc = _add_procparser(subparsers,
'qpproc',
'full processing flow on the quadpol data object',
quadpol_processing,
'qpproc')
parser_qpproc.add_argument('-nthetas',
type=int,
help='number of theta values to rotate into')
parser_qpproc.add_argument('-dtheta',
type=float,
help='size of coherence window in theta direction')
parser_qpproc.add_argument('-drange',
type=float,
help='size of coherence window in range direction')
parser_qpproc.add_argument('-cross_pol_flip',
type=str,
help='flip the sign on one of the cross polarized terms.')
parser_qpproc.set_defaults(nthetas=100, dtheta=20.*np.pi/180., drange=100, cross_pol_flip=False)
_add_def_args(parser_qpproc)
# Initial range conversion (deramping)
parser_range = _add_procparser(subparsers,
'range',
'convert the recieved waveform to a range-amplitude array',
range_conversion,
'range')
parser_range.add_argument('-max_range',
type=float,
help='maximum range for the range conversion',
default=4000.)
_add_def_args(parser_range)
# Stacking
parser_stack = _add_procparser(subparsers,
'stack',
'stack apres chirps into a single array',
stack,
'stacked')
parser_stack.add_argument('-num_chirps',
type=int,
help='number of chirps to stack (default: stack all)',
default=0)
_add_def_args(parser_stack)
# Uncertainty
parser_unc = _add_procparser(subparsers,
'uncertainty',
'calculate the phase uncertainty for all samples',
uncertainty,
'uncertainty')
parser_unc.add_argument('-noise_bed_range',
type=float,
help='bed range under which the noise phasor will be calculated',
default=3000.)
_add_def_args(parser_unc)
# Phase Differencing
parser_pdiff = _add_procparser(subparsers,
'pdiff',
'calculate correlation between the two acquisitions',
phase_differencing,
'pdiff')
parser_pdiff.add_argument('-window',
type=int,
help='window size over which the cross correlation is done')
parser_pdiff.add_argument('-step',
type=int,
help='step size in samples for the moving window')
parser_pdiff.set_defaults(window=20, step=20)
_add_def_args(parser_pdiff)
# Phase Unwrap
parser_unwrap = _add_procparser(subparsers,
'unwrap',
'unwrap the differenced phase profile from top to bottom',
unwrap)
_add_def_args(parser_unwrap)
# Range Differencing
parser_rdiff = _add_procparser(subparsers,
'rdiff',
'convert the differenced phase profile to range',
range_differencing)
_add_def_args(parser_rdiff)
# Rotation, fill in data at all azimuths
parser_rotate = _add_procparser(subparsers,
'rotate',
'use a rotational transform to find data at all azimuths',
rotate,
'rotated')
parser_rotate.add_argument('-nthetas',
type=int,
help='number of theta values to rotate into',
default=100)
parser_rotate.add_argument('-cross_pol_flip',
type=str,
help='flip the sign on one of the cross polarized terms.',
default=False)
_add_def_args(parser_rotate)
# Coherence 2D
parser_coherence = _add_procparser(subparsers,
'coherence',
'2-dimensional coherence between HH and VV polarizations',
coherence,
'chhvv')
parser_coherence.add_argument('-dtheta',
type=float,
help='size of coherence window in theta direction')
parser_coherence.add_argument('-drange',
type=float,
help='size of coherence window in range direction')
parser_coherence.set_defaults(dtheta=20.*np.pi/180., drange=100.)
_add_def_args(parser_coherence)
# Cross-Polarized Extinction
parser_cpe = _add_procparser(subparsers,
'cpe',
'find the depth-azimuth profile for cross-polarized extinction',
cross_polarized_extinction,
'cpe')
parser_cpe.add_argument('-Wn',
type=float,
help='filter frequency')
parser_cpe.add_argument('-fs',
type=float,
help='sampling frequency')
_add_def_args(parser_cpe)
# Plot Apres Acquisition
parser_plot = _add_procparser(subparsers,
'plot',
'plot apres data from a single acquisition',
plot_apres,
'plot')
parser_plot.add_argument('-acq_type',
type=str,
help='Acquisition type',
default=None,
choices=['single', 'timediff', 'quadpol'])
parser_plot.add_argument('-s',
action='store_true',
help='Save file (do not plt.show())')
parser_plot.add_argument('-yd', action='store_true',
help='plot the depth rather than travel time')
_add_def_args(parser_plot)
return parser

@@ -67,41 +270,163 @@

apres_data = [load_apres.load_apres_single_file(fn) for fn in args.fns]
if args.name == 'load':
pass
apres_data, name = args.func(**vars(args))
else:
for dat in apres_data:
args.func(dat, **vars(args))
apres_data, empty_name = load(**vars(args))
name = args.name
args.func(apres_data, **vars(args))
if args.name == 'load':
name = 'raw'
if args.name == 'plot':
return
elif args.o is not None:
out_fn = args.o
apres_data.save(out_fn)
else:
name = args.name
bn = os.path.splitext(args.fns[0])[0]
if bn[-3:] == 'raw':
bn = bn[:-6]
out_fn = bn + '_{:s}.mat'.format(name)
apres_data.save(out_fn)
if args.o is not None:
if ((len(apres_data) > 1) or (args.o[-1] == '/')):
for d, f in zip(apres_data, args.fns):
bn = os.path.split(os.path.splitext(f)[0])[1]
if bn[-4:] == '_raw':
bn = bn[:-4]
out_fn = os.path.join(args.o, bn + '_{:s}.h5'.format(name))
d.save(out_fn)
# --------------------------------------------------------
### Loading ###
# --------------------------------------------------------
def load(fns='', acq_type=None,**kwargs):
if acq_type == 'single':
apres_data = load_apres.load_apres(fns)
name = 'apraw'
elif acq_type == 'timediff':
if len(fns) == 1:
apres_data = load_time_diff.load_time_diff(fns[0],load_single_acquisitions=False)
else:
out_fn = args.o
apres_data[0].save(out_fn)
apres_data = load_time_diff.load_time_diff(fns)
name = 'tdraw'
elif acq_type == 'quadpol':
if len(fns) == 1:
apres_data = load_quadpol.load_quadpol(fns[0],load_single_pol=False)
else:
apres_data = load_quadpol.load_quadpol(fns)
name = 'qpraw'
if acq_type is None:
try:
apres_data = load_apres.load_apres(fns)
name = 'apraw'
except:
Warning('Cannot load as single Apres acquisition, trying alternative acq_type...')
try:
if len(fns) == 1:
apres_data = load_time_diff.load_time_diff(fns[0],load_single_acquisitions=False)
else:
apres_data = load_time_diff.load_time_diff(fns)
name = 'tdraw'
except:
Warning('Cannot load as timediff Apres acquisition, trying alternative acq_type...')
try:
if len(fns) == 1:
apres_data = load_quadpol.load_quadpol(fns[0],load_single_pol=False)
else:
apres_data = load_quadpol.load_quadpol(fns)
name = 'qpraw'
except:
Warning('Cannot load as quadpol Apres acquisition, trying alternative acq_type...')
return apres_data, name
# --------------------------------------------------------
### Full Processing Flow ###
# --------------------------------------------------------
def single_processing(dat, p=2, max_range=4000., num_chirps=0., noise_bed_range=3000., **kwargs):
"""Full processing flow for ApresData object.
Range conversion, stacking, uncertainty."""
dat.apres_range(p,max_range)
if num_chirps == 0.:
dat.stacking()
else:
for d, f in zip(apres_data, args.fns):
bn = os.path.splitext(f)[0]
if bn[-4:] == '_raw':
bn = bn[:-4]
out_fn = bn + '_{:s}.h5'.format(name)
d.save(out_fn)
dat.stacking(num_chirps)
dat.phase_uncertainty(noise_bed_range)
def crop(dat, lim=0, top_or_bottom='top', dimension='snum', **kwargs):
"""Crop in the vertical."""
dat.crop(lim, top_or_bottom=top_or_bottom, dimension=dimension)
def time_diff_processing(diffdat, win=20, step=20, thresh=0.95,
strain_window=(200,1000), w_surf=-0.15, **kwargs):
diffdat.phase_diff(win,step)
diffdat.phase_unwrap(win,thresh)
diffdat.range_diff()
diffdat.strain_rate(strain_window=strain_window,w_surf=w_surf)
diffdat.bed_pick()
def quadpol_processing(dat, nthetas=100, dtheta=20.0*np.pi/180.,
drange=100., Wn=0., fs=0., cross_pol_flip=False, **kwargs):
"""Full processing flow for ApresQuadPol object.
Range conversion, stacking, uncertainty."""
dat.rotational_transform(n_thetas=nthetas, cross_pol_flip=cross_pol_flip)
dat.find_cpe()
dat.coherence2d(delta_theta=dtheta, delta_range=drange)
# --------------------------------------------------------
### Individual Processing Functions ###
# --------------------------------------------------------
def range_conversion(dat, p=2, max_range=4000, **kwargs):
"""Range conversion."""
dat.apres_range(p,max_range)
def stack(dat, num_chirps=0, **kwargs):
"""Stack chirps."""
if num_chirps == 0:
dat.stacking()
else:
dat.stacking(num_chirps)
def uncertainty(dat,noise_bed_range=3000, **kwargs):
"""Calculate uncertainty."""
dat.phase_uncertainty(noise_bed_range)
def phase_differencing(diffdat, win=20, step=20, **kwargs):
diffdat.phase_diff(win,step)
def unwrap(diffdat,win=20,thresh=.95, **kwargs):
diffdat.phase_unwrap(win,thresh)
def range_differencing(diffdat, **kwargs):
diffdat.range_diff()
def rotate(dat, nthetas=100, cross_pol_flip=False, **kwargs):
"""Range conversion."""
dat.rotational_transform(n_thetas=nthetas, cross_pol_flip=cross_pol_flip)
def coherence(dat, dtheta=20.0*np.pi/180., drange=100., **kwargs):
"""Stack chirps."""
dat.coherence2d(delta_theta=dtheta, delta_range=drange)
def cross_polarized_extinction(dat,
Wn=0., fs=0., **kwargs):
"""Calculate uncertainty."""
dat.find_cpe(Wn=Wn)
# --------------------------------------------------------
### Plotting ###
# --------------------------------------------------------
def plot_apres(dat, acq_type=None, s=False, o=None, o_fmt='png',
dpi=300, **kwargs):
if type(dat.flags) is ApresFlags:
plot.plot_apres(dat, s=s, o=o, ftype=o_fmt, dpi=dpi)
elif type(dat.flags) is TimeDiffFlags:
plot.plot_apres_diff(dat, s=s, o=o, ftype=o_fmt, dpi=dpi)
elif type(dat.flags) is QuadPolFlags:
plot.plot_apres_quadpol(dat, s=s, o=o, ftype=o_fmt, dpi=dpi)
if __name__ == '__main__':
main()

@@ -30,11 +30,15 @@ #! /usr/bin/env python

from .ApresFlags import ApresFlags
from .ApresFlags import ApresFlags, TimeDiffFlags, QuadPolFlags
from .ApresHeader import ApresHeader
from ..ImpdarError import ImpdarError
FILETYPE_OPTIONS = ['DAT', 'dat', 'mat', 'h5', 'nc']
class ApresData(object):
"""A class that holds the relevant information for an ApRES acquisition.
We keep track of processing steps with the flags attribute.
This base version's __init__ takes a filename of a .mat file in the old StODeep format to load.
Data are bundled across bursts into the same object if they are loaded together.
Some information such as the instrument temperature, battery voltage, etc., are recorded for each burst.
Some information is written to the header subclass instead of each burst individually.
We track the processing steps with the flags attribute.
"""

@@ -55,5 +59,2 @@ #: Attributes that every ApresData object should have and should not be None.

#: Optional attributes that may be None without affecting processing.
#: These may not have existed in old StoDeep files that we are compatible with,
#: and they often cannot be set at the initial data load.
#: If they exist, they all have units of meters.
attrs_optional = ['lat',

@@ -67,5 +68,6 @@ 'long',

'battery_voltage',
'Rcoarse']
'Rcoarse',
'uncertainty']
from ._ApresDataProcessing import apres_range, phase_uncertainty, phase2range, range_diff, stacking
from ._ApresDataProcessing import apres_range, phase_uncertainty, stacking
from ._ApresDataSaving import save

@@ -81,19 +83,19 @@

self.bnum = None #: int, the number of bursts
self.data = None #: np.ndarray(snum x tnum) of the actual return power
self.data = None #: np.ndarray(bnum, cnum, snum) of the actual return amplitude
self.dt = None #: float, The spacing between samples in travel time, in seconds
self.uncertainty = None #: float, phase uncertainty
# Per-trace attributes
#: np.ndarray(tnum,) of the acquisition time of each trace
# Per-chirp attributes
#: np.ndarray(bnum, cnum) of the acquisition time of each trace
#: note that this is referenced to Jan 1, 0 CE (matlabe datenum)
#: for convenience, use the `datetime` attribute to access a python version of the day
self.decday = None
#: np.ndarray(tnum,) latitude along the profile. Generally not in projected coordinates
#: np.ndarray(bnum, cnum) latitude/longitude along the profile. Generally not in projected coordinates
self.lat = None
#: np.ndarray(tnum,) longitude along the profile. Generally not in projected coords.
self.long = None
# chirp
self.chirp_num = None #: np.ndarray(cnum,) The 1-indexed number of the chirp
self.chirp_att = None #: np.ndarray(cnum,) Chirp attenuation settings
self.chirp_time = None #: np.ndarray(cnum,) Time at beginning of chirp (serial day)
self.chirp_num = None #: np.ndarray(bnum, cnum,) The 1-indexed number of the chirp
self.chirp_att = None #: np.ndarray(bnum, cnum,) Chirp attenuation settings
self.chirp_time = None #: np.ndarray(bnum, cnum,) Time at beginning of chirp (serial day)

@@ -104,12 +106,15 @@ # Sample-wise attributes

self.Rcoarse = None
self.frequencies = None
#: np.ndarray(tnum,) Optional. Projected x-coordinate along the profile.
#: float Optional. Projected coordinates of the acquisition location
self.x_coord = None
#: np.ndarray(tnum,) Optional. Projected y-coordinate along the profile.
self.y_coord = None
#: np.ndarray(tnum,) Optional. Elevation along the profile.
self.elev = None
#: float Optional. Temperatures
self.temperature1 = None
self.temperature2 = None
self.battery_voltage = None
# Special attributes
#: impdar.lib.RadarFlags object containing information about the processing steps done.
self.flags = ApresFlags()

@@ -145,3 +150,3 @@ self.header = ApresHeader()

setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
elif (mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1) and attr != 'data':
setattr(self, attr, mat[attr].flatten())

@@ -165,7 +170,6 @@ else:

self.flags.from_matlab(mat['flags'])
self.header = ApresHeader()
self.header.from_matlab(mat['header'])
self.fn = fn
self.header = ApresHeader()
self.flags.from_matlab(mat['flags'])
self.header.from_matlab(mat['header'])
self.check_attrs()

@@ -186,9 +190,353 @@

raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined RadarData object'.format(attr))
It appears that this is an ill-defined ApresData object'.format(attr))
if getattr(self, attr) is None:
raise ImpdarError('{:s} is None. \
It appears that this is an ill-defined RadarData object'.format(attr))
It appears that this is an ill-defined ApresData object'.format(attr))
for attr in self.attrs_optional:
if not hasattr(self, attr):
raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined ApresData object'.format(attr))
if not hasattr(self, 'data_dtype') or self.data_dtype is None:
self.data_dtype = self.data.dtype
return
@property
def datetime(self):
"""A python operable version of the time of acquisition of each trace"""
return np.array([datetime.datetime.fromordinal(int(dd)) + datetime.timedelta(days=dd % 1) - datetime.timedelta(days=366)
for dd in self.decday], dtype=np.datetime64)
# ------------------------------------------------
class ApresTimeDiff(object):
"""
Class for differencing between two Apres acquisitions.
"""
#: Attributes that every ApresTimeDiff object should have and should not be None.
attrs_guaranteed = ['data',
'data2',
'decday',
'decday2',
'dt',
'snum',
'range',
'fn1',
'fn2',
'fn']
#: Optional attributes that may be None without affecting processing.
#: These may not have existed in old StoDeep files that we are compatible with,
#: and they often cannot be set at the initial data load.
#: If they exist, they all have units of meters.
attrs_optional = ['lat',
'lat2',
'long',
'long2',
'x_coord',
'x_coord2',
'y_coord',
'y_coord2',
'elev',
'elev2',
'unc1',
'unc2',
'ds',
'co',
'phi',
'w',
'w_err',
'w_0',
'eps_zz',
'bed']
from ._TimeDiffProcessing import phase_diff, phase_unwrap, range_diff, strain_rate, bed_pick
from ._ApresDataSaving import save
def __init__(self,fn):
"""Initialize differencing fields
fn: class or string
Apres data object to load (if string should be an impdar apres file)
dat2: class or string
Apres data object to load (if string should be an impdar apres file)
"""
if fn is None:
# Write these out so we can document them
# Very basics
self.snum = None #: int number of samples per chirp
self.data = None #: np.ndarray(bnum, cnum, snum) of the actual return amplitude
self.data2 = None #: np.ndarray(bnum, cnum, snum) of the actual return amplitude
self.dt = None #: float, The spacing between samples in travel time, in seconds
# Per-trace attributes
#: np.ndarray(tnum,) of the acquisition time of each trace
#: note that this is referenced to Jan 1, 0 CE (matlabe datenum)
#: for convenience, use the `datetime` attribute to access a python version of the day
self.decday = None
self.decday2 = None
#: np.ndarray(tnum,) latitude along the profile. Generally not in projected coordinates
self.lat = None
self.lat2 = None
#: np.ndarray(tnum,) longitude along the profile. Generally not in projected coords.
self.long = None
self.long2 = None
# Sample-wise attributes
#: np.ndarray(snum,) The range for each sample, in m
self.range = None
#: np.ndarray(tnum,) Optional. Projected x-coordinate along the profile.
self.x_coord = None
self.x_coord2 = None
#: np.ndarray(tnum,) Optional. Projected y-coordinate along the profile.
self.y_coord = None
self.y_coord2 = None
#: np.ndarray(tnum,) Optional. Elevation along the profile.
self.elev = None
self.elev2 = None
# Differencing attributes
self.ds = None
self.co = None
self.w = None
# Special attributes
#: impdar.lib.RadarFlags object containing information about the processing steps done.
self.flags = TimeDiffFlags()
self.header = ApresHeader()
self.data_dtype = None
return
if os.path.splitext(fn)[1] == '.h5':
with h5py.File(fn, 'r') as fin:
grp = fin['dat']
for attr in grp.keys():
if attr in ['TimeDiffFlags', 'ApresHeader']:
continue
val = grp[attr][:]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
for attr in grp.attrs.keys():
val = grp.attrs[attr]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
self.flags = TimeDiffFlags()
self.header = ApresHeader()
self.flags.read_h5(grp)
self.header.read_h5(grp)
elif os.path.splitext(fn)[1] == '.mat':
mat = loadmat(fn)
for attr in self.attrs_guaranteed:
if mat[attr].shape == (1, 1):
setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
setattr(self, attr, mat[attr].flatten())
else:
setattr(self, attr, mat[attr])
# We may have some additional variables
for attr in self.attrs_optional:
if attr in mat:
if mat[attr].shape == (1, 1):
setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
setattr(self, attr, mat[attr].flatten())
else:
setattr(self, attr, mat[attr])
else:
setattr(self, attr, None)
self.data_dtype = self.data.dtype
self.flags = TimeDiffFlags()
self.flags.from_matlab(mat['flags'])
self.header = ApresHeader()
self.header.from_matlab(mat['header'])
else:
raise ImportError('ApresTimeDiff() is looking for an .h5 or .mat file \
saved as an Apdar object.')
self.fn = fn
self.check_attrs()
def check_attrs(self):
"""Check if required attributes exist.
This is largely for development only; loaders should generally call this method last,
so that they can confirm that they have defined the necessary attributes.
Raises
------
ImpdarError
If any required attribute is None or any optional attribute is fully absent"""
for attr in self.attrs_guaranteed:
if not hasattr(self, attr):
raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined ApresTimeDiff object'.format(attr))
if getattr(self, attr) is None:
raise ImpdarError('{:s} is None. \
It appears that this is an ill-defined ApresTimeDiff object'.format(attr))
if not hasattr(self, 'data_dtype') or self.data_dtype is None:
self.data_dtype = self.data.dtype
return
# ------------------------------------------------
class ApresQuadPol(object):
"""
A class that holds the relevant information for a quad-polarized ApRES acquisition.
"""
#: Attributes that every ApresQuadPol object should have and should not be None.
attrs_guaranteed = ['data',
'shh',
'shv',
'svh',
'svv',
'range',
'decday',
'dt',
'snum',
'travel_time']
#: Optional attributes that may be None without affecting processing.
#: These may not have existed in old StoDeep files that we are compatible with,
#: and they often cannot be set at the initial data load.
#: If they exist, they all have units of meters.
attrs_optional = ['lat',
'long',
'x_coord',
'y_coord',
'elev',
'ant_sep',
'ant_azi',
'thetas',
'HH',
'HV',
'VH',
'VV',
'chhvv',
'dphi_dz',
'cpe',
'cpe_idxs',
'chhvv_cpe',
'dphi_dz_cpe',
'phi']
from ._QuadPolProcessing import rotational_transform, coherence2d, phase_gradient2d, find_cpe
from ._ApresDataSaving import save
# Now make some load/save methods that will work with the matlab format
def __init__(self, fn):
if fn is None:
# Write these out so we can document them
# Very basics
self.data = None #: np.ndarray(snum) of the actual return amplitude; copy of shh so that this class has a data array
self.snum = None #: int, number of samples per chirp
self.dt = None #: float, The spacing between samples in travel time, in seconds
# Sample-wise attributes
#: np.ndarray(snum,)
self.shh = None #: returned amplitude for hh polarization
self.shv = None #: returned amplitude for hv polarization
self.svh = None #: returned amplitude for vh polarization
self.svv = None #: returned amplitude for vv polarization
self.travel_time = None #: The two way travel time to each sample, in us
# Float attributes relative to the time and location of the acquisition
#: note that decimal days are referenced to Jan 1, 0 CE (matlabe datenum)
#: for convenience, use the `datetime` attribute to access a python version of the day
self.decday = None #: acquisition time in days
self.lat = None #: latitude along the profile. Generally not in projected coordinates
self.long = None #: longitude along the profile. Generally not in projected coordinates
self.x_coord = None #: Optional. Projected x-coordinate along the profile.
self.y_coord = None #: Optional. Projected y-coordinate along the profile.
self.elev = None #: Optional. Elevation along the profile.
# Special attributes
#: impdar.lib.RadarFlags object containing information about the processing steps done.
self.flags = QuadPolFlags()
self.data_dtype = None
return
# Load from a file that has already been initialized in ImpDAR
if os.path.splitext(fn)[1] == '.h5':
with h5py.File(fn, 'r') as fin:
grp = fin['dat']
for attr in grp.keys():
if attr in ['QuadPolFlags']:
continue
val = grp[attr][:]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
for attr in grp.attrs.keys():
val = grp.attrs[attr]
if isinstance(val, h5py.Empty):
val = None
setattr(self, attr, val)
self.flags = QuadPolFlags()
self.flags.read_h5(grp)
else:
mat = loadmat(fn)
for attr in self.attrs_guaranteed:
if mat[attr].shape == (1, 1):
setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
setattr(self, attr, mat[attr].flatten())
else:
setattr(self, attr, mat[attr])
# We may have some additional variables
for attr in self.attrs_optional:
if attr in mat:
if mat[attr].shape == (1, 1):
setattr(self, attr, mat[attr][0][0])
elif mat[attr].shape[0] == 1 or mat[attr].shape[1] == 1:
setattr(self, attr, mat[attr].flatten())
else:
setattr(self, attr, mat[attr])
else:
setattr(self, attr, None)
self.data_dtype = self.shh.dtype
self.flags = QuadPolFlags()
self.flags.from_matlab(mat['flags'])
self.header = ApresHeader()
self.header.from_matlab(mat['header'])
self.fn = fn
self.check_attrs()
def check_attrs(self):
"""Check if required attributes exist.
This is largely for development only; loaders should generally call this method last,
so that they can confirm that they have defined the necessary attributes.
Raises
------
ImpdarError
If any required attribute is None or any optional attribute is fully absent"""
for attr in self.attrs_guaranteed:
if not hasattr(self, attr):
raise ImpdarError('{:s} is missing. \
It appears that this is an ill-defined ApresQuadPol object'.format(attr))
if getattr(self, attr) is None:
raise ImpdarError('{:s} is None. \
It appears that this is an ill-defined ApresQuadPol object'.format(attr))
if not hasattr(self, 'data_dtype') or self.data_dtype is None:
self.data_dtype = self.shh.dtype
return

@@ -195,0 +543,0 @@

@@ -23,3 +23,4 @@ #! /usr/bin/env python

def apres_range(self,p,max_range=4000,winfun='blackman'):
def apres_range(self, p, max_range=4000, winfun='blackman'):
"""

@@ -34,17 +35,8 @@ Range conversion.

pad factor, level of interpolation for fft
max_range: float
cut off after some maximum range
winfun: str
window function for fft
Output
--------
Rcoarse: array
range to bin centres (m)
Rfine: array
range to reflector from bin centre (m)
spec_cor: array
spectrum corrected. positive frequency half of spectrum with
ref phase subtracted. This is the complex signal which can be used for
cross-correlating two shot segements.
### Original Matlab File Notes ###

@@ -71,3 +63,3 @@ Phase sensitive processing of FMCW radar data based on Brennan et al. 2013

# window for fft
if winfun not in ['blackman','bartlett','hamming','hanning','kaiser']:
if winfun not in ['blackman', 'bartlett', 'hamming', 'hanning', 'kaiser']:
raise TypeError('Window must be in: blackman, bartlett, hamming, hanning, kaiser')

@@ -93,3 +85,3 @@ elif winfun == 'blackman':

# Brennan et al. (2014) eq. 17 measured at t=T/2
self.phiref = 2.*np.pi*self.header.fc*tau -(self.header.chirp_grad*tau**2.)/2
self.phiref = 2.*np.pi*self.header.fc*tau - (self.header.chirp_grad*tau**2.)/2

@@ -99,4 +91,4 @@ # --- Loop through for each chirp in burst --- #

# pre-allocate
spec = np.zeros((self.bnum,self.cnum,nf)).astype(np.cdouble)
spec_cor = np.zeros((self.bnum,self.cnum,nf)).astype(np.cdouble)
spec = np.zeros((self.bnum, self.cnum, nf)).astype(np.cdouble)
spec_cor = np.zeros((self.bnum, self.cnum, nf)).astype(np.cdouble)

@@ -106,14 +98,14 @@ for ib in range(self.bnum):

# isolate the chirp and preprocess before transform
chirp = self.data[ib,ic,:].copy()
chirp = chirp-np.mean(chirp) # de-mean
chirp *= win # windowed
chirp = self.data[ib, ic, :].copy()
chirp = chirp-np.mean(chirp) # de-mean
chirp *= win # windowed
# fourier transform
fft_chirp = (np.sqrt(2.*p)/len(chirp))*np.fft.fft(chirp,p*self.snum) # fft and scale for padding
fft_chirp /= np.sqrt(np.mean(win**2.)) # scale with rms of window
fft_chirp = (np.sqrt(2.*p)/len(chirp))*np.fft.fft(chirp, p*self.snum) # fft and scale for padding
fft_chirp /= np.sqrt(np.mean(win**2.)) # scale with rms of window
# output
spec[ib,ic,:] = fft_chirp[:nf] # positive frequency half of spectrum up to (nyquist minus deltaf)
comp = np.exp(-1j*(self.phiref)) # unit phasor with conjugate of phiref phase
spec_cor[ib,ic,:] = comp*fft_chirp[:nf] # positive frequency half of spectrum with ref phase subtracted
spec[ib, ic, :] = fft_chirp[:nf] # positive frequency half of spectrum up to (nyquist minus deltaf)
comp = np.exp(-1j*(self.phiref)) # unit phasor with conjugate of phiref phase
spec_cor[ib, ic, :] = comp*fft_chirp[:nf] # positive frequency half of spectrum with ref phase subtracted

@@ -125,12 +117,12 @@ self.data = spec_cor.copy()

# precise range measurement
self.Rfine = phase2range(np.angle(self.data),self.header.lambdac,
np.tile(self.Rcoarse,(self.bnum,self.cnum,1)),
self.header.chirp_grad,self.header.ci)
self.Rfine = phase2range(self, np.angle(self.data), self.header.lambdac,
np.tile(self.Rcoarse, (self.bnum, self.cnum, 1)),
self.header.chirp_grad, self.header.ci)
# Crop output variables to useful depth range only
n = np.argmin(self.Rcoarse<=max_range)
n = np.argmin(self.Rcoarse <= max_range)
self.Rcoarse = self.Rcoarse[:n]
self.Rfine = self.Rfine[:,:,:n]
self.data = self.data[:,:,:n]
self.spec = self.spec[:,:,:n]
self.Rfine = self.Rfine[:n]
self.data = self.data[:, :, :n]
self.spec = self.spec[:, :, :n]
self.snum = n

@@ -141,3 +133,3 @@

def phase_uncertainty(self,bed_range):
def phase_uncertainty(self, bed_range):
"""

@@ -152,9 +144,4 @@ Calculate the phase uncertainty using a noise phasor.

ApresData object
Returns
--------
phase_uncertainty: array
uncertainty in the phase (rad)
r_uncertainty: array
uncertainty in the range (m) calculated from phase uncertainty
bed_range: float
Range to the ice-bed interface, so noise floor can be calculated beneath it
"""

@@ -166,4 +153,4 @@

# Get measured phasor from the data class, and use the median magnitude for noise phasor
meas_phasor = self.data
median_mag = np.nanmedian(abs(meas_phasor[:,:,np.argwhere(self.Rcoarse>bed_range)]))
meas_phasor = np.squeeze(self.data)
median_mag = np.nanmedian(abs(meas_phasor[np.argwhere(self.Rcoarse>bed_range)]))
# Noise phasor with random phase and magnitude equal to median of measured phasor

@@ -174,14 +161,8 @@ noise_phase = np.random.uniform(-np.pi,np.pi,np.shape(meas_phasor))

# Phase uncertainty is the deviation in the phase introduced by the noise phasor when it is oriented perpendicular to the reflector phasor
phase_uncertainty = np.abs(np.arcsin(noise_orth/np.abs(meas_phasor)))
# Convert phase to range
r_uncertainty = phase2range(phase_uncertainty,
self.header.lambdac,
self.Rcoarse,
self.header.chirp_grad,
self.header.ci)
self.uncertainty = np.abs(np.arcsin(noise_orth/np.abs(meas_phasor)))
return phase_uncertainty, r_uncertainty
self.flags.uncertainty = True
def phase2range(phi,lambdac,rc=None,K=None,ci=None):
def phase2range(self, phi, lambdac=None, rc=None, K=None, ci=None):
"""

@@ -192,4 +173,2 @@ Convert phase difference to range for FMCW radar

---------
phi: float or array
phase (radians), must be of spectrum after bin center correction
lambdac: float

@@ -204,6 +183,2 @@ wavelength (m) at center frequency

Returns
--------
r: float or array
range (m)

@@ -215,2 +190,5 @@ ### Original Matlab File Notes ###

if lambdac is None:
lambdac = self.header.lambdac
if not all([K,ci]) or rc is None:

@@ -223,89 +201,10 @@ # First order method

r = phi/((4.*np.pi/lambdac) - (4.*rc*K/ci**2.))
return r
def range_diff(self,acq1,acq2,win,step,Rcoarse=None,r_uncertainty=None,uncertainty='CR'):
def stacking(self, num_chirps=None):
"""
Calculate the vertical motion using a correlation coefficient.
Parameters
---------
self: class
ApresData object
acq1: array
first acquisition for comparison
acq2: array
second acquisition for comparison
win: int
window size over which to do the correlation coefficient calculation
step: int
step size for the window to move between calculations
Rcoarse: array; optional
if an external depth array is desired, input here
r_uncertainty: array; optional
if uncertainty based on the noise vector is desired, input value here
this should be the sum of uncertainty from both acquisitions.
uncertainty: string;
default 'CR' Cramer-Rao bound as in Jordan et al. (2020)
Returns
--------
ds: array
depths at which the correlation coefficient is calculated
co: array
correlation coefficient between acquisitions
amplitude indicates how well reflection packets match between acquisitions
phase is a measure of the vertical motion
range_diff: array
vertical motion in meters
range_diff_unc: array
uncertainty of vertical motion in meters
"""
if np.shape(acq1) != np.shape(acq2):
raise TypeError('Acquisition inputs must be of the same shape.')
idxs = np.arange(win//2,(len(acq1)-win//2),step)
if Rcoarse is not None:
ds = Rcoarse[idxs]
else:
ds = self.Rcoarse[idxs]
co = np.empty_like(ds).astype(np.complex)
for i,idx in enumerate(idxs):
# index two sub_arrays to compare
arr1 = acq1[idx-win//2:idx+win//2]
arr2 = acq2[idx-win//2:idx+win//2]
# correlation coefficient to get the motion
# the amplitude indicates how well the reflections match between acquisitions
# the phase is a measure of the offset
co[i] = np.corrcoef(arr1,arr2)[1,0]
# convert the phase offset to a distance vector
r_diff = phase2range(np.angle(co),
self.header.lambdac,
ds,
self.header.chirp_grad,
self.header.ci)
if uncertainty == 'CR':
# Error from Cramer-Rao bound, Jordan et al. (2020) Ann. Glac. eq. (5)
sigma = (1./abs(co))*np.sqrt((1.-abs(co)**2.)/(2.*win))
# convert the phase offset to a distance vector
r_diff_unc = phase2range(sigma,
self.header.lambdac,
ds,
self.header.chirp_grad,
self.header.ci)
elif uncertainty == 'noise_phasor':
# Uncertainty from Noise Phasor as in Kingslake et al. (2014)
# r_uncertainty should be calculated using the function phase_uncertainty defined in this script
r_diff_unc = np.array([np.nanmean(r_uncertainty[i-win//2:i+win//2]) for i in idxs])
return ds, co, r_diff, r_diff_unc
def stacking(self,num_chirps=None):
"""
Stack traces/chirps together to beat down the noise.
Can stack across bursts if multiple are present.

@@ -339,1 +238,2 @@ Parameters

self.flags.stack = num_chirps

@@ -23,3 +23,3 @@ #! /usr/bin/env python

def save(self, fn):
"""Save the radar data
"""Save the apres data

@@ -44,3 +44,3 @@ Parameters

def save_mat(self, fn):
"""Save the radar data as an ImpDAR .mat file
"""Save the apres data as an ImpDAR .mat file

@@ -98,4 +98,4 @@ Parameters

def save_h5(self, fn):
"""Save the radar data as an h5 file
def save_h5(self, fn, groupname='dat'):
"""Save the apres data as an h5 file

@@ -110,3 +110,3 @@ Parameters

with h5py.File(fn, 'w') as f:
save_as_h5_group(self, f, 'dat')
save_as_h5_group(self, f, groupname=groupname)

@@ -129,2 +129,4 @@

val = getattr(self, attr)
if type(val) is str:
continue
if val is not None:

@@ -136,3 +138,3 @@ if hasattr(val, 'shape') and np.any([s != 1 for s in val.shape]):

else:
dtype = 'f'
dtype = np.dtype('f')
else:

@@ -144,3 +146,3 @@ dtype = val.dtype

else:
grp.attrs[attr] = h5py.Empty('f')
grp.attrs[attr] = h5py.Empty(dtype = np.dtype('f'))
for attr in self.attrs_optional:

@@ -154,3 +156,3 @@ if hasattr(self, attr) and getattr(self, attr) is not None:

else:
dtype = 'f'
dtype = np.dtype('f')
else:

@@ -164,2 +166,4 @@ # override the dtype for data

grp.attrs.create(attr, val)
else:
grp.attrs.create(attr, h5py.Empty(dtype = np.dtype('f')))

@@ -166,0 +170,0 @@ if self.flags is not None:

@@ -23,4 +23,3 @@ #! /usr/bin/env python

----------
batch: bool
Legacy indication of whether we are batch processing. Always False.
file_read_code
range: float

@@ -35,5 +34,6 @@ max range

self.range = 0
self.stack = 1
self.attrs = ['file_read_code', 'range', 'stack']
self.attr_dims = [None, None, None]
self.stack = 0
self.uncertainty = False
self.attrs = ['file_read_code', 'range', 'stack', 'uncertainty']
self.attr_dims = [None, None, None, None]

@@ -81,1 +81,135 @@ def write_h5(self, grp):

setattr(self, attr, np.zeros((attr_dim, )))
# ------------------------------------------------
class TimeDiffFlags():
    """Flags that indicate the processing that has been used on the data.

    These are used for figuring out whether different processing steps have
    been performed. They also contain some information about the input
    arguments for some (but not all) of the processing steps.

    Attributes
    ----------
    file_read_code:
        Code identifying how the file was read (None until set).
    phase_diff: bool
        Whether phase differencing has been performed.
    unwrap: bool
        Whether phase unwrapping has been performed.
    strain: np.ndarray
        Two-element array of strain-processing arguments.
    bed_pick: bool
        Whether the bed has been picked.
    """

    def __init__(self):
        self.file_read_code = None
        self.phase_diff = False
        self.unwrap = False
        self.strain = np.zeros((2,))
        self.bed_pick = False
        self.attrs = ['file_read_code', 'phase_diff', 'unwrap', 'strain', 'bed_pick']
        self.attr_dims = [None, None, None, 2, None]

    def write_h5(self, grp):
        """Write to a subgroup in hdf5 file

        Parameters
        ----------
        grp: h5py.Group
            The group to which the ApresFlags subgroup is written
        """
        subgrp = grp.create_group('ApresFlags')
        for attr in self.attrs:
            val = getattr(self, attr)
            if val is None:
                # Store None as an empty *attribute* (not a dataset) so that
                # read_h5, which only scans subgrp.attrs, can round-trip it.
                subgrp.attrs[attr] = h5py.Empty('f')
            else:
                if hasattr(val, 'dtype'):
                    val = val.astype('f')
                subgrp.attrs[attr] = val

    def read_h5(self, grp):
        """Read flags back from the 'ApresFlags' subgroup written by write_h5.

        Parameters
        ----------
        grp: h5py.Group
            The group containing the ApresFlags subgroup.
        """
        subgrp = grp['ApresFlags']
        for attr in subgrp.attrs.keys():
            val = subgrp.attrs[attr]
            if isinstance(val, h5py.Empty):
                val = None
            setattr(self, attr, val)

    def to_matlab(self):
        """Convert all associated attributes into a dictionary formatted for use with :func:`scipy.io.savemat`
        """
        # savemat cannot store None, so substitute NaN
        # (np.nan, not the np.NaN alias removed in NumPy 2.0)
        outmat = {att: (getattr(self, att) if getattr(self, att) is not None else np.nan) for att in self.attrs}
        return outmat

    def from_matlab(self, matlab_struct):
        """Associate all values from an incoming .mat file (i.e. a dictionary from :func:`scipy.io.loadmat`) with appropriate attributes
        """
        for attr, attr_dim in zip(self.attrs, self.attr_dims):
            setattr(self, attr, matlab_struct[attr][0][0][0])
            # Use this because matlab inputs may have zeros for flags that
            # were lazily appended to be arrays, but we preallocate
            if attr_dim is not None and getattr(self, attr).shape[0] == 1:
                setattr(self, attr, np.zeros((attr_dim, )))
# ------------------------------------------------
class QuadPolFlags():
    """Flags that indicate the processing that has been used on the data.

    These are used for figuring out whether different processing steps have
    been performed. They also contain some information about the input
    arguments for some (but not all) of the processing steps.

    Attributes
    ----------
    rotation: np.ndarray
        Two-element array of rotation-processing arguments.
    coherence: np.ndarray
        Three-element array of coherence-processing arguments.
    phasegradient: bool
        Whether the phase gradient has been computed.
    cpe: bool
        Cross-polarized-extinction flag.
    """

    def __init__(self):
        self.rotation = np.zeros((2,))
        self.coherence = np.zeros((3,))
        self.phasegradient = False
        self.cpe = True
        self.attrs = ['rotation', 'coherence', 'phasegradient', 'cpe']
        self.attr_dims = [2, 3, None, None]

    def read_h5(self, grp):
        """Read flags back from the 'QuadPolFlags' subgroup written by write_h5.

        Parameters
        ----------
        grp: h5py.Group
            The group containing the QuadPolFlags subgroup.
        """
        subgrp = grp['QuadPolFlags']
        for attr in subgrp.attrs.keys():
            val = subgrp.attrs[attr]
            if isinstance(val, h5py.Empty):
                val = None
            setattr(self, attr, val)

    def write_h5(self, grp):
        """Write to a subgroup in hdf5 file

        Parameters
        ----------
        grp: h5py.Group
            The group to which the QuadPolFlags subgroup is written
        """
        subgrp = grp.create_group('QuadPolFlags')
        for attr in self.attrs:
            val = getattr(self, attr)
            if val is None:
                # Store None as an empty *attribute* (not a dataset) so that
                # read_h5, which only scans subgrp.attrs, can round-trip it.
                subgrp.attrs[attr] = h5py.Empty('f')
            else:
                if hasattr(val, 'dtype'):
                    val = val.astype('f')
                subgrp.attrs[attr] = val

    def to_matlab(self):
        """Convert all associated attributes into a dictionary formatted for use with :func:`scipy.io.savemat`
        """
        # savemat cannot store None, so substitute NaN (matches TimeDiffFlags)
        outmat = {att: (getattr(self, att) if getattr(self, att) is not None else np.nan) for att in self.attrs}
        return outmat

    def from_matlab(self, matlab_struct):
        """Associate all values from an incoming .mat file (i.e. a dictionary from :func:`scipy.io.loadmat`) with appropriate attributes
        """
        for attr, attr_dim in zip(self.attrs, self.attr_dims):
            setattr(self, attr, matlab_struct[attr][0][0][0])
            # Use this because matlab inputs may have zeros for flags that
            # were lazily appended to be arrays, but we preallocate
            if attr_dim is not None and getattr(self, attr).shape[0] == 1:
                setattr(self, attr, np.zeros((attr_dim, )))

@@ -29,4 +29,4 @@ #! /usr/bin/env python

import re
from ..ImpdarError import ImpdarError
class ApresHeader():

@@ -118,3 +118,3 @@ """

else:
raise TypeError('Unknown file format - check file')
raise ImpdarError('Unknown file format - check file')

@@ -274,4 +274,1 @@ def update_parameters(self,fn_apres=None):

setattr(self, attr, np.zeros((attr_dim, )))
for attr in self.bool_attrs:
setattr(self, attr, True if matlab_struct[attr][0][0][0] == 1 else 0)

@@ -5,3 +5,3 @@ #! /usr/bin/env python

#
# Copyright © 2019 David Lilien <dlilien90@gmail.com>
# Copyright © 2019 Benjamin Hills <bhills@uw.com>
#

@@ -37,2 +37,7 @@ # Distributed under terms of the GNU GPL3 license.

try:
from netCDF4 import Dataset
nc_load = True
except:
nc_load = False

@@ -46,3 +51,7 @@ def load_apres(fns_apres, burst=1, fs=40000, *args, **kwargs):

each loads object to concatenate
burst: int
fs: int
sampling frequency (per second)
Returns

@@ -59,3 +68,3 @@ -------

fn, burst=burst, fs=fs, *args, **kwargs))
except ImpdarError:
except:
Warning('Cannot load file: '+fn)

@@ -65,5 +74,5 @@

out = deepcopy(apres_data[0])
ext = os.path.splitext(fns_apres[0])[1]
if len(apres_data)>1:
if len(apres_data)>1 or ext in ['.DAT','.dat']:
for dat in apres_data[1:]:

@@ -79,12 +88,21 @@ if out.snum != dat.snum:

out.data = np.vstack([[dat.data] for dat in apres_data])
out.chirp_num = np.vstack([[dat.chirp_num] for dat in apres_data])
out.chirp_att = np.vstack([[dat.chirp_att] for dat in apres_data])
out.chirp_time = np.vstack([[dat.chirp_time] for dat in apres_data])
out.temperature1 = np.hstack([dat.temperature1 for dat in apres_data])
out.temperature2 = np.hstack([dat.temperature2 for dat in apres_data])
out.battery_voltage = np.hstack(
[dat.battery_voltage for dat in apres_data])
out.bnum = np.shape(out.data)[0]
out.data = np.vstack([[dat.data] for dat in apres_data])
out.chirp_num = np.vstack([[dat.chirp_num] for dat in apres_data])
out.chirp_att = np.vstack([[dat.chirp_att] for dat in apres_data])
out.chirp_time = np.vstack([[dat.chirp_time] for dat in apres_data])
out.decday = np.hstack([dat.decday for dat in apres_data])
out.time_stamp = np.hstack([dat.time_stamp for dat in apres_data])
out.lat = np.hstack([dat.lat for dat in apres_data])
out.long = np.hstack([dat.long for dat in apres_data])
out.temperature1 = np.hstack([dat.temperature1 for dat in apres_data])
out.temperature2 = np.hstack([dat.temperature2 for dat in apres_data])
out.battery_voltage = np.hstack(
[dat.battery_voltage for dat in apres_data])
out.bnum = np.shape(out.data)[0]
else:
out = deepcopy(apres_data[0])
out.fn = os.path.splitext(fns_apres[0])[0]
return out

@@ -127,3 +145,3 @@

# We can be raw, impdar mat, BAS mat, or impdar h5
# We can be raw, impdar mat, BAS mat, impdar h5, or BAS nc
ext = os.path.splitext(fn_apres)[1]

@@ -148,2 +166,9 @@ if ext == '.mat':

return ApresData(fn_apres)
elif ext == '.nc':
return load_BAS_nc(fn_apres)
elif ext not in ['.dat','.DAT']:
raise ValueError('Expecting a certain filetype; either .mat, .h5, .dat, .DAT, .nc')
else:

@@ -155,18 +180,11 @@ # Load data and reshape array

# Extract just good chirp data from voltage record and rearrange into
# matrix with one chirp per row
# note: you can't just use reshape as we are also cropping the 20K samples
# of sync tone etc which occur after each 40K of chirp.
AttSet = apres_data.header.attenuator1 + 1j * \
apres_data.header.attenuator2 # unique code for attenuator setting
# Extract just good chirp data from voltage record and rearrange into
# matrix with one chirp per row
# note: you can't just use reshape as we are also cropping the 20K samples
# of sync tone etc which occur after each 40K of chirp.
AttSet = apres_data.header.attenuator1 + 1.0j * apres_data.header.attenuator2 # unique code for attenuat
# Add metadata to structure
# Sampling parameters
if apres_data.header.file_format is None:
raise TypeError("File format is 'None', cannot load")
else:
if apres_data.header.file_format != 5:
raise TypeError('Loading functions have only been written for rmb5 data.\
Look back to the original Matlab scripts if you need to implement earlier formats.')
# Sampling parameters
if apres_data.header.file_format is None:
raise TypeError("File format is 'None', cannot load")
else:

@@ -187,6 +205,5 @@ apres_data.header.f1 = apres_data.header.f0 + \

apres_data.chirp_num = np.arange(apres_data.cnum)
apres_data.chirp_att = np.zeros(
(apres_data.cnum)).astype(np.cdouble)
apres_data.chirp_att = np.zeros((apres_data.cnum)).astype(np.cdouble)
apres_data.chirp_time = np.zeros((apres_data.cnum))
apres_data.chirp_interval = 1.6384/(24.*3600.) # in days; apparently this is always the interval?
apres_data.header.chirp_interval = 1.6384/(24.*3600.) # in days; apparently this is always the interval?
for chirp in range(apres_data.cnum):

@@ -196,3 +213,3 @@ data_load[chirp, :] = apres_data.data[start_ind[chirp]: end_ind[chirp]]

apres_data.chirp_att[chirp] = AttSet[chirp//apres_data.cnum]
apres_data.chirp_time[chirp] = apres_data.decday + apres_data.chirp_interval*chirp
apres_data.chirp_time[chirp] = apres_data.decday + apres_data.header.chirp_interval*chirp
apres_data.data = data_load

@@ -208,2 +225,4 @@

apres_data.check_attrs()
return apres_data

@@ -282,4 +301,4 @@

self.snum = int(output[0])
self.n_subbursts = int(output[1])
self.average = int(output[2])
self.header.average = int(output[2])
self.header.n_subbursts = int(output[1])
self.header.n_attenuators = int(output[3])

@@ -296,6 +315,6 @@ self.header.attenuator1 = np.array(output[4].split(',')).astype(int)[

if self.average != 0:
if self.header.average != 0:
self.cnum = 1
else:
self.cnum = self.n_subbursts*len(self.header.tx_ant) *\
self.cnum = self.header.n_subbursts*len(self.header.tx_ant) *\
len(self.header.rx_ant)*self.header.n_attenuators

@@ -317,3 +336,3 @@

if burst_count < burst and burst_pointer <= file_len - max_header_len:
if self.average != 0:
if self.header.average != 0:
burst_pointer += self.cnum*self.snum*4

@@ -328,3 +347,4 @@ else:

# Look for a few different strings and save output
strings = ['Time stamp=', 'Temp1=', 'Temp2=', 'BatteryVoltage=']
strings = ['Time stamp=', 'Latitude=', 'Longitude=',
'Temp1=', 'Temp2=', 'BatteryVoltage=']
output = []

@@ -346,4 +366,3 @@ for i, string in enumerate(strings):

else:
self.time_stamp = np.array([datetime.datetime.strptime(
str_time, '%Y-%m-%d %H:%M:%S') for str_time in output[0]])
self.time_stamp = np.array([datetime.datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S') for str_time in output[0]])
timezero = datetime.datetime(1, 1, 1, 0, 0, 0)

@@ -353,5 +372,7 @@ day_offset = self.time_stamp - timezero

self.temperature1 = np.array(output[1]).astype(float)
self.temperature2 = np.array(output[2]).astype(float)
self.battery_voltage = np.array(output[3]).astype(float)
self.lat = np.array(output[1]).astype(float)
self.long = np.array(output[2]).astype(float)
self.temperature1 = np.array(output[3]).astype(float)
self.temperature2 = np.array(output[4]).astype(float)
self.battery_voltage = np.array(output[5]).astype(float)

@@ -371,8 +392,8 @@ # --- Read in the actual data --- #

self.bnum = burst_count - 1
raise ValueError('Burst {:d} not found in file {:s}'.format(self.bnum, self.header.fn))
raise ImpdarError('Burst {:d} not found in file {:s}'.format(self.bnum, self.header.fn))
else:
if self.average == 2:
if self.header.average == 2:
self.data = np.fromfile(
fid, dtype='uint32', count=self.cnum*self.snum)
elif self.average == 1:
elif self.header.average == 1:
fid.seek(burst_pointer+1)

@@ -392,4 +413,4 @@ self.data = np.fromfile(

if self.average == 2:
self.data /= (self.n_subbursts*self.header.n_attenuators)
if self.header.average == 2:
self.data /= (self.header.n_subbursts*self.header.n_attenuators)

@@ -430,5 +451,16 @@ start_ind = np.transpose(np.arange(0, self.snum*self.cnum, self.snum))

apres_data = ApresData(None)
# Load the header first. Some of these are pre-set, some really loaded
apres_data.header.f0 = mat['vdat'][0]['f0'][0][0][0]
apres_data.header.fs = mat['vdat'][0]['fs'][0][0][0]
apres_data.header.f1 = mat['vdat'][0]['f1'][0][0][0]
apres_data.header.fc = mat['vdat'][0]['fc'][0][0][0]
apres_data.header.attenuator1 = mat['vdat'][0]['Attenuator_1'][0][0][0]
apres_data.header.attenuator2 = mat['vdat'][0]['Attenuator_2'][0][0][0]
apres_data.header.chirp_length = mat['vdat'][0]['T'][0][0][0]
apres_data.header.chirp_grad = mat['vdat'][0]['K'][0][0][0]
apres_data.header.bandwidth = mat['vdat'][0]['B'][0][0][0]
apres_data.header.lambdac = mat['vdat'][0]['lambdac'][0][0][0]
apres_data.header.er = mat['vdat'][0]['er'][0][0][0]
apres_data.header.ci = mat['vdat'][0]['ci'][0][0][0]

@@ -438,6 +470,5 @@ apres_data.snum = mat['vdat'][0]['Nsamples'][0][0][0]

apres_data.bnum = mat['vdat'][0]['Burst'][0][0][0]
apres_data.n_subbursts = mat['vdat'][0]['SubBurstsInBurst'][0][0][0]
apres_data.average = mat['vdat'][0]['Average'][0][0][0]
apres_data.header.n_subbursts = mat['vdat'][0]['SubBurstsInBurst'][0][0][0]
apres_data.header.average = mat['vdat'][0]['Average'][0][0][0]
apres_data.data = mat['vdat'][0]['v'][0].T
apres_data.travel_time = mat['vdat'][0]['t'][0].T

@@ -450,6 +481,153 @@ apres_data.frequencies = mat['vdat'][0]['f'][0].T

apres_data.decday = mat['vdat'][0]['TimeStamp'][0][0][0]
apres_data.header.chirp_interval = chirp_interval
apres_data.chirp_time = apres_data.decday + apres_data.header.chirp_interval * np.arange(0.0, apres_data.cnum, 1.0)
apres_data.chirp_interval = chirp_interval
apres_data.chirp_time = apres_data.decday + apres_data.chirp_interval * np.arange(0.0, apres_data.cnum, 1.0)
apres_data.data = mat['vdat'][0]['vif'][0]
if len(apres_data.data.shape) == 2:
apres_data.data = np.reshape(apres_data.data, (1, apres_data.data.shape[0], apres_data.data.shape[1]))
apres_data.check_attrs()
return apres_data
def load_BAS_nc(fn, fs=40000, chirp_interval=1.6384/(24.*3600.), *args, **kwargs):
    """Load apres data from a netcdf file saved by software from the British Antarctic Survey.

    https://github.com/antarctica/bas-apres

    Parameters
    ----------
    fn: str
        netCDF file name for ApresData
    fs: int
        sampling frequency (Hz); the file does not record it
    chirp_interval: float
        time between chirps in days; apparently this is always the same

    Returns
    -------
    apres_data: class
        ImpDAR data object
    """
    if not nc_load:
        raise ImportError('Need the netCDF4 library to load nc files.')

    apres_data = ApresData(None)
    apres_data.bnum = int(0)
    with Dataset(fn, 'r') as fin:
        # Each netCDF group is its own burst; a group-less file is one burst.
        # Dataset.groups is a dict, so take an ordered key list up front
        # (it is not directly sliceable).
        group_keys = list(fin.groups.keys())
        apres_data.bnum += 1
        if len(group_keys) > 0:
            attrs = vars(fin.groups[group_keys[0]]).copy()
            apres_data.data = np.array([fin.groups[group_keys[0]].variables['data'][:]])
        else:
            attrs = vars(fin).copy()
            apres_data.data = np.array([fin.variables['data'][:]])

        # --- Header fields (taken from the first burst) --- #
        apres_data.header.fs = fs
        apres_data.header.fn = fn
        apres_data.header.file_format = 'BAS_nc'
        apres_data.header.noDwellHigh = int(attrs['NoDwell'])
        #apres_data.header.noDwellLow = int(attrs['NoDwell'])
        apres_data.header.f0 = float(attrs['StartFreq'])
        apres_data.header.f_stop = float(attrs['StopFreq'])
        apres_data.header.ramp_up_step = float(attrs['FreqStepUp'])
        apres_data.header.ramp_down_step = float(attrs['FreqStepDn'])
        apres_data.header.tstep_up = float(attrs['TStepUp'])
        apres_data.header.tstep_down = float(attrs['TStepDn'])
        apres_data.header.ramp_dir = 'up'
        apres_data.header.nsteps_DDS = round(abs((apres_data.header.f_stop - apres_data.header.f0)/apres_data.header.ramp_up_step))  # abs as ramp could be down
        apres_data.header.chirp_length = int(apres_data.header.nsteps_DDS * apres_data.header.tstep_up)
        apres_data.header.nchirp_samples = round(apres_data.header.chirp_length * apres_data.header.fs)
        # If number of ADC samples collected is less than required to collect
        # entire chirp, set chirp length to length of series actually collected
        apres_data.header.snum = float(attrs['N_ADC_SAMPLES'])
        if apres_data.header.nchirp_samples > apres_data.header.snum:
            apres_data.header.chirp_length = apres_data.header.snum / apres_data.header.fs
        apres_data.header.chirp_grad = 2.*np.pi*(apres_data.header.ramp_up_step/apres_data.header.tstep_up)  # chirp gradient (rad/s/s)
        if apres_data.header.f_stop > 400e6:
            apres_data.header.ramp_dir = 'down'
        else:
            apres_data.header.ramp_dir = 'up'
        apres_data.header.f1 = apres_data.header.f0 + \
            apres_data.header.chirp_length * apres_data.header.chirp_grad/2./np.pi
        apres_data.header.bandwidth = apres_data.header.chirp_length * \
            apres_data.header.chirp_grad/2/np.pi
        apres_data.header.fc = apres_data.header.f0 + apres_data.header.bandwidth/2.
        apres_data.header.er = 3.18
        apres_data.header.ci = 3e8/np.sqrt(apres_data.header.er)
        apres_data.header.lambdac = apres_data.header.ci/apres_data.header.fc
        apres_data.header.n_attenuators = int(attrs['nAttenuators'])
        str_attenuators = attrs['Attenuator1']
        apres_data.header.attenuator1 = np.array(str_attenuators.split(',')).astype(int)[
            :apres_data.header.n_attenuators]
        AFGain = attrs['AFGain']
        apres_data.header.attenuator2 = np.array(AFGain.split(',')).astype(int)[
            :apres_data.header.n_attenuators]
        apres_data.header.tx_ant = attrs['TxAnt']
        apres_data.header.rx_ant = attrs['RxAnt']
        # Should be the same for every burst
        apres_data.header.average = float(attrs['Average'])
        apres_data.header.chirp_interval = chirp_interval

        apres_data.dt = 1.0 / apres_data.header.fs
        apres_data.snum = int(attrs['N_ADC_SAMPLES'])
        apres_data.cnum = int(attrs['NSubBursts'])
        apres_data.header.n_subbursts = int(attrs['NSubBursts'])

        # Per-burst arrays, initialized from the first burst and appended to below
        apres_data.temperature1 = np.array([float(attrs['Temp1'])])
        apres_data.temperature2 = np.array([float(attrs['Temp2'])])
        apres_data.battery_voltage = np.array([float(attrs['BatteryVoltage'])])
        time_stamp = attrs['Time stamp']
        apres_data.time_stamp = np.array([datetime.datetime.strptime(time_stamp, '%Y-%m-%d %H:%M:%S')])
        timezero = datetime.datetime(1, 1, 1, 0, 0, 0)
        day_offset = apres_data.time_stamp - timezero
        apres_data.decday = np.array([offset.days + offset.seconds/86400. for offset in day_offset]) + 366.  # Matlab compatible
        apres_data.chirp_time = apres_data.decday + apres_data.header.chirp_interval * np.arange(0.0, apres_data.cnum, 1.0)

        AttSet = apres_data.header.attenuator1 + 1j * \
            apres_data.header.attenuator2  # unique code for attenuator setting
        apres_data.chirp_att = np.zeros((apres_data.cnum)).astype(np.cdouble)
        for chirp in range(apres_data.cnum):
            # NOTE(review): chirp//cnum is always 0 here, so every chirp gets
            # the first attenuator code; mirrors the .DAT loader's indexing
            apres_data.chirp_att[chirp] = AttSet[chirp//apres_data.cnum]
        apres_data.chirp_num = np.array([np.arange(apres_data.cnum) + 1])

        if len(group_keys) > 1:
            for key in group_keys[1:]:
                apres_data.bnum += 1
                # Re-read the metadata for this burst rather than reusing
                # the first group's attributes
                attrs = vars(fin.groups[key]).copy()
                apres_data.data = np.append(apres_data.data,
                                            [fin.groups[key].variables['data'][:]],
                                            axis=0)
                # Append to these data arrays for subsequent bursts
                apres_data.temperature1 = np.append(apres_data.temperature1, float(attrs['Temp1']))
                apres_data.temperature2 = np.append(apres_data.temperature2, float(attrs['Temp2']))
                apres_data.battery_voltage = np.append(apres_data.battery_voltage, float(attrs['BatteryVoltage']))
                time_stamp = attrs['Time stamp']
                ts = datetime.datetime.strptime(time_stamp, '%Y-%m-%d %H:%M:%S')
                apres_data.time_stamp = np.append(apres_data.time_stamp, ts)
                timezero = datetime.datetime(1, 1, 1, 0, 0, 0)
                day_offset = ts - timezero
                decday = day_offset.days + day_offset.seconds/86400. + 366.
                apres_data.decday = np.append(apres_data.decday, decday)
                apres_data.chirp_time = np.append(apres_data.chirp_time,
                                                  decday + apres_data.header.chirp_interval * np.arange(0.0, apres_data.cnum, 1.0),
                                                  axis=0)
                apres_data.chirp_num = np.append(apres_data.chirp_num,
                                                 [np.arange(apres_data.cnum) + 1],
                                                 axis=0)

        apres_data.travel_time = apres_data.dt * np.arange(apres_data.snum)
        apres_data.frequencies = apres_data.header.f0 + apres_data.travel_time * apres_data.header.chirp_grad/(2.*np.pi)
        apres_data.travel_time *= 1.0e6
        apres_data.data_dtype = apres_data.data.dtype

    apres_data.check_attrs()
    return apres_data

@@ -17,3 +17,3 @@ #! /usr/bin/env python

from . import load_gssi, load_pulse_ekko, load_gprMax, load_olaf, load_segy, load_UoA
from . import load_delores, load_osu, load_stomat, load_ramac, load_bsi, load_tek
from . import load_delores, load_osu, load_stomat, load_ramac, load_bsi, load_tek, load_apres_profile
from ..RadarData import RadarData

@@ -26,3 +26,3 @@

'mcords_nc', 'UoA_mat', 'UoA_h5', 'ramac', 'bsi', 'delores', 'osu',
'ramac', 'tek']
'ramac', 'tek', 'apres']

@@ -139,2 +139,4 @@

dat = [load_tek.load_tek(fn) for fn in fns_in]
elif filetype == 'apres':
dat = [load_apres_profile.load_apres_profile([fn]) for fn in fns_in]
else:

@@ -141,0 +143,0 @@ raise ValueError('Unrecognized filetype')

@@ -52,4 +52,5 @@ #! /usr/bin/env python

def load_bsi(fn_h5, nans=None, *args, **kwargs):
def load_bsi(fn_h5, XIPR=True, channel=0., line=None, nans=None, *args, **kwargs):
"""Load a BSI IceRadar file, which is just an h5 file, into ImpDAR."""
if not H5:

@@ -67,2 +68,4 @@ raise ImportError('You need H5 to load bsi')

for dset_name in dset_names:
if line != None and dset_name != 'line_' + str(line):
continue
print('Loading {:s} from {:s}'.format(dset_name, fn_h5))

@@ -86,11 +89,38 @@ # Just in case there is something else that can be here

if type(dset['location_0']['datacapture_0']['echogram_0'].attrs['Digitizer-MetaData_xml']) == str:
digitizer_data = dset['location_0']['datacapture_0'][
'echogram_0'].attrs['Digitizer-MetaData_xml']
if not XIPR:
dig_meta_str = 'Digitizer-MetaData_xml'
gps_cluster_str = 'GPS Cluster- MetaData_xml'
gps_fix_str = 'GPS Fix valid'
gps_message_str = 'GPS Message ok'
sample_rate_str = ' Sample Rate'
trigger_level_str = 'trigger level'
gps_timestamp_str = 'GPS_timestamp_UTC'
alt_asl = 'Alt_asl_m'
ch = '0'
h5_data.chan = 0
elif XIPR:
dig_meta_str = 'DigitizerMetaData_xml'
gps_cluster_str = 'GPSData_xml'
gps_fix_str = 'GPSFixValid'
gps_message_str = 'GPSMessageOk'
sample_rate_str = 'SampleRate'
trigger_level_str = 'TriggerLevel'
gps_timestamp_str = 'GPSTimestamp_UTC'
alt_asl = 'Alt_ASL_m'
if channel == 0 or channel == 'unamped':
ch = '0'
h5_data.chan = 0
if channel == 1 or channel == 'amped':
ch = '1'
h5_data.chan = 1
if type(dset['location_0']['datacapture_'+ch]['echogram_'+ch].attrs[dig_meta_str]) == str:
digitizer_data = dset['location_0']['datacapture_'+ch][
'echogram_'+ch].attrs[dig_meta_str]
else:
digitizer_data = dset['location_0']['datacapture_0'][
'echogram_0'].attrs['Digitizer-MetaData_xml'].decode('utf-8')
digitizer_data = dset['location_0']['datacapture_'+ch][
'echogram_'+ch].attrs[dig_meta_str].decode('utf-8')
for location_num in range(h5_data.tnum):
# apparently settings can change mid-line
nsamps = dset['location_{:d}'.format(location_num)]['datacapture_0']['echogram_0'].shape[0]
nsamps = dset['location_{:d}'.format(location_num)]['datacapture_'+ch]['echogram_'+ch].shape[0]
if nsamps > h5_data.snum:

@@ -101,24 +131,24 @@ h5_data.data = np.vstack((h5_data.data, np.zeros((nsamps - h5_data.snum, h5_data.tnum))))

'location_{:d}'.format(location_num)][
'datacapture_0']['echogram_0']
'datacapture_'+ch]['echogram_'+ch]
if type(dset['location_{:d}'.format(location_num)][
'datacapture_0']['echogram_0'].attrs[
'GPS Cluster- MetaData_xml']) == str:
'datacapture_'+ch]['echogram_'+ch].attrs[
gps_cluster_str]) == str:
gps_data = dset['location_{:d}'.format(location_num)][
'datacapture_0']['echogram_0'].attrs[
'GPS Cluster- MetaData_xml']
'datacapture_'+ch]['echogram_'+ch].attrs[
gps_cluster_str]
else:
gps_data = dset['location_{:d}'.format(location_num)][
'datacapture_0']['echogram_0'].attrs[
'GPS Cluster- MetaData_xml'].decode('utf-8')
if (float(_xmlGetVal(gps_data, 'GPS Fix valid')) > 0) and (
float(_xmlGetVal(gps_data, 'GPS Message ok')) > 0):
'datacapture_'+ch]['echogram_'+ch].attrs[
gps_cluster_str].decode('utf-8')
if (float(_xmlGetVal(gps_data, gps_fix_str)) > 0) and (
float(_xmlGetVal(gps_data, gps_message_str)) > 0):
# sometimes, there are bad entries that are unmarked
try:
lat[location_num] = float(_xmlGetVal(gps_data, 'Lat_N'))
lat[location_num] = float(_xmlGetVal(gps_data, 'Lat'))
lon[location_num] = float(
_xmlGetVal(gps_data, 'Long_ W'))
_xmlGetVal(gps_data, 'Long'))
time[location_num] = float(
_xmlGetVal(gps_data, 'GPS_timestamp_UTC'))
_xmlGetVal(gps_data, gps_timestamp_str))
h5_data.elev[location_num] = float(
_xmlGetVal(gps_data, 'Alt_asl_m'))
_xmlGetVal(gps_data, alt_asl))
except:

@@ -136,3 +166,3 @@ lat[location_num] = np.nan

h5_data.dt = 1.0 / float(
_xmlGetVal(digitizer_data, ' Sample Rate'))
_xmlGetVal(digitizer_data, sample_rate_str))
h5_data.travel_time = np.arange(h5_data.snum) * h5_data.dt * 1.0e6

@@ -143,3 +173,3 @@

h5_data.trig_level = float(
_xmlGetVal(digitizer_data, 'trigger level'))
_xmlGetVal(digitizer_data, trigger_level_str))
time_offset = float(_xmlGetVal(digitizer_data, 'relativeInitialX'))

@@ -181,3 +211,3 @@ h5_data.travel_time = h5_data.travel_time + time_offset * 1.0e6

h5_data.lat = _dm2dec(lat)
h5_data.long = -_dm2dec(lon)
h5_data.long = np.sign(lon) * _dm2dec(abs(lon))
h5_data.trace_num = np.arange(h5_data.tnum).astype(int) + 1

@@ -189,3 +219,12 @@

# need to access a comment to get the day
day_collection = _dt_from_comment(dset)
try:
day_collection = _dt_from_comment(dset)
except:
c_timestamp = dset['location_0'].attrs['CreationTimestamp']
if not isinstance(c_timestamp, str):
c_timestamp = c_timestamp.decode('utf-8')
c_timestamp = c_timestamp[:c_timestamp.find(' ')]
dmy = list(map(int, c_timestamp.split('/')))
day_collection = datetime.datetime(dmy[2], dmy[1], dmy[0], 0, 0, 0)
day_offset = (day_collection - datetime.datetime(1, 1, 1, 0, 0, 0)

@@ -195,11 +234,14 @@ ).days

try:
h5_data.get_projected_coords()
except ImportError:
temp_x = h5_data.long * 110. * np.cos(h5_data.lat)
temp_y = 110. * h5_data.lat
h5_data.dist = np.hstack((
[0], np.cumsum(np.sqrt(
np.diff(temp_x) ** 2.0 + np.diff(temp_y) ** 2.0))))
print('Geographic coordinate conversion skipped: no ogr')
if np.any(np.isfinite(h5_data.lat)):
try:
h5_data.get_projected_coords()
except ImportError:
temp_x = h5_data.long * 110. * np.cos(h5_data.lat)
temp_y = 110. * h5_data.lat
h5_data.dist = np.hstack((
[0], np.cumsum(np.sqrt(
np.diff(temp_x) ** 2.0 + np.diff(temp_y) ** 2.0))))
print('Geographic coordinate conversion skipped: no ogr')
else:
h5_data.dist = np.zeros(h5_data.tnum)

@@ -211,5 +253,5 @@ h5_data.trace_int = np.hstack(

h5_data.chan = 0
h5_data.check_attrs()
h5_data_list.append(h5_data)
return h5_data_list

@@ -204,2 +204,3 @@ #! /usr/bin/env python

doy = (int(line[6:10]), int(line[:2]), int(line[3:5]))
day_offset = datetime.datetime(doy[0], doy[1], doy[2], 0, 0, 0)

@@ -253,9 +254,9 @@ if pe_data.version == '1.0':

pe_data.elev = pe_data.gps_data.z
day_offset = datetime.datetime(doy[0], doy[1], doy[2], 0, 0, 0)
pe_data.trace_int = np.hstack((np.array(np.nanmean(
np.diff(pe_data.dist))), np.diff(pe_data.dist)))
tmin = day_offset.toordinal() + np.min(pe_data.gps_data.dectime) + 366.
tmax = day_offset.toordinal() + np.max(pe_data.gps_data.dectime) + 366.
# 366 for matlab compat
tmax = day_offset.toordinal() + np.max(pe_data.gps_data.dectime) + 366. # 366 for matlab compatability
pe_data.decday = np.linspace(tmin, tmax, pe_data.tnum)
pe_data.trace_int = np.hstack((np.array(np.nanmean(
np.diff(pe_data.dist))), np.diff(pe_data.dist)))
else:

@@ -269,6 +270,8 @@ print('Warning: Cannot find gps file, %s.' % gps_fn)

pe_data.elev = np.zeros((pe_data.data.shape[1],))
pe_data.decday = np.arange(pe_data.data.shape[1])
pe_data.trace_int = np.ones((pe_data.data.shape[1],))
seconds_of_day = pe_data.traceheaders.time_of_day.flatten()
pe_data.decday = day_offset.toordinal() + 366. + seconds_of_day/60./60./24.
pe_data.check_attrs()
return pe_data

@@ -25,4 +25,4 @@ #! /usr/bin/env python

"""Equivalent to Matlab fread function"""
if dtype is np.str:
dt = np.uint8 # WARNING: assuming 8-bit ASCII for np.str!
if dtype is str:
dt = np.uint8 # WARNING: assuming 8-bit ASCII for str!
else:

@@ -29,0 +29,0 @@ dt = dtype

@@ -55,6 +55,6 @@ #! /usr/bin/env python

UoA_data.lat = interp1d(nminfo.ppstime, nminfo.lat, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
UoA_data.long = interp1d(nminfo.ppstime, nminfo.lon, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
UoA_data.elev = interp1d(nminfo.ppstime, nminfo.elev, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
UoA_data.decday = interp1d(nminfo.ppstime, nminfo.time, fill_value='extrapolate')(fin['Data']['POSIX_time'][:].flatten())
UoA_data.lat = interp1d(nminfo.ppstime, nminfo.lat, fill_value='extrapolate')(fin['Data']['POSIX_time'].flatten())
UoA_data.long = interp1d(nminfo.ppstime, nminfo.lon, fill_value='extrapolate')(fin['Data']['POSIX_time'].flatten())
UoA_data.elev = interp1d(nminfo.ppstime, nminfo.elev, fill_value='extrapolate')(fin['Data']['POSIX_time'].flatten())
UoA_data.decday = interp1d(nminfo.ppstime, nminfo.time, fill_value='extrapolate')(fin['Data']['POSIX_time'].flatten())

@@ -147,7 +147,8 @@ try:

UoA_data.lat = interp1d(nminfo.ppstime, nminfo.lat, fill_value='extrapolate')(dt)
UoA_data.long = interp1d(nminfo.ppstime, nminfo.lon, fill_value='extrapolate')(dt)
len_min = np.min([nminfo.ppstime.shape[0], nminfo.lat.shape[0], nminfo.lon.shape[0]])
UoA_data.lat = interp1d(nminfo.ppstime[:len_min], nminfo.lat[:len_min], fill_value='extrapolate')(dt[:len_min])
UoA_data.long = interp1d(nminfo.ppstime[:len_min], nminfo.lon[:len_min], fill_value='extrapolate')(dt[:len_min])
UoA_data.elev = np.zeros_like(UoA_data.lat)
UoA_data.elev[:] = np.nan
UoA_data.decday = interp1d(nminfo.ppstime, nminfo.time, fill_value='extrapolate')(dt)
UoA_data.decday = interp1d(nminfo.ppstime[:len_min], nminfo.time[:len_min], fill_value='extrapolate')(dt[:len_min])

@@ -154,0 +155,0 @@ if 'x' in grp:

@@ -92,3 +92,3 @@ #! /usr/bin/env python

if dat.fn is not None:
fig[0].canvas.set_window_title(dat.fn)
fig[0].canvas.manager.set_window_title(dat.fn)

@@ -729,2 +729,179 @@ if s:

def plot_apres(dat, p=2, s=False, facecolor='w', linecolor='k', linewidth=1., linestyle='-',
               ftype='png', dpi=300, *args, **kwargs):
    """Plot an ApRES acquisition.

    If the data have not been converted to range (dat.Rcoarse is None),
    plot the raw amplitude against two-way travel time alongside the
    reference phasor. Otherwise plot amplitude, power, and (when present)
    phase uncertainty against coarse range.

    Parameters
    ----------
    dat: impdar.lib.ApresData.ApresData
        The ApresData object to plot.
    p: int, optional
        Pad factor used for the reference-phasor time axis. Default 2.
    s: bool, optional
        If True, save the figure to file instead of showing it. Default False.
    facecolor, linecolor, linewidth, linestyle:
        Matplotlib styling for the figure background and line plots.
    ftype: str, optional
        Image format used when saving. Default 'png'.
    dpi: int, optional
        Resolution used when saving. Default 300.
    """
    if dat.Rcoarse is None:
        fig, axs = plt.subplots(1, 2, figsize=(6, 6), facecolor=facecolor)
        for ax in axs:
            ax.invert_yaxis()
        axs[0].plot(dat.data[0, 0, :], dat.travel_time, linewidth=linewidth, linestyle=linestyle, c=linecolor)
        axs[0].set_ylabel('Two way travel time (usec)')
        axs[0].set_xlabel('V')
        axs[0].set_title('Amplitude')
        # number of frequencies to recover
        # NOTE(review): 2*snum/2 == snum; presumably this was meant to scale
        # with the pad factor p (as tau below does) — confirm before changing
        nf = int(np.floor(2*dat.snum/2))
        tau = np.arange(nf)/(dat.header.bandwidth*p)
        # Reference phasor built from center frequency and chirp gradient
        ϕ_r = 2.*np.pi*dat.header.fc*tau - (dat.header.chirp_grad*tau**2)/2.
        axs[1].plot(np.exp(-1j*ϕ_r), dat.travel_time, '.', c=linecolor, ms=linewidth)
        axs[1].set_title('Reference Phasor')
    else:
        fig, axs = plt.subplots(1, 3, figsize=(8, 6), facecolor=facecolor)
        for ax in axs:
            ax.invert_yaxis()
        axs[0].plot(dat.data[0, 0, :], dat.Rcoarse, linewidth=linewidth, linestyle=linestyle, c=linecolor)
        axs[0].set_ylabel('Range (m)')
        axs[0].set_xlabel('V')
        axs[0].set_title('Amplitude')
        axs[1].plot(10.*np.log10(dat.data[0, 0, :]**2.), dat.Rcoarse, linewidth=linewidth, linestyle=linestyle, c=linecolor)
        axs[1].tick_params(labelleft=False)
        axs[1].set_xlabel('dB')
        axs[1].set_title('Power')
        if dat.uncertainty is not None:
            axs[2].plot(dat.uncertainty, dat.Rcoarse, linewidth=linewidth, linestyle=linestyle, c=linecolor)
            axs[2].tick_params(labelleft=False)
            axs[2].set_xlabel('rad')
            axs[2].set_title('Phase Uncertainty')
    # Guard against unnamed data; matches the convention used elsewhere
    # in this module for set_window_title.
    if dat.fn is not None:
        fig.canvas.manager.set_window_title(dat.fn)
    if s:
        fig.savefig(os.path.splitext(dat.fn)[0] + '.' + ftype, dpi=dpi)
    else:
        plt.tight_layout()
        plt.show()
def plot_apres_diff(diffdat, s=False, facecolor = 'w',
                    markercolor = 'k', markercolor2 = 'grey', markersize = 5., markerstyle = '.', linestyle='',
                    ftype = 'png', dpi = 300, *args, **kwargs):
    """Plot the fields of a time-differenced ApRES acquisition pair.

    Panels: power for both acquisitions, coherence magnitude, coherence
    phase offset, and (when computed) vertical velocity, all against range.

    Parameters
    ----------
    diffdat: impdar.lib.ApresData.ApresTimeDiff
        The ApresTimeDiff object to plot.
    s: bool, optional
        If True, save the figure to file instead of showing it. Default False.
    facecolor, markercolor, markercolor2, markersize, markerstyle, linestyle:
        Matplotlib styling; markercolor2 is used for the second acquisition.
    ftype: str, optional
        Image format used when saving. Default 'png'.
    dpi: int, optional
        Resolution used when saving. Default 300.
    """
    fig, axs = plt.subplots(1,4, figsize=(10, 6),facecolor=facecolor)
    for ax in axs:
        ax.invert_yaxis()
    # NOTE(review): both "acquisition" curves plot diffdat.data — the second
    # call presumably should use the second acquisition's data array; verify
    # the attribute name (e.g. diffdat.data2) before fixing.
    axs[0].plot(10.*np.log10(diffdat.data**2.), diffdat.range,
                marker=markerstyle, ms=markersize, linestyle=linestyle, c=markercolor,
                label='acquisition 1')
    axs[0].plot(10.*np.log10(diffdat.data**2.), diffdat.range,
                marker=markerstyle, ms=markersize//2, linestyle=linestyle, c=markercolor2,
                label='acquisition 2')
    axs[0].legend()
    axs[0].set_ylabel('Range (m)')
    axs[0].set_xlabel('dB')
    axs[0].set_title('Power')
    # Coherence magnitude between the two acquisitions
    if diffdat.co is not None:
        axs[1].plot(abs(diffdat.co), diffdat.ds,
                    marker=markerstyle, ms=markersize, c=markercolor, linestyle=linestyle)
        axs[1].tick_params(labelleft=False)
        axs[1].set_xlabel('')
        axs[1].set_title('Coherence')
    # Coherence phase (offset between acquisitions)
    if diffdat.co is not None:
        axs[2].plot(np.angle(diffdat.co), diffdat.ds,
                    marker=markerstyle, ms=markersize, c=markercolor, linestyle=linestyle)
        axs[2].tick_params(labelleft=False)
        axs[2].set_xlabel('rad')
        axs[2].set_xticks([-np.pi,0,np.pi])
        axs[2].set_xticklabels(['-π','0','π'])
        axs[2].set_title('Phase Offset')
    # Vertical velocity derived from the phase offset
    if diffdat.w is not None:
        axs[3].plot(diffdat.w,diffdat.ds,
                    marker=markerstyle, ms=markersize, c=markercolor, linestyle=linestyle)
        axs[3].tick_params(labelleft=False)
        axs[3].set_xlabel('m/yr')
        axs[3].set_title('Vertical Velocity')
    fig.canvas.manager.set_window_title(diffdat.fn)
    if s:
        fig.savefig(os.path.splitext(diffdat.fn)[0] + '.' + ftype, dpi=dpi)
    else:
        plt.tight_layout()
        plt.show()
def plot_apres_quadpol(qpdat, s=False, facecolor='w', tick_color='k', fg_color='k',
                       bed=4000, cmap1='hot', cmap2='Greys', cmap3='twilight_shifted',
                       ftype='png', dpi=300, *args, **kwargs):
    """Plot relevant data fields for a quadpol data object, including:
        1 - co-polarized image
        2 - cross-polarized image
        3 - co-polarized coherence
        4 - phase gradient

    Parameters
    ----------
    qpdat: impdar.lib.ApresData.ApresQuadPol
        The ApresQuadPol object to plot.
    s: bool, optional
        If True, save the figure to file instead of showing it. Default False.
    facecolor, tick_color, fg_color:
        Figure background, tick, and foreground-label colors.
    bed: float, optional
        Bed depth (m); area below is masked and y-limits set from it. Default 4000.
    cmap1, cmap2, cmap3: str, optional
        Colormaps for the power images, coherence magnitude, and phase.
    ftype: str, optional
        Image format used when saving. Default 'png'.
    dpi: int, optional
        Resolution used when saving. Default 300.
    """
    Θs, Ds = np.meshgrid(qpdat.thetas, qpdat.range)
    fig, axs = plt.subplots(1, 5, figsize=(10, 4), facecolor=facecolor)
    # Panel 1: co-polarized (HH) power
    axs[0].tick_params(labelleft=True, color=tick_color, labelcolor=tick_color)
    cf = axs[0].pcolormesh(Θs, Ds, 10.*np.log10(qpdat.HH**2.).real, cmap=cmap1, zorder=-1)
    axs[0].set_ylabel('Range (m)', c=tick_color)
    # Panel 2: cross-polarized (HV) power, with cross-pol extinction axis if found
    axs[1].tick_params(labelleft=False, color=tick_color, labelcolor=tick_color)
    axs[1].pcolormesh(Θs, Ds, 10.*np.log10(qpdat.HV**2.).real, cmap=cmap1, zorder=-1)
    if qpdat.cpe is not None:
        axs[1].plot(qpdat.cpe, qpdat.range, 'm', zorder=3)
    cb = plt.colorbar(cf, ax=axs[0], orientation='horizontal')
    cb.set_label('Power (dB)', c=fg_color)
    cb = plt.colorbar(cf, ax=axs[1], orientation='horizontal')
    # Use fg_color here too, consistent with the first power colorbar
    cb.set_label('Power (dB)', c=fg_color)
    # Panel 3: coherence magnitude between HH and VV
    axs[2].tick_params(labelleft=False, color=tick_color, labelcolor=tick_color)
    if qpdat.chhvv is not None:
        cf = axs[2].contourf(Θs, Ds, np.abs(qpdat.chhvv), cmap=cmap2, levels=100, zorder=-1)
        cb = plt.colorbar(cf, ax=axs[2], ticks=[0, 0.5, 1.], orientation='horizontal')
        cb.set_label('$|c_{hhvv}|$', c=fg_color)
    # Panel 4: coherence phase
    axs[3].tick_params(labelleft=False, color=tick_color, labelcolor=tick_color)
    if qpdat.chhvv is not None:
        cf = axs[3].contourf(Θs, Ds, np.angle(qpdat.chhvv), cmap=cmap3, levels=100, zorder=-1)
        cb = plt.colorbar(cf, ax=axs[3], ticks=[-np.pi, 0, np.pi], orientation='horizontal')
        # Raw string so \phi is not treated as a (bad) escape sequence
        cb.set_label(r'$\phi_{hhvv}$', c=fg_color)
        cb.ax.set_xticklabels(['-π', '0', 'π'], color=fg_color)
    # Mask everything below the bed and fix shared axis limits/ticks
    for ax in axs[:4]:
        ax.fill_between(np.linspace(0, np.pi, 10), bed, 10000, color='w', alpha=0.8, zorder=1)
        ax.axhline(bed, c='k', lw=2, zorder=2)
        ax.set_ylim(bed+200, 0)
        ax.set_xlim(0, np.pi)
        ax.set_xticks([0, np.pi/2., np.pi])
        ax.set_xticklabels(['0', 'π/2', 'π'], color=tick_color)
    # Panel 5: coherence phase along the cross-pol extinction axis
    axs[4].tick_params(labelleft=False)
    if qpdat.chhvv is not None:
        axs[4].plot(np.angle(qpdat.chhvv_cpe), Ds[:, 0], 'k.', ms=2)
        axs[4].set_ylim(bed+200, 0)
        axs[4].set_xlim(-np.pi, np.pi)
        axs[4].set_xticks([-np.pi, 0., np.pi])
        axs[4].set_xticklabels(['-π', '0', 'π'])
        # Invisible scatter feeds a fully transparent colorbar so this panel
        # is resized like the colorbar-bearing panels and the axes align
        cf = axs[4].scatter(np.angle(qpdat.chhvv_cpe)+5., Ds[:, 0], c=np.zeros_like(Ds[:, 0]), alpha=0.)
        cb = plt.colorbar(cf, ax=axs[4], shrink=0.6, orientation='horizontal')
        cb.ax.xaxis.set_tick_params(color=facecolor)
        cb.outline.set_edgecolor(facecolor)
        plt.setp(plt.getp(cb.ax.axes, 'xticklabels'), color=facecolor)
    # Guard against unnamed data; matches the convention used elsewhere
    # in this module for set_window_title.
    if qpdat.fn is not None:
        fig.canvas.manager.set_window_title(qpdat.fn)
    if s:
        fig.savefig(os.path.splitext(qpdat.fn)[0] + '.' + ftype, dpi=dpi)
    else:
        plt.tight_layout()
        plt.show()
def get_offset(dat, flatten_layer=None):

@@ -731,0 +908,0 @@ if flatten_layer is None:

@@ -63,3 +63,3 @@ #! /usr/bin/env python

def nmo(self, ant_sep, uice=1.69e8, uair=3.0e8, const_firn_offset=None, rho_profile=None, permittivity_model=firn_permittivity, const_sample=True):
def nmo(self, ant_sep, uice=1.69e8, uair=3.0e8, const_firn_offset=None, rho_profile=None, permittivity_model=firn_permittivity, const_sample=False):
"""Normal move-out correction.

@@ -129,3 +129,3 @@

# Interpolate velocity profile onto constant depth spacing
d_interp = np.linspace(np.min(profile_depth, 0), max(profile_depth), self.snum)
d_interp = np.linspace(np.min(profile_depth, 0), max(profile_depth), 10 * self.snum)
u_interp = interp1d(profile_depth, profile_u)(d_interp)

@@ -138,2 +138,4 @@

if rho_profile is not None:
print('Iterating velocity profile in firn...')
for i,t in enumerate(self.travel_time):

@@ -143,7 +145,13 @@ if rho_profile is None:

else:
# get RMS velocity used for correction
d = minimize(optimize_moveout_depth,.5*t*uice,args=(t,ant_sep,d_interp,u_interp),
tol=1e-8,bounds=[(0,0.5*t*uair)])['x'][0]
u_rms = np.sqrt(np.mean(u_interp[d_interp<d]**2.))
# get the upper leg of the trave_path triangle (direct arrival) from the antenna separation and the rms velocity
### Get RMS velocity used for correction
d = t / 2. * uice * 1.0e-6 # start by guessing
d_last = d.copy()
j, tol = 0, 0.1 * self.dt / 2. * uice
while abs(d-d_last) > tol or j < 5: # Iterate until convergence
d_last = d.copy()
vels = u_interp[d_interp<=d]
u_rms = np.sqrt(np.mean(vels**2.))
d = t / 2. * u_rms * 1.0e-6
j += 1
# get the upper leg of the travel_path triangle (direct arrival) from the antenna separation and the rms velocity
tsep_ice = 1e6*(ant_sep / u_rms)

@@ -155,6 +163,16 @@ # hypotenuese, adjust to 'transmit time' by adding the separation time

# Time vector with original time step
self.travel_time = np.arange(min(self.travel_time),max(nmotime),self.dt*1e6)
self.snum = len(self.travel_time)
# Interpolate onto new time vector
newdata = np.empty((self.snum,self.tnum))
for ti in np.arange(self.tnum):
trace = self.data[:,ti]
f = interp1d(nmotime,trace,kind='linear')
newdata[:,ti] = f(self.travel_time)
self.data = newdata
# --- Cleanup --- #
# save the updated time vector
self.travel_time = nmotime
# time to depth conversion

@@ -171,2 +189,4 @@ if rho_profile is None:

print('Normal Moveout filter complete.')
# Set flags

@@ -181,29 +201,2 @@ try:

def optimize_moveout_depth(d_in, t, ant_sep, profile_depth, profile_u):
    """Objective function for the depth iteration in the nmo filter.

    With a depth-variable velocity profile, the depth and the RMS velocity
    above it depend on each other; minimizing this residual converges to a
    self-consistent depth.

    Parameters
    ----------
    d_in: float
        initial guess at the depth
    t: float
        time
    ant_sep: float
        antennae separation
    profile_depth: array
        depths corresponding to input velocity profile
    profile_u: array
        velocity
    """
    # RMS velocity over the portion of the profile above the guessed depth
    above = profile_depth <= d_in
    if not np.any(above):
        raise ValueError('Profile not shallow enough. Extend to cover top')
    u_rms = np.sqrt(np.mean(profile_u[above] ** 2.))
    # Hypotenuse of the travel-path triangle; residual against the guess
    hyp = t / 2. * u_rms
    return abs(np.sqrt(hyp ** 2. - ant_sep ** 2.) - d_in)
def traveltime_to_depth(self, profile_depth, profile_rho, c=3.0e8, permittivity_model=firn_permittivity):

@@ -470,3 +463,3 @@ """

"""
if isinstance(self.trig, (float, int, np.float, np.int64)):
if isinstance(self.trig, (float, int, float, np.int64)):
gain = self.travel_time[int(self.trig) + 1:] * slope

@@ -473,0 +466,0 @@ self.data[int(self.trig + 1):, :] *= np.atleast_2d(gain).transpose()

Metadata-Version: 2.1
Name: impdar
Version: 1.1.5
Version: 1.1.6
Summary: Scripts for impulse radar

@@ -5,0 +5,0 @@ Home-page: http://github.com/dlilien/impdar

@@ -31,3 +31,4 @@ #! /usr/bin/env python

'impplot=impdar.bin.impplot:main',
'apdar=impdar.bin.apdar:main']
'apdar=impdar.bin.apdar:main',
'qpdar=impdar.bin.qpdar:main']

@@ -39,2 +40,7 @@ ext = '.pyx' if CYTHON else '.c'

"impdar/lib/migrationlib/mig_cython.c"],
include_dirs=[np.get_include()]),
Extension("impdar.lib.ApresData.coherence",
sources=["impdar/lib/ApresData/_coherence"
+ ext,
"impdar/lib/ApresData/coherence.c"],
include_dirs=[np.get_include()])]

@@ -45,3 +51,3 @@ if CYTHON:

version = '1.1.5'
version = '1.1.6'
packages = ['impdar',

@@ -48,0 +54,0 @@ 'impdar.lib',