#!/usr/bin/env python
usage="""\
Takes output of RunPlayback.py and produces some basic statistics
for the web/ app.
Normally RunPlayback.py handles calling this script, but you
can call it manually for a couple reasons:
 1- For manual testing, you can run:
      ./ProcessResults.py --reprocess RX17-1234
    to force processing a single directory.
 2- If you've manually modified XML files in ResultsQueue/ the
    script may not figure out that you made a change, so force it with:
      ./ProcessResults.py --force
    to update the web pages.
"""

######################################################################
# Copyright Trimble 2018
######################################################################

if __name__ == "__main__":
    import matplotlib
    # Select the non-interactive 'agg' backend *before* pyplot is pulled
    # in by the star imports below, so the script runs headless (no X
    # display needed) when invoked from the command line.
    matplotlib.use("agg")

import multiprocessing as mp
if __name__ == "__main__":
    # The default 'fork' method will randomly hang on rare occasions,
    # so force 'spawn' for worker processes (used by the
    # ProcessPoolExecutor imported below).  Must be set once, before
    # any pool is created.
    mp.set_start_method("spawn")

from mutils import *
from mutils.PosPacConst import *
import xml.etree.ElementTree as ET
import json
import os
from collections import OrderedDict, namedtuple
from concurrent.futures import ProcessPoolExecutor
import subprocess
import shutil
import glob
import re
import time
import shlex
import smtplib
from email.message import EmailMessage
from overpasses import find_overpasses, get_rcvr_err
from treedata import find_tree
import sys
from collections import defaultdict
from numba import njit
from ProcessResultsStorage import *
from ProcessResultsSlips import run_SNPC_slip_check, analyze_slip_data
from ProcessResultsRange import analyze_range_resids
import TimeDebugger as TD

# where to send email in case of fatal error:
def send_email( subject, body ):
    """Send a plain-text notification mail through the Trimble SMTP relay.

    subject -- subject line of the message
    body    -- plain-text message body
    """
    recipients = ["will_lentz@trimble.com", "stuart_riley@trimble.com", "rui_xu@trimble.com"]
    msg = EmailMessage()
    msg.set_content(body)
    msg['Subject'] = subject
    msg['From'] = "Spirent_RF_playback@trimble.com"
    msg['To'] = ', '.join(recipients)
    # Context manager guarantees the SMTP connection is closed.
    with smtplib.SMTP('smtp.trimble.com') as smtp:
        smtp.send_message(msg)


# Where does processing output go?
output_dir = "OutputResults"

# where is NoPI?
# NOTE: run_dir is captured at import time, so the NoPi paths below
# depend on the current working directory when this module is loaded.
run_dir = os.path.abspath(os.path.curdir)
nopi_dir = run_dir + "/../NoPi/"
nopi_exe = "NoPi" # should be in path
nopi_di  = run_dir + "/../NoPi_Display/NoPiDI_Main.py"

# For position statistics, what CDF percentages to calculate?
cdf_vals = [50,68,95,99,100]

def default_scenario():
    """Build an empty scenario-description record.

    Returns a dotdict pre-populated with every field the scenario XML
    parser may later fill in, so downstream code can rely on the
    attributes existing.
    """
    field_defaults = [
        ('config_filename', ""),
        ('desc', ""),
        ('type', None),
        ('rm_iono', False),
        ('truth_format', None),
        ('NoPI_base', None),
        ('NoPI_ini', None),
        ('truth_file', None),
        ('truth', None),
        ('span', []),
        ('overpass_auto_type', None),
        ('tree_auto_type', None),
    ]
    scenario = dotdict({})
    for name, value in field_defaults:
        setattr(scenario, name, value)
    return scenario

def default_stats():
    """Create an empty statistics record.

    The returned dotdict carries:
      nopi: NoPi results keyed by combo type (filled by do_NoPI)
      seg[segment_name][fixtype][val]:
        segment_name = All | Highway | etc.
        fixtype = 0 | 4 | etc.  (same as in rec 29)
        val = 2d | 3d | cdf_vals | len
    """
    stats = dotdict({})
    stats.nopi = dict()
    stats.seg = dict()
    return stats

def do_NoPI( config, data_dir, out_nopi_path, stats ):
    """Inputs: config = default_scenario() = info from scenario XML file
       data_dir = where are T04 receiver files?
       out_nopi_path = directory to put NoPI output in?
    Output: stats = default_stats() = will fill in stats.nopi
    Run NoPi and NoPi_Display on the given receiver files.
    If out_nopi_path already exists the processing step is skipped and
    only the existing NoPiDI_Stats_Summary.txt is (re)parsed.
    """
    # Guard: only ever write under OutputResults/RX* so a bad argument
    # can't make us create/modify an arbitrary directory.
    if out_nopi_path.find(output_dir + '/RX') != 0:
        raise RuntimeError("Suspicious NoPI output directory",out_nopi_path)

    if not os.path.isdir(out_nopi_path):
        # Copy files into place for NoPI
        os.mkdir(out_nopi_path)
        shutil.copy( nopi_dir+'/NoPi_Default.cfg', out_nopi_path )
        shutil.copyfile( config.NoPI_ini, out_nopi_path+'/piline.ini' )

        # Run NoPI & generate plots.  shell=True is required for the
        # 'cd X && ...' form; all path components are generated
        # internally, not from untrusted input.
        cmd = "cd %s && %s %s %s/.rcvr.T04" % (out_nopi_path,
                                               nopi_exe,
                                               config.NoPI_base,
                                               os.getcwd() + '/' + data_dir)
        print(cmd)
        subprocess.check_call(cmd,shell=True)
        cmd = "cd %s && %s -p ." % (out_nopi_path, nopi_di)
        print(cmd)
        subprocess.check_call(cmd,shell=True)
        # Compress the (large) .mtb intermediates in place.
        cmd = "cd %s && gzip *.mtb" % (out_nopi_path)
        print(cmd)
        subprocess.check_call(cmd,shell=True)

    # Parse the NoPiDI summary table into stats.nopi[combo_type].
    # Column positions come from the '%%' header line; the -1 offset
    # suggests the '%%' token occupies slot 0 of the header's split()
    # but data rows have no such token -- confirm against NoPiDI output.
    num_pos = -1
    car_std_pos = -1
    code_std_pos = -1
    combo_type_pos = -1
    valid_pos = -1
    for line in open(out_nopi_path+'/NoPiDI_Stats_Summary.txt'):
        w = line.rstrip().split()
        if line.startswith('%%'):
            valid_pos = w.index("Valid")-1
            combo_type_pos = w.index("ComboType")-1
            num_pos = w.index("DDCa_#epochs")-1
            car_std_pos = w.index("DDCa_std")-1
            code_std_pos = w.index("DDCd_std")-1
            continue
        # NOTE(review): assumes the '%%' header precedes all data rows;
        # if not, valid_pos is still -1 and this silently tests the
        # last column of the row.
        if w[valid_pos] != '1':
            continue
        combo_type = w[combo_type_pos]
        stats.nopi[combo_type] = {}
        stats.nopi[combo_type]['num'] = int(w[num_pos])
        stats.nopi[combo_type]['car_std'] = float(w[car_std_pos])
        stats.nopi[combo_type]['code_std'] = float(w[code_std_pos])


def do_cdf_plots( out_path, desc, fixtype, main_pos, cdf2, cdf3, vcdf2, vcdf3 ):
    """Inputs: out_path = what directory to put plots in
    desc = Environment description (e.g., Urban)
    fixtype = 'RTK fixed', etc.
    main_pos = True: data from rec 35:2.  False: data from rec35:16
    cdf2 = (cx,cy) for 2D position error
    cdf3 = (cx,cy) for 3D position error
    vcdf2 = (cx,cy) for 2D velocity error
    vcdf3 = (cx,cy) for 3D velocity error
    Generate some position/velocity error CDF plots.
    Plotting calls (figure/semilogx/savefig/...) come from the
    pylab-style star import of mutils at the top of the file.
    """
    if main_pos:
        f_prefix = 'Main'
    else:
        f_prefix = 'Stinger'
    # fixtype goes into directory and file names, so strip spaces.
    f_fixtype = fixtype.replace(' ','_')

    out_path += '/%s_CDFs/%s'%(f_prefix,f_fixtype) # eg Main_CDFs/RTK_Fixed/
    if not os.path.isdir(out_path):
        os.makedirs(out_path)

    if len(cdf2[0]) < 100:
        # Too few points to be meaningful.  (Note the output directory
        # above has already been created by this point.)
        return

    # One figure for position errors, one for velocity errors.
    for txt,cxcy2,cxcy3 in [('pos',cdf2,cdf3),('vel',vcdf2,vcdf3)]:
        figure()

        # '%%' in the format string renders a literal '%' in the label.
        cdf50, cdf95 = get_cdf_percents( cxcy2[0], cxcy2[1], [50,95] )
        label = '2D 50%%=%.3f 95%%=%.3f'%(cdf50,cdf95)
        semilogx( cxcy2[0], cxcy2[1]*100, label=label )

        cdf50, cdf95 = get_cdf_percents( cxcy3[0], cxcy3[1], [50,95] )
        label = '3D 50%%=%.3f 95%%=%.3f'%(cdf50,cdf95)
        semilogx( cxcy3[0], cxcy3[1]*100, label=label )

        grid()
        if txt == 'vel':
            xlabel('Velocity Error[m/s]')
        else:
            xlabel('Position Error[m]')
        ylabel('CDF %')
        legend(loc='lower right')
        title('%s %s FixType:%s Desc:%s (# pts %d)'%
              (out_path.split('/')[-1],
               f_prefix,
               fixtype,
               desc,
               len(cxcy2[0])
              ))
        savefig(out_path+'/%s_%s_%s_%s_CDF.png'%
                (f_prefix,
                 f_fixtype,
                 desc.replace(' ','_'),
                 txt
                ),
                dpi=150)
        # Free the figure so long batch runs don't accumulate memory.
        close()


def do_one_overpass_plot( f_prefix,
                          out_path,
                          d,
                          err_type,
                          overpass_locs,
                          events,
                          rcvr_err ):
    """Inputs: f_prefix = filename prefix (Stinger/Main)
    out_path = directory to put plots in
    d = position data from receiver (from vd2cls)
    err_type = '2D', '3D', 'Hgt'
    overpass_locs = overpass locations (from find_overpasses)
    events = overpass events (from find_overpasses)
    rcvr_err = receiver error near each overpass (from get_rcvr_err)

    Creates range vs. err plots and also sorted text list of errors.
    Raises RuntimeError on an unknown err_type.
    NOTE(review): assumes at least one overpass -- concatenate() below
    fails on an empty list.
    """
    fig,ax=subplots(2,1,sharex=True)
    max_overpass_errs = []
    all_dist = []
    all_err = []
    for loc,event,(err,i_ev,i_d) in zip(overpass_locs,events, rcvr_err):
        # Error component for this pass, selected by column name.
        curr_err = err[:,err.k['err'+err_type]]
        # Worst epoch of this pass, and the receiver state at that epoch.
        i_max = argmax(curr_err)
        max_err = curr_err[i_max]
        curr_d = d[i_d][i_max]
        max_fixtype = curr_d.FIXTYPE
        # Receiver's own uncertainty estimate matching the error type.
        if err_type=='3D':
            max_sigma = sqrt(curr_d.SigE**2 + curr_d.SigN**2 + curr_d.SigU**2)
        elif err_type=='2D':
            max_sigma = sqrt(curr_d.SigE**2 + curr_d.SigN**2)
        elif err_type=='Hgt':
            max_sigma = curr_d.SigU
        else:
            raise RuntimeError("Unknown err_type",err_type)
        max_overpass_errs.append( (max_err,
                                   max_sigma,
                                   max_fixtype,
                                   loc,
                                   err.TIME[0],
                                   err.TIME[-1]) )
        # Signed along-track range: the sign is flipped where the
        # 'towards' flag is set so the two sides of the overpass are
        # distinguished -- presumably approach side negative; confirm
        # against find_overpasses().
        dist = event[i_ev].RANGE
        dist[event[i_ev].towards!=0] *= -1
        all_dist.append( dist )
        all_err.append( curr_err )
        ax[0].plot( dist, curr_err, alpha=0.5 )
    all_dist = concatenate(all_dist)
    all_err = concatenate(all_err)
    # Sort passes worst-first so people can inspect the worst data.
    max_overpass_errs = sorted( max_overpass_errs,
                                key=lambda x:x[0],
                                reverse=True )

    # Print list of overpasses sorted by maximum error.  That way
    # people can look at the worst overpass data
    with open(out_path+'/%s_%s_overpasses_sorted.txt'%(f_prefix,err_type),'w') as f_txt:
        f_txt.write('# max_%s_err[m], %s_sigma[m], fixtype, lat[deg], lon[deg], t_start, t_end\n'%(err_type,err_type))
        for max_err,max_sigma,max_fixtype,(lat,lon),t0,t1 in max_overpass_errs:
            f_txt.write('%.3f %.3f %2d %.9f %.9f %.1f %.1f\n'%
                        (max_err,
                         max_sigma,
                         int(max_fixtype),
                         lat,
                         lon,
                         t0,
                         t1))

    ax[0].set_ylabel('per-run %s err[m]'%err_type)
    ax[0].set_title('%s %s overpass %s error'%
                    (out_path.split('/')[-1],
                     f_prefix,
                     err_type
                    ))

    # Lower subplot: median error in 2 m range bins from -200 m to 200 m.
    x_bins = r_[-200:200:2]
    y_med = ones(len(x_bins))*nan
    for n in range(len(x_bins)-1):
        i = find( (all_dist>=x_bins[n]) & (all_dist<=x_bins[n+1]) )
        if len(i) > 0:
            y_med[n] = median( all_err[i] )

    ax[1].plot( x_bins, y_med )
    ax[1].set_ylabel('median %s err[m]'%err_type)
    ax[1].set_xlabel('along-track range[m]')
    for a in ax: a.grid()
    tight_layout()

    savefig(out_path+'/%s_%s_overpasses.png'%(f_prefix,err_type),
            dpi=150)
    close()


def gen_overpass_text_time_series( f_out, d, pp, events ):
    """Inputs: f_out = text file to create (already open for writing)
    d = position data from receiver (from vd2cls)
    pp = pospac truth data
    events = overpass events (from find_overpasses)

    Creates text file of raw overpass data for further analysis:
    one line per epoch that is common to the overpass event, the
    receiver data and the truth data.
    """
    f_out.write("# pass_num time along_trk_range_to_overpass fixtype lat lon hgt pdop dE dN dU sigmaE sigmaN sigmaU true_heading true_roll true_pitch true_sigE true_sigN true_sigU\n")
    for pass_num, overpass in enumerate(events):
        # Line up overpass, receiver and truth samples by time tag.
        t,i_o,i_d,i_p = intersect(overpass.TIME, d.TIME, pp[:,dPP_TIME])
        # Receiver-minus-overpass position difference in local ENU [m].
        dE,dN,dU = llh2enu( d[i_d,d.k.LAT:d.k.LAT+3],
                            overpass[i_o,overpass.k.LAT:overpass.k.LAT+3] )
        # Signed along-track range (sign flipped on the 'towards' side,
        # same convention as do_one_overpass_plot).
        dist = overpass[i_o].RANGE
        dist[overpass[i_o].towards!=0] *= -1
        # One text row per common epoch; columns match the header above.
        for i in range(len(t)):
            f_out.write("%d %.3f %.3f %d %.9f %.9f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%
                        (pass_num,
                         t[i],
                         dist[i],
                         d.FIXTYPE[i_d[i]],
                         d.LAT[i_d[i]],
                         d.LON[i_d[i]],
                         d.HGT[i_d[i]],
                         d.PDOP[i_d[i]],
                         dE[i],
                         dN[i],
                         dU[i],
                         d.SigE[i_d[i]],
                         d.SigN[i_d[i]],
                         d.SigU[i_d[i]],
                         pp[i_p[i],dPP_heading],
                         pp[i_p[i],dPP_roll],
                         pp[i_p[i],dPP_pitch],
                         pp[i_p[i],dPP_sigE],
                         pp[i_p[i],dPP_sigN],
                         pp[i_p[i],dPP_sigU]
                        ))


# Find indices for t[] that lie between t0[0]->t1[0], t0[1]->t1[1], etc.
@njit
def get_time_range(t,t0,t1):
    """For each half-open window [t0[j], t1[j]) return (i0, i1) such
    that t[i0[j]:i1[j]] are the samples of t inside window j.

    Entries remain -1 when no sample falls in a window; callers check
    for -1 and skip those windows.  Arrays are float (from np.ones);
    callers cast to int before slicing.

    Fix vs. original: i1[j] is now initialized together with i0[j], so
    a window containing exactly one sample yields a valid one-element
    slice instead of i1[j] == -1 (which made callers drop the window).
    """
    i0=-np.ones(len(t0))
    i1=-np.ones(len(t1))
    for i in range(len(t)):
        for j in range(len(t0)):
            if t[i]>=t0[j] and t[i]<t1[j]:
                if i0[j] < 0:
                    # First sample in window j: set both ends so a
                    # single-sample window is still a valid slice.
                    i0[j] = i
                    i1[j] = i+1
                elif i1[j] < i+1:
                    # Extend the window end past the latest sample seen.
                    i1[j] = i+1
    return (i0,i1)


def bin_measurements( meas, events, el_mask, start=-200,stop=200.1,step=10 ):
    """Lump measurement tracking data into overpass distance bins.
        meas = rec 35:19 data
        events = from find_overpasses() of find_tree()
          If this is overpass data event.RANGE is in meters, if it is a tree
          we use time so event.RANGE is seconds before/after a tree. By
          default we use the start/stop range of +/- 200m and assume we are
          getting overpass data
        el_mask = elevation mask [degrees]
    Returns: data for plot_meas_bins() -- a dict with the bin edges,
    the mask, and per-(sat_type,freq,track) counts of all / PLL-locked /
    lock-point / lock-without-slip measurements, each scaled by the
    maximum possible measurement count per bin.
    """
    x = r_[start:stop:step]
    # Per-signal accumulators; one bucket per range bin.
    all_count = defaultdict( lambda: zeros(len(x)-1) )
    pll_count = defaultdict( lambda: zeros(len(x)-1) )
    lock_count = defaultdict( lambda: zeros(len(x)-1) )
    lock_no_slips_count = defaultdict( lambda: zeros(len(x)-1) )
    # NOTE(review): flags read from meas.kf here but meas.f below
    # (dMEAS_MASTER_SUB) -- confirm both namespaces are intended.
    dLOCK_POINT = (meas.kf.dMEAS_LOCK_POINT|meas.kf.dMEAS_PHASE_VALID)
    dSLIP_FLAG = (meas.kf.dMEAS_SLIP)

    # Find start/end indices for each event.  This is JIT-accelerated.
    i0,i1 = get_time_range( array(meas.TIME),
                            r_[[ev.TIME[0] for ev in events]],
                            r_[[ev.TIME[-1] for ev in events]] )
    i0 = array(i0,dtype=int)
    i1 = array(i1,dtype=int)

    for n,event in enumerate(events):
        # 'meas' is often huge, so narrowing the working set dramatically
        # improves performance of the rest of the loop.
        if i0[n]==-1 or i1[n]==-1:
            continue
        m0 = meas[ i0[n]:i1[n] ]
        m0 = m0[ m0.EL>=el_mask ]
        if len(m0) < 2:
            continue

        # Convert 10m range bins to time bins
        # Use msecs so we don't run into time tag rounding errors.
        range_t_ms = []
        for i in range(len(x)-1):
            idx = find((event.RANGE>=x[i]) & (event.RANGE<x[i+1]) )
            if len(idx) == 0:
                range_t_ms.append( nan )
            else:
                range_t_ms.append( int(round(event.TIME[idx[0]]*1000)) )
        # NOTE(review): 'idx' here is left over from the *last* loop
        # iteration, so this appends the closing time edge of the final
        # range bin (or nan when that bin has <2 points) -- confirm
        # this is the intended behavior for the last bin edge.
        if len(idx) > 1:
            range_t_ms.append( int(round(event.TIME[idx[-1]]*1000)) )
        else:
            range_t_ms.append( nan )

        # Compute receiver time tags for current event assuming no interruptions.
        # unique(diff(...)) is [0, dt, ...] because many sats share each
        # epoch, so element [1] is the epoch interval in ms.
        dt = unique(diff(around(m0.TIME*1000)))[1]/1000.
        synthetic_t_ms = around(r_[min(m0.TIME):max(m0.TIME)+.001:dt]*1000)

        # For each signal type, accumulate PLL and lock point known statistics
        sigs = get_signals(m0,m0.k)
        for sat_type,freq_track in sigs.items():
            if sat_type == RTConst.RT_SatType_SBAS:
                continue

            for freq,track in freq_track:
                # Measurements for just this (sat type, freq, track).
                m00 = m0[ (m0.SAT_TYPE==sat_type)
                          &(m0.FREQ==freq)
                          &(m0.TRACK==track) ]
                if len(m00) == 0:
                    continue

                idx = (sat_type,freq,track)
                for i in range(len(range_t_ms)-1):
                    # Select this signal's measurements inside bin i
                    # (millisecond time tags avoid float rounding).
                    m00_ms = around(m00.TIME*1000).astype(int)
                    m000 = m00[ (m00_ms>=range_t_ms[i])
                                &(m00_ms<range_t_ms[i+1]) ]
                    if len(m000) == 0:
                        continue

                    # Compute max # of measurements for current range_t[]
                    # assuming no interruptions
                    synthetic_len = len(
                        synthetic_t_ms[(synthetic_t_ms>=range_t_ms[i])
                                       &(synthetic_t_ms<range_t_ms[i+1])])
                    scale = 1./synthetic_len

                    all_count[idx][i] += len(m000)*scale
                    cstate = m000.CSTATE.astype(int)
                    if int(m000.MEAS[0])&meas.f.dMEAS_MASTER_SUB:
                        pll_count[idx][i] += len(find(cstate==16))*scale
                    else:
                        # for new code, the cstate should be 11.
                        # for old code it will be 9 (but L2E can sometimes be 7...)
                        pll_count[idx][i] += len(find((cstate==11)
                                                      |(cstate==9)
                                                      |(cstate==7)))*scale
                    # Lock-point and slip bits from the MEAS flag word.
                    mlock = m000.MEAS.astype(int)&dLOCK_POINT
                    slip = m000.MEAS.astype(int)&dSLIP_FLAG
                    lock_count[idx][i] += len(find(mlock==dLOCK_POINT))*scale
                    lock_no_slips_count[idx][i] += len(find( (mlock==dLOCK_POINT) & (slip==0)))*scale

    return {'x':x,
            'el_mask':el_mask,
            'all':all_count,
            'pll':pll_count,
            'lock':lock_count,
            'lock_no_slip':lock_no_slips_count}


def plot_meas_bins( out_path, num_events, results, 
                    all_sat_types=None,
                    xlabel='distance to overpass[m]'):
    """Plot measurement epochs near overpasses.
        out_path = directory to save PNGs in
        num_events = number of overpass events (shown in plot titles)
        results = from bin_measurements()
        all_sat_types = optional.  List of RT sat types to include.
        xlabel = x-axis label for the bottom subplot
    Writes one PNG per (sat type, scaling) combination.
    """
    out_path= os.path.abspath(out_path)
    el_mask = results['el_mask']
    x = copy(results['x'][1:])
    dx = x[1]-x[0]
    x -= dx/2 # center bins for plot
    all_types = array(list(results['pll'].keys())) # sat_type, freq, track
    if all_sat_types is None:
        all_sat_types = unique(all_types[:,0])
    # Two passes: '%' normalizes counts to percent of the max all_count,
    # '#' plots the raw (scaled) counts.
    for scale_type in ['%','#']:
        for sat_type in all_sat_types:
            pll_metric = []
            sat_str = get_sat_type(sat_type).sysstr
            fig,ax=subplots(3,1,sharex=True,sharey=True,figsize=(8,6))
            for freq,track in all_types[all_types[:,0]==sat_type,1:]:
                label = get_sub_type(sat_type,freq,track).sigstr.split()[1]
                idx = (sat_type,freq,track)
                all_count = results['all'][idx]
                pll_count = results['pll'][idx]
                lock_count = results['lock'][idx]
                if scale_type=='%':
                    y_max = max(all_count)/100.
                else:
                    y_max = 1
                if y_max == 0.:
                    # Signal never tracked in any bin; nothing to plot.
                    continue
                ax[0].plot( x, all_count/y_max, '-1', label=label)
                ax[1].plot( x, pll_count/y_max, '-1', label=label)
                ax[2].plot( x, lock_count/y_max, '-1', label=label)

                # Recovery metric: first bin past the overpass (x>0)
                # where lock-point count is back to >=50% of maximum.
                y_max = max(all_count)/100.
                i = find( (x>0) & (lock_count/y_max>=50.) )
                if len(i)>0:
                    pll_metric.append( '%s=%.0fm'%(label,x[i[0]]))
                else:
                    pll_metric.append( '%s?'%label)

            title_str = '%d overpasses: %s tracking elev mask %d°\n'%(
                num_events,
                sat_str,
                el_mask)
            if scale_type=='%':
                title_str += 'back to 50% lock:'
                for sig_info in pll_metric:
                    title_str += ' '+sig_info
            ax[0].set_title(title_str)
            ax[0].set_ylabel('{} all meas(scaled)'.format(scale_type))
            ax[1].set_ylabel('{} PLL meas(scaled)'.format(scale_type))
            ax[2].set_ylabel('{} lock point(scaled)'.format(scale_type))
            ax[2].set_xlabel(xlabel)
            for a in ax:
                a.legend(loc='center left',bbox_to_anchor=(1,.5))
                a.grid()
            tight_layout()
            label=sat_str
            if scale_type=='#':
                label+='_num'
            else:
                label+='_percent'
            filename = out_path + '/meas_overpasses_{}.png'.format(label)
            save_compressed_png(filename,dpi=150)
            close()

def do_overpass_plots( meas, out_path, data_dir, main_pos, d, pp ):
    """Drive all per-overpass analysis for one receiver position stream.

    meas = rec35:19 data (or None)
    out_path = what directory to put plots in
    data_dir = where is T04 data directory?
    main_pos = True: data from rec 35:2.  False: data from rec35:16
    d = position data from receiver (from vd2cls)
    pp = pospac truth data

    Produces tracking plots (main position only), per-error-type
    overpass plots, and a raw time-series text file.
    """
    f_prefix = 'Main' if main_pos else 'Stinger'

    # The pospac position should only contain overpass timestamps, so
    # locating the overpasses here should be fast.
    overpass_locs, events = find_overpasses( pp, False )
    rcvr_err = get_rcvr_err( events, d )

    # Measurement-level tracking analysis only makes sense for the
    # main position stream with rec35:19 data available.
    if main_pos and meas is not None:
        print("Analyzing measurements near overpasses")
        results = bin_measurements( meas, events, 10 )
        plot_meas_bins( out_path+'/tracking', len(events), results )

    for err_type in ('3D','2D','Hgt'):
        do_one_overpass_plot( f_prefix, out_path, d, err_type,
                              overpass_locs, events, rcvr_err )

    with open(out_path+'/%s_overpass_time_series.txt'%f_prefix,'w') as f_out:
        gen_overpass_text_time_series( f_out, d, pp, events )


def do_combined_overpass_plot( out_path, titleStr, d_primary, pp, err_type='2D' ):
    """Inputs: out_path = what directory to put plots in
    titleStr = free-text string appended to the plot titles
    d_primary = position data from receiver 35:2 (from vd2cls)
    pp = pospac truth data (overpass timestamps only)
    err_type = '2D' | '3D' | 'Hgt' error component to plot

    Creates two scatter plots over all overpasses combined: one of every
    per-epoch error colored by fixtype, and one showing only outliers
    where the error exceeds 3x the receiver's own sigma.
    """
    # Restrict both data sets to their common time tags.
    _,i_pp,i_d = intersect( pp[:,dPP_TIME], d_primary.TIME )
    pp = pp[i_pp,:]
    d_primary = d_primary[i_d]

    overpass_locs, events = find_overpasses( pp, False )
    primary_rcvr_err = get_rcvr_err( events, d_primary )
    #secondary_rcvr_err = get_rcvr_err( events, d_secondary)
    
    # ---- Figure 1: all errors by fixtype ----
    fig = figure()
    index = 0
    for loc,event,(err,i_ev,i_d) in zip(overpass_locs,events, primary_rcvr_err):
        curr_err = err[:,err.k['err' + err_type]]
        curr_d = d_primary[i_d]
        iRTKFloat = find(curr_d.FIXTYPE == 3)
        iRTKFixed = find(curr_d.FIXTYPE == 4)
        iCode = find(curr_d.FIXTYPE > 4)
        # Signed along-track range (flipped where 'towards' is set).
        # NOTE(review): the same flip is repeated in the second loop
        # below on event[i_ev].RANGE -- assumes indexing returns a
        # fresh copy each time; if it is a view, the second loop would
        # undo this sign flip.  Confirm vd2cls indexing semantics.
        dist = event[i_ev].RANGE
        dist[event[i_ev].towards!=0] *= -1
        # Label the series only once so the legend has single entries.
        if(index == 0):
          plot( dist[iCode], curr_err[iCode], 'b.', label='Code')
          plot( dist[iRTKFloat], curr_err[iRTKFloat], 'g.', label='Float')
          plot( dist[iRTKFixed], curr_err[iRTKFixed], 'r.', label='Fixed')
        else:
          plot( dist[iCode], curr_err[iCode], 'b.')
          plot( dist[iRTKFloat], curr_err[iRTKFloat], 'g.')
          plot( dist[iRTKFixed], curr_err[iRTKFixed], 'r.')
        index += 1
    
    grid(True)
    legend()
    xlabel('along-track range[m]')
    ylabel('per-run %s err[m]' % err_type)
    title('overpass %s errors | %d overpasses | %s' % (err_type,index,titleStr) )
    tight_layout()
    save_compressed_png(out_path + '/' + err_type + '-overpasses-combined.png',dpi=300)
    close()

    # ---- Figure 2: outliers only (error > 3 * receiver sigma) ----
    fig = figure()
    index = 0
    for loc,event,(err,i_ev,i_d) in zip(overpass_locs,events, primary_rcvr_err):
        curr_err = err[:,err.k['err' + err_type]]
        curr_d = d_primary[i_d]
        # NOTE(review): no 'else' branch here -- an unrecognized
        # err_type leaves 'sigma' undefined (NameError below).
        if err_type=='3D':
          sigma = sqrt(curr_d.SigE**2 + curr_d.SigN**2 + curr_d.SigU**2)
        elif err_type=='2D':
          sigma = sqrt(curr_d.SigE**2 + curr_d.SigN**2)
        elif err_type=='Hgt':
          sigma = curr_d.SigU

        iRTKFloat = find( (curr_d.FIXTYPE == 3) & (curr_err > (sigma*3)))
        iRTKFixed = find( (curr_d.FIXTYPE == 4) & (curr_err > (sigma*3)))
        iCode = find( (curr_d.FIXTYPE > 4) & (curr_err > (sigma*3)))

        # Dump code-solution outliers to stdout for manual inspection.
        for i in range(len(iCode)):
          print(curr_d.TIME[iCode[i]],curr_d.FIXTYPE[iCode[i]], sigma[iCode[i]], curr_err[iCode[i]])
        dist = event[i_ev].RANGE
        dist[event[i_ev].towards!=0] *= -1
        if(index == 0):
          plot( dist[iCode], curr_err[iCode], 'b.', label='Code')
          plot( dist[iRTKFloat], curr_err[iRTKFloat], 'g.', label='Float')
          plot( dist[iRTKFixed], curr_err[iRTKFixed], 'r.', label='Fixed')
        else:
          plot( dist[iCode], curr_err[iCode], 'b.')
          plot( dist[iRTKFloat], curr_err[iRTKFloat], 'g.')
          plot( dist[iRTKFixed], curr_err[iRTKFixed], 'r.')
        index += 1
    
    grid(True)
    legend()
    xlabel('along-track range[m]')
    ylabel('per-run %s err[m]' % err_type)
    title('overpass %s error > 3 sigma | %d overpasses | %s' % (err_type,index,titleStr) )
    tight_layout()
    save_compressed_png(out_path + '/' + err_type + '-overpasses_outliers.png',dpi=300)
    close()



def do_time_plots( out_plot_path, primary_pos, d, dU, err_2d, dVU, verr_2d ):
    """Inputs: out_plot_path = what directory to put plots
    primary_pos = True: data from rec 35:2.  False: data from rec35:16
    d = rec 35:2/16 position data & index info
    dU = up error from truth [m]
    err_2d = horizontal error from truth [m]
    dVU = up velocity error from truth [m/s]
    verr_2d = horizontal velocity error from truth [m/s]
    Generate some position/velocity error time-series plots, one
    position figure and one velocity figure per error type.
    """
    if primary_pos:
        f_prefix = 'Main'
    else:
        f_prefix = 'Stinger'
    print(" do_plots " + out_plot_path + " " + f_prefix)
    unique_fixtypes = unique( d.FIXTYPE )

    # Unwrap GPS week rollovers into a continuous seconds axis.
    start_week = d.WEEK[0]
    t = d.TIME + 60*60*24*7*(d.WEEK-start_week)

    for err, verr, err_name in [ (dU,dVU,'Height'), (err_2d,verr_2d,'2D') ]:
        p_fig,p_ax=subplots(1,1)
        v_fig,v_ax=subplots(1,1)
        for fixtype in unique_fixtypes:
            fixtype = int(fixtype)
            i = find( d.FIXTYPE == fixtype )
            cx,cy = docdf( abs(err[i]) )
            err95 = get_cdf_percents( cx,cy, [95])[0]
            if(len(i) < 100):
                # IF we don't have many points the 95% have very little
                # confidence, so skip the 95% number (velocity included;
                # v_label just reuses the position label here).
                label = PosTypeStrConst[fixtype] + " (points= " + str(len(i)) + ")"
                v_label = label
            else:
                label = PosTypeStrConst[fixtype] + " 95% = " + "{:.3f}".format(err95) + "m (points= " + str(len(i)) + ")"
                cx,cy = docdf( abs(verr[i]) )
                v_err95 = get_cdf_percents( cx,cy, [95])[0]
                v_label = PosTypeStrConst[fixtype] + " 95% = " + "{:.3f}".format(v_err95) + "m/s (points= " + str(len(i)) + ")"
            p_ax.scatter( t[i], err[i], s=5, edgecolor='none', label=label )
            v_ax.scatter( t[i], verr[i], s=5, edgecolor='none', label=v_label )
        for ax in [p_ax,v_ax]:
            ax.set_xlabel('Time [GPS Seconds]')
            ax.grid(True)
            ax.legend(loc='best',markerscale=4)
        p_ax.set_ylabel('%s Pos Error [m]'%err_name)
        p_ax.set_title(out_plot_path.split('/')[-1] + ' %s Pos Error'%err_name)
        v_ax.set_ylabel('%s Vel Error [m/s]'%err_name)
        v_ax.set_title(out_plot_path.split('/')[-1] + ' %s Vel Error'%err_name)
        p_fig.tight_layout()
        v_fig.tight_layout()
        p_fig.savefig(out_plot_path+'/%s%sErr.png'%(f_prefix,err_name),dpi=150)
        v_fig.savefig(out_plot_path+'/%s%sVelErr.png'%(f_prefix,err_name),dpi=150)
        close(p_fig)
        close(v_fig)


def do_ag_time_plots( out_plot_path, d, ag_est_err, double_sigma, tc_arr ):
    """Inputs: out_plot_path = what directory to put plots
    d = rec 35:2 position data & index info
    ag_est_err = per-epoch Ag estimated error [m]
    double_sigma = per-epoch 2*sigma bound [m], plotted as a line
    tc_arr = list of thresholds eg [6,10,50,80, 2sigma]; positive
             values appear to be in cm (scaled by 0.01 below) -- confirm
    Generate the Ag estimated-error time-series plot.
    """

    f_prefix = 'Ag'
    print(" do_plots " + out_plot_path + " " + f_prefix)
    unique_fixtypes = unique( d.FIXTYPE )

    # Unwrap GPS week rollovers into a continuous seconds axis.
    start_week = d.WEEK[0]
    t = d.TIME + 60*60*24*7*(d.WEEK-start_week)

    err_name = '2D'

    p_fig,p_ax=subplots(1,1)
    # Draw the 2-sigma bound behind the scatter points (zorder=-1).
    p_ax.plot( t, double_sigma, 'm-', lw=0.2, label='2 * Sigma', zorder=-1)
    for fixtype in unique_fixtypes:
        fixtype = int(fixtype)
        i = find( d.FIXTYPE == fixtype )
        label = PosTypeStrConst[fixtype] + " (points= " + str(len(i)) + ")"
        p_ax.scatter( t[i], ag_est_err[i], zorder=1, s=5, edgecolor='none', label='Ag Est Err,'+label)
    # Horizontal dashed lines at each positive threshold (cm -> m).
    for tc in tc_arr:
        if tc>0:
            tc = tc*0.01
            p_ax.plot([t[0],t[-1]],[tc,tc],'r--')

    p_ax.set_xlabel('Time [GPS Seconds]')
    p_ax.grid(True)
    p_ax.legend(loc='best',markerscale=4)

    p_ax.set_ylim([0,0.9])
    p_ax.set_ylabel('%s Error [m]'%err_name)
    p_ax.set_title(out_plot_path.split('/')[-1] + ' %s Error'%err_name)
    p_fig.tight_layout()
    p_fig.savefig(out_plot_path+'/%s%sErr.png'%(f_prefix,err_name),dpi=150)
    close(p_fig)


def get_rounded_cdf_percents( cx, cy ):
    """Evaluate the module-level cdf_vals percentiles on the CDF (cx, cy),
    rounding each result to 3 decimal places -- millimeter precision is
    all we need for the web statistics."""
    return [round(v, 3) for v in get_cdf_percents(cx, cy, cdf_vals)]


def check_CNO( config, data_dir, out_plot_path,
                rec='-d35:19'):
    """Check for the stuck-BeiDou-SNR problem (CNO pegged near 65.1 dBHz).

    Inputs: config = default_scenario() = info from scenario XML file
       data_dir = where are T04 receiver files?
       out_plot_path = output_dir/plots/x/ = directory where plots go
       rec = record selector passed to vd2cls (default rec35:19)
    Output: (read_ok, cno_problem)
       read_ok = True if the T04/T0x files were found and readable
       cno_problem = True if more than 2 samples report d.CNO > 65 dBHz
    When the problem is detected an empty marker file
    out_plot_path/cno_problem.txt is created so the web pages can flag
    the run.
    """

    fn = 'check_CNO()'
    cno_problem = False

    __debug_timer2__ = TD.init_timer("check_CNO")

    print("This is",fn,". Read", data_dir)

    # Read the T04 files one at a time: loading them all at once makes
    # concatenate() in mutils very slow for rec35:19 data.
    all_t04_files = glob.glob( data_dir + '/*.T0?' )
    print("This is",fn,". Read", data_dir, str(all_t04_files))
    if len(all_t04_files) == 0:
        print(fn,"No T04/t0x file")
        return False, False

    TD.lap_timer(__debug_timer2__)
    for t04 in all_t04_files:
        TD.lap_timer(__debug_timer2__)
        print(fn,'read',t04)
        try:
            TD.lap_timer(__debug_timer2__)
            d = vd2cls(t04, rec)
            TD.lap_timer(__debug_timer2__)
            i = find(d.CNO>65.0)
            if len(i) > 2:
                # Stop at the first file that proves the problem.
                cno_problem = True
                break
        except Exception:
            # Narrowed from a bare 'except:' so Ctrl-C / SystemExit
            # still propagate; any read/parse error aborts the check.
            print(fn,"Error in checking CNO by reading",rec,"from",data_dir,"Exit.")
            return False, False
    TD.lap_timer(__debug_timer2__)
    if cno_problem:
        print("There is data CNO > 65 dBHz. Create file", out_plot_path+'/cno_problem.txt')
        # Create an empty marker file; 'with' guarantees the handle closes.
        with open(out_plot_path+'/cno_problem.txt','w'):
            pass
        return True, True
    else:
        print("There is no data CNO > 65 dBHz. Do not create file", out_plot_path+'/cno_problem.txt')
        return True, False


def calc_stats( config, pp, data_dir, out_nopi_path, out_plot_path,
                rec='-d35:2', partial=False, ag=False ):
    """Compute position-accuracy statistics against truth and make plots.

    Inputs: config = default_scenario() = info from scenario XML file
       pp = POSPac truth data (or None if no POSPac data)
       data_dir = where are T04 receiver files?
       out_nopi_path = directory to put NoPI output in
       out_plot_path = directory to put position plots in
       rec = is this rec35:2 or rec35:16 position data?
       partial = True/False. Do partial plot or not (trims first/last 100 s)
       ag = True/False. If True do unique sort to detect duplicated epochs
    Output: (stats, ag_stats), where stats = default_stats() = position and NoPi stats
    Compute info on position stats (and optionally run NoPi for those stats)
    Also create some plots in data_dir/plots/
    """
    t1 = time.time()
    stats = default_stats()
    ag_stats = default_stats()

    # rec 35:2 is the primary position record: only the main run drives
    # NoPi, time-series plots, and overpass plots.
    main_pos = (rec == '-d35:2')
    print('calc_stats {} start..'.format(main_pos))
    if main_pos and config.NoPI_base is not None:
        try:
            do_NoPI( config, data_dir, out_nopi_path, stats )
        except:
            # Best-effort: a NoPi failure must not abort the position stats.
            print("NoPi error for %s !! " % data_dir)

    try:
        # Load the requested position record type from all T04 files.
        d = vd2cls(data_dir + '/*.T04', rec)
        # partial run ignores first and last 100 sec data for better analysis
        if partial:
            print("This is partial run. Remove first and last 100 sec data.")
            t = d.TIME
            ts = min(t)+100
            te = max(t)-100
            index = np.where( (t>ts)&(t<te) )
            d = d[index]
    except:
        if main_pos:
            # We should always have rec 35:2, but in some very rare cases
            # the receiver won't position at all.
            print("No position data for %s !! " % data_dir)
            stats.seg['All'] = None
            return stats, None
        else:
            return None, None
    # ref_pos format = [lat,lon,hgt,vE,vN,vU] (lined up with d[])
    if config.truth_format == "POSPAC_ASCII":
        # Align receiver epochs with the POSPac truth epochs.
        if not ag:
            _,i_pp,i_d = intersect( pp[:,dPP_TIME], d.TIME )
        else:
            # set unique t in epoch for t04 from NAV900, 1 out of 3 run has duplicated epoch
            print("This is ag run. Remove duplicated epoch data.")
            t = d.TIME
            t.sort()
            ans = np.ediff1d(t)
            if 0 in ans:
                print("Duplicated time in position data for %s . make_quick_unique=True " % data_dir)
                _,i_pp,i_d = intersect( pp[:,dPP_TIME], d.TIME, make_quick_unique=True )
            else:
                _,i_pp,i_d = intersect( pp[:,dPP_TIME], d.TIME )
        if len(i_pp) != len(i_d):
            # Mismatched intersection means the data is unusable.
            print("Error in position data for %s !! " % data_dir)
            stats.seg['All'] = None
            return stats, None
        ref_pos = pp[i_pp,dPP_LAT:dPP_velU+1]
        d = d[i_d,:]
    elif config.truth_format == "MULTIPLE_STATIC":
        # One static truth position per named span; the synthetic "All"
        # span has no single truth position so it is skipped here.
        ref_pos = zeros( (len(d), 6) )
        for desc,start_stops in config.span:
            if desc == "All":
                continue
            for start,stop in start_stops:
                i = find( (d.TIME>=start) & (d.TIME<=stop) )
                ref_pos[i,:3] = config.all_truth[desc]
    elif config.truth_format == "STATIC":
        # Single static truth position for every epoch (velocity truth = 0).
        ref_pos = zeros( (len(d), 6) )
        ref_pos[:,:3] = config.truth
    else:
        raise RuntimeError("Invalid truth format",config.truth_format)

    # Load meas if we have overpasses
    meas = None
    if main_pos and config.overpass_auto_type is not None:
        try:
            meas = vd2cls(data_dir + '/*.T04', '-d35:19')
        except:
            # Measurement records are optional; plots degrade gracefully.
            meas = None

    # combine timespans:
    #   spans[timespan_desc] = boolean array for each gps_second.  True
    #                          if in range.
    spans = {}
    for desc,start_stops in config.span:
        if not desc in spans:
            spans[desc] = zeros(len(d),dtype=bool)
        for start,stop in start_stops:
            i_d = find( (d.TIME>=start) & (d.TIME<=stop) )
            spans[desc][i_d] = True

    # process timespans
    for desc,is_in_span in spans.items():
        print("calc %s" % desc)
        i_d = find( is_in_span ) # convert from boolean array to indexes
        print(" len %d" % len(i_d))
        if len(i_d) == 0:
            continue
        stats.seg[desc] = {}
        ag_stats.seg[desc] = {}
        # -1 is a sentinel meaning "all fix types combined"; it is
        # prepended so the 'All' bucket is always computed first.
        for fixtype in concatenate( ([-1], unique(d.FIXTYPE[i_d])) ):
            if fixtype == -1:
                i = slice(None)
                fixtype = "All"
                stats.seg[desc]['All'] = {}
                out_seg = stats.seg[desc]['All']
                ag_stats.seg[desc]['All'] = {}
                ag_out_seg = ag_stats.seg[desc]['All']
            else:
                i = find( d.FIXTYPE[i_d] == fixtype )
                fixtype = PosTypeStrConst[int(fixtype)]
                stats.seg[desc][fixtype] = {}
                out_seg = stats.seg[desc][fixtype]
                ag_stats.seg[desc][fixtype] = {}
                ag_out_seg = ag_stats.seg[desc][fixtype]
            # Position errors in the local ENU frame, then CDFs of the
            # 1D (vertical), 2D (horizontal) and 3D error magnitudes.
            (dE, dN, dU) = llh2enu( d[i_d[i],d.k.LAT:d.k.LAT+3],
                                    ref_pos[i_d[i],:3] )
            err_3d = sqrt( dE**2 + dN**2 + dU**2 )
            err_2d = sqrt( dE**2 + dN**2 )
            err_1d = dU
            cx3,cy3 = docdf( err_3d )
            cx2,cy2 = docdf( err_2d )
            cx1,cy1 = docdf( err_1d )
            out_seg['1d'] = get_rounded_cdf_percents( cx1, cy1 )
            out_seg['2d'] = get_rounded_cdf_percents( cx2, cy2 )
            out_seg['3d'] = get_rounded_cdf_percents( cx3, cy3 )
            out_seg['cdf_vals'] = cdf_vals
            out_seg['len'] = len(dE)
            # Velocity errors against truth columns [3:6] = (vE, vN, vU).
            dVE = d.VLON[i_d[i]] - ref_pos[i_d[i],3]
            dVN = d.VLAT[i_d[i]] - ref_pos[i_d[i],4]
            dVU = d.VHGT[i_d[i]] - ref_pos[i_d[i],5]
            verr_2d= sqrt(dVE**2 + dVN**2)
            vcx2,vcy2 = docdf( verr_2d )
            verr_3d= sqrt(dVE**2 + dVN**2 + dVU**2)
            vcx3,vcy3 = docdf( verr_3d )
            out_seg['vel2d'] = get_rounded_cdf_percents( vcx2, vcy2 )
            out_seg['vel3d'] = get_rounded_cdf_percents( vcx3, vcy3 )

            do_cdf_plots( out_plot_path,
                          desc,
                          fixtype,
                          main_pos,
                          (cx2, cy2), (cx3, cy3),
                          (vcx2, vcy2), (vcx3, vcy3) )
            if fixtype=="All":
                if desc=="All":
                    do_time_plots( out_plot_path, main_pos, d[i_d],
                                   dU, err_2d,
                                   dVU, verr_2d )
                if desc=="Overpasses":
                    # NOTE(review): i_pp only exists on the POSPAC_ASCII
                    # path; "Overpasses" spans are only auto-added when
                    # POSPac truth is present, so this appears safe —
                    # verify if new span sources are ever added.
                    do_overpass_plots( meas,
                                       out_plot_path,
                                       data_dir,
                                       main_pos,
                                       d[i_d[i]],
                                       pp[i_pp[i_d[i]],:] )

            # Ag Results
            if main_pos and ag:
                print("calc %s for ag results" % desc)
                arr = []
                # Compare 2x the receiver's own error estimate against 2x
                # the measured 2D error and against fixed cm thresholds.
                ag_est_err = 2 * d[i_d[i],d.k.ErrSemiMaj ]
                double_sigma = 2 * err_2d
                # Thresholds in cm; -1 means "above the measured 2-sigma".
                tc_arr = [6,10,50,80,-1]
                for tc in tc_arr:
                    if tc == -1:
                        msk = np.where(ag_est_err[:]>double_sigma[:])
                    else:
                        msk = np.where(ag_est_err[:] > tc*0.01)
                    arr.append( len(ag_est_err[msk]) )
                ag_out_seg['2d_above_ctr'] = arr
                ag_out_seg['threshold'] = tc_arr
                ag_out_seg['len'] = len(dE)
                if fixtype=="All" and desc=="All":
                    print("calc %s for ag results and plot" % desc)
                    do_ag_time_plots( out_plot_path, d[i_d[i]],
                                    ag_est_err, double_sigma, tc_arr )


    print('calc_stats {} time {:.2f}'.format(main_pos,time.time()-t1))
    return stats, ag_stats

def parse_sampleConfig( config_filename ):
    """Read a scenario XML file and return its important settings.

    Inputs: config_filename = name of scenario XML file
    Output: default_scenario() populated with description, type, truth
            info, NoPI settings, iono flag, auto-span markers, and the
            list of (span_type, [(start, stop), ...]) time spans.
    """
    tree = ET.ElementTree(file=config_filename)
    scen = default_scenario()
    scen.config_filename = config_filename
    scen.desc = "%s: %s" % (config_filename, tree.find('./desc').text)
    scen.type = tree.find('./type').text

    # Optional truth format element; None means "no truth processing".
    fmt_el = tree.find('./truth/format')
    scen.truth_format = fmt_el.text if fmt_el is not None else None

    # Presence of the element is the flag; its content is ignored.
    if tree.find('./rm_iono') is not None:
        scen.rm_iono = True

    # NoPI only runs when both the base and the ini file are given.
    base_el = tree.find('./NoPI/base')
    ini_el = tree.find('./NoPI/ini')
    if base_el is not None and ini_el is not None:
        scen.NoPI_base = base_el.text
        scen.NoPI_ini = ini_el.text

    if scen.truth_format is None:
        pass
    elif scen.truth_format == "POSPAC_ASCII":
        scen.truth_file = tree.find('./truth/file').text
    elif scen.truth_format == "MULTIPLE_STATIC":
        # One named truth position per <pos> element.
        scen.all_truth = {}
        for pos_el in tree.findall('./truth/pos'):
            llh = [float(pos_el.find(tag).text)
                   for tag in ('lat', 'long', 'hgt')]
            scen.all_truth[ pos_el.find('name').text ] = llh
    else:
        # STATIC (and anything else): a single lat/long/hgt truth position.
        scen.truth = [float(tree.find('./truth/' + tag).text)
                      for tag in ('lat', 'long', 'hgt')]

    scen.span = []
    combined_spans = []     # every explicit (start, stop) pair seen
    combined_type = None    # span type that asked for the combined list
    for ts_el in tree.findall('./timespan'):
        ts_type = ts_el.find('type').text
        if ts_el.find('auto_start_stop') is not None:
            combined_type = ts_type
        elif ts_el.find('auto_overpasses') is not None:
            scen.overpass_auto_type = ts_type
        elif ts_el.find('auto_trees') is not None:
            scen.tree_auto_type = ts_type
        else:
            pair = (int(ts_el.find('start').text),
                    int(ts_el.find('stop').text))
            scen.span.append( (ts_type, [pair]) )
            combined_spans.append(pair)

    # The auto_start_stop span covers the union of all explicit spans.
    if combined_type is not None:
        scen.span.append( (combined_type, combined_spans) )

    return scen


def update_test_list( config, all_seg, tests ):
    """Record this test in the running catalog of all tests ever run.

    Inputs: config = default_scenario() = info from scenario XML file
       all_seg = dict keyed by fix type for this test (or None)
       tests = catalog dict, updated in place; tests[desc] holds
               "segment" and "fixtype" OrderedDicts used as ordered sets.
    Output: tests (mutated in place)
    """
    desc = config.desc
    if desc not in tests:
        entry = {}
        entry["segment"] = OrderedDict()
        # Preserve the scenario's span order; duplicates collapse to one.
        for seg_desc, _ in config.span:
            entry["segment"].setdefault(seg_desc, 1)
        entry["fixtype"] = OrderedDict()
        tests[desc] = entry
    if all_seg is not None:
        fix_map = tests[desc]["fixtype"]
        for ft in all_seg.keys():
            fix_map[ft] = 1

def get_warning_log( data_dir ):
    """Summarize receiver error/warning records from the newest T04 file.

    Inputs: data_dir = where are T04 files?
    Return a dictionary with information on errors and warnings.
    For example:
     { 'total':1, 'errors':[], 'warnings':[['4','0x80000098']] }
    shows one 0x80000098 warning from task ID 4.
    If no T04 files exist the dict additionally contains 'no_files': True.
    """

    d = {'total':0, 'errors':[], 'warnings':[]}

    # Only need to look at last file to get a summary
    try:
        filename = sorted(glob.glob(data_dir + '/*.T04'))[-1]
    except IndexError:
        # BUGFIX: was a bare except; only an empty glob result is expected.
        d['no_files'] = True
        return d

    cmd = 'viewdat -d35:7 ' + filename
    pipe = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
    err_info = None
    # Iterate viewdat output line by line until EOF.
    for raw_line in pipe.stdout:
        line = raw_line.decode().rstrip()
        # NOTE: '> 0' (not '!= -1') is kept from the original code —
        # viewdat appears to indent these fields, so column 0 never matches.
        if line.find('Total # of error entries') > 0:
            d['total'] = int(line.split()[-1])
        elif line.find('Warning 0x') > 0:
            err_info = line
        elif line.lower().find('error 0x') > 0:
            err_info = line
        elif line.find(' Task') > 0 and err_info is not None:
            # BUGFIX: guard err_info — a ' Task' line appearing before any
            # Warning/error line previously crashed with AttributeError.
            # Each Task line is paired with the preceding Warning/error line.
            if err_info.find('Warning 0x') > 0:
                key = 'warnings'
            else:
                key = 'errors'
            d[key].append( [line.split()[-1], err_info.split()[-1]] )
    # BUGFIX: close the pipe and reap the child so it doesn't linger
    # as a zombie process.
    pipe.stdout.close()
    pipe.wait()
    return d

def run_t012kml(in_file,out_file,sub16=False):
    """Launch t012kml as a background process.
    in_file = xyz.T04
    out_file = xyz.kmz
    sub16: False=do sub2, else sub16 position records
    Returns a one-element list holding the Popen handle so the caller
    can wait() on it later; stdout/stderr go to out_file + ".log".
    """
    flags = "--ignore_err"
    if sub16:
        flags += " --sub16"
    cmd = "t012kml %s %s %s" % (flags, in_file, out_file)
    with open(out_file+".log","wb") as f_log:
        print("Running: t012kml for "+out_file)
        proc = subprocess.Popen(shlex.split(cmd),
                                stderr=subprocess.STDOUT,
                                stdout=f_log)
    return [proc]

def fill_in_auto_config_spans( config, pp ):
    """\
    config = from ProcessResults.py : parse_sampleConfig()
    pp = POSPac truth data
    Append auto-detected time spans (satellite overpasses and "tree"
    events) to config.span when the scenario requests them.
    """
    # Each entry: (requested span type or None, progress message, detector)
    detectors = [
        (config.overpass_auto_type, "Finding overpasses",
         lambda: find_overpasses( pp, True )),
        (config.tree_auto_type, "Finding trees",
         lambda: find_tree( None, pp, True )),
    ]
    for span_type, message, detect in detectors:
        if span_type is None:
            continue
        print(message)
        _, start_stop = detect()
        config.span.append( (span_type, start_stop) )


def analyze_single_rx( inFileName, tree,
                       out_nopi_path, out_plot_path ):
    """Inputs: inFileName = ResultsQueue/x.xml = filename to process
               tree = ET.ElementTree(infileName)
               out_nopi_path = output_dir/x/ = directory where NoPi data goes
               out_plot_path = output_dir/plots/x/ = directory where plots go
    If the scenario config file say so, run NoPi on data.
    Compare position residuals to truth.
    Returns info on RX/config file and data directory, e.g.:
      (DataDir/latest-RX9-2018-10-02-tree100, DataDir/RX9-567)
    """
    config = parse_sampleConfig( tree.find('SampleConfig').text )
    data_dir = tree.find('Data').text

    # NOTE(review): this assigns attributes onto the namedtuple *class*
    # rather than an instance of it — it works because the attributes are
    # read by name, but a plain class or SimpleNamespace was probably
    # intended.  Verify before changing, since callers use info.link_name,
    # info.t0x_dir_name, and info.output.
    info = namedtuple('Info',['link_name','t0x_dir_name','output'])
    base_xml = os.path.splitext(os.path.basename(config.config_filename))[0]
    rx_name = re.match('RX([0-9]+)',os.path.basename(data_dir))[0]
    info.link_name = 'DataDir/latest-%s-%s' % (rx_name,base_xml)
    info.t0x_dir_name = data_dir
    info.output = None

    # Presence of the XML element is the flag; the content is ignored.
    do_RTK = tree.find('do_RTK') is not None
    do_RTX = tree.find('do_RTX') is not None
    partialrun = tree.find('partialRun') is not None # True if tree.find('partialRun') != None and tree.find('partialRun').text == '1' else False
    ag_run = tree.find('agRun') is not None # True if tree.find('agRun') != None and tree.find('agRun').text == '1' else False

    if tree.find('skipPost') is not None or config.truth_format is None:
        print("Skip processing %s" % inFileName)
        return info

    # Process any new data
    print("Processing %s" % inFileName)
    __debug_timer__ = TD.init_timer("analyze_single_rx")
    print(__debug_timer__)
    TD.lap_timer(__debug_timer__)

    if not os.path.isdir(data_dir):
        raise RuntimeError("Missing directory: %s"%data_dir)

    # Keep a copy of the configs alongside the raw data for traceability.
    shutil.copy( tree.find('SampleConfig').text, data_dir )
    shutil.copy( "config.xml", data_dir )

    # Concatenate all T04/T0x files (in sorted order) into one temporary
    # file so downstream tools can operate on a single input.
    tmp_all_T04 = data_dir + '/.rcvr.T04'
    with open(tmp_all_T04, "wb" ) as f_rcvr:
        sorted_T04s = sorted(glob.glob( data_dir + '/*.T0?' ))
        if len(sorted_T04s) == 0:
            print("WARNING - no T04 files found! Skip processing %s" % inFileName)
            return info
        for fin_name in sorted_T04s:
            shutil.copyfileobj( open(fin_name,'rb'), f_rcvr )
    TD.lap_timer(__debug_timer__)

    # Use the XML StartTime if present, otherwise fall back to the
    # modification time of the queue file itself.
    el = tree.find('StartTime')
    if el.text != "" and el.text != None:
        start_date = el.text
    else:
        start_date = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(inFileName)))

    # Start fresh: wipe any previous plot output for this run.
    if os.path.isdir( out_plot_path ):
        shutil.rmtree( out_plot_path )
    os.makedirs( out_plot_path )
    plist = []
    plist += run_t012kml(tmp_all_T04,out_plot_path+'/sub2.kmz')
    if do_RTK:
        plist += run_t012kml(tmp_all_T04,out_plot_path+'/sub16.kmz',sub16=True)
    TD.lap_timer(__debug_timer__)

    pp = None
    snpc_proc = None
    if config.truth_format in ["POSPAC_ASCII","STATIC"]:
        # create cycle slip/SNPC analysis plots
        if config.truth_format == "POSPAC_ASCII":
            pp = doload(config.truth_file)
            fill_in_auto_config_spans( config, pp )
        out_slip_path = out_plot_path+'/cycle_slips'
        out_resid_path = out_plot_path+'/SNPC_resid'
        # SNPC runs in the background; it is waited on further below.
        tmp_all_T04_full = os.getcwd() + '/' + tmp_all_T04
        snpc_proc = run_SNPC_slip_check(config.truth, config.truth_file,
                                        tmp_all_T04_full,
                                        out_slip_path,
                                        rm_iono=config.rm_iono,
                                        do_resid=True,
                                        in_bg=True)

    dgnss_stats = None
    ag_stats = None
    check_cno = True
    find_cno_issue = False
    TD.lap_timer(__debug_timer__)
    # Run the heavy analyses (main stats, optional RTK stats, CNO check)
    # in parallel worker processes.
    with ProcessPoolExecutor() as e:
        TD.lap_timer(__debug_timer__)
        p1 = e.submit(calc_stats,
                      config, pp, data_dir, out_nopi_path, out_plot_path, partial=partialrun, ag=ag_run )
        TD.lap_timer(__debug_timer__)
        if do_RTK:
            p2 = e.submit( calc_stats,
                           config, pp, data_dir, out_nopi_path, out_plot_path, rec='-d35:16', partial=partialrun, ag=ag_run )
            dgnss_stats, _ = p2.result()
        TD.lap_timer(__debug_timer__)
        if check_cno:
            p3 = e.submit( check_CNO,
                           config, data_dir, out_plot_path, rec='-d35:19' )
            readt04, find_cno_issue = p3.result()
        TD.lap_timer(__debug_timer__)
        stats, ag_stats = p1.result()
        TD.lap_timer(__debug_timer__)

    # Assemble the JSON-serializable summary for storage/web display.
    output = {}
    if os.path.isfile(data_dir+"/custom_fw.txt"):
        output["custom_fw"] = 1
    else:
        output["custom_fw"] = 0
    output["partial_run"] = 0 if tree.find('partialRun') == None else tree.find('partialRun').text
    output["date"] = start_date
    output["file_base"] = os.path.basename(data_dir)
    output["config"] = config
    output["unit_desc"] = tree.find('desc').text
    output["do_RTK"] = do_RTK
    output["do_RTX"] = do_RTX
    if config.NoPI_base is not None:
        output["NoPI"] = out_nopi_path + '/NoPiDI_Top.html'

    output["stats"] = stats
    output["dgnss_stats"] = dgnss_stats
    if ag_run:
        output["ag_stats"] = ag_stats
    output["err_log"] = get_warning_log( data_dir )
    # Notify the team by email for receiver errors or a CNO anomaly.
    if len(output["err_log"]["errors"]) > 0:
        subject = "Spirent RF playback got an error: %s" % output["file_base"]
        body = "Error:\n %s" % output["err_log"]
        send_email( subject, body )

    if find_cno_issue:
        subject = "Spirent RF playback found issue with CNO > 65 in T04"
        body = "Error:\n" + "Run folder: " + data_dir
        send_email( subject, body )
    TD.lap_timer(__debug_timer__)

    # Post-process the background SNPC results, if SNPC was started.
    if snpc_proc is not None:
        print("Waiting for SNPC..")
        snpc_proc.wait()
        if snpc_proc.returncode != 0:
            raise RuntimeError("SNPC error")
        with ProcessPoolExecutor() as e:
            e.submit( analyze_slip_data,
                      config.span, out_slip_path )
            e.submit( analyze_range_resids,
                      config.span, out_slip_path, out_resid_path )
    TD.lap_timer(__debug_timer__)

    info.output = output
    # Wait for the background t012kml processes before cleaning up.
    for p in plist:
        p.wait()
    os.remove( tmp_all_T04 )
    TD.lap_timer(__debug_timer__)
    TD.close_timer(__debug_timer__)
    return info


def analyze_all_rx():
    """Process all ResultsQueue/*.xml files with missing outputs.

    A run is "missing" when ResultsQueue/<name>.xml exists but
    OutputResults/<name>.json does not.  Runs are processed in
    (run number, RX number) order.
    """
    print("Reading ResultsQueue/ RX data..")
    queued = [p.split('/')[-1][:-4] for p in glob.glob('ResultsQueue/RX*.xml')]
    finished = [p.split('/')[-1][:-5] for p in glob.glob('OutputResults/RX*.json')]

    print("Checking for deleted data..")
    # Parse "RX<rx>-<run>" names into (rx, run) integer pairs.
    rx_run_pairs = []
    for name in queued:
        fields = name.split('-')
        rx_run_pairs.append((int(fields[0][2:]), int(fields[1])))
    with get_db(True) as db:
        cursor = db.cursor()
        remove_deleted_filesystem_runs( cursor, rx_run_pairs )

    def sort_key(name):
        # Sort by run num then RX num
        rx_txt, run_txt = name.split('-')
        return (int(run_txt), int(rx_txt[2:]))

    pending = sorted(set(queued).difference(finished), key=sort_key)
    print("Processing %d new RX data.."%len(pending))
    for name in pending:
        nopi_path = output_dir + '/' + name
        plot_path = output_dir + '/plots/' + name
        json_name = nopi_path + ".json"
        xml_name = 'ResultsQueue/%s.xml'%name
        xml_tree = ET.ElementTree(file=xml_name)
        rx_info = analyze_single_rx( xml_name, xml_tree,
                                     nopi_path, plot_path )
        print("Saving results..")
        add_data_to_db( [rx_info.output] )
        with open(json_name, 'w') as f:
            # Do we want to get rid of this eventually to save space?
            json.dump( rx_info.output, f )

def create_latest_symlinks():
    """Remove all DataDir/latest-RX*/ symlinks and replace them with the latest
    paths.  For example, if the latest 2017-11-14-SJ-van-test.xml run for
    RX4 is DataDir/RX4-542/, then create a symlink:
      DataDir/latest-RX4-2017-11-14-SJ-van-test -> DataDir/RX4-542
    """
    # Remove old symlinks
    # Add latest symlinks
    with get_db() as db:
        c = db.cursor()
        all_configs = read_configs(c)
        latest_runs = read_latest_runs_summary(c)
        for config_ID,(_,run_num,_) in latest_runs.items():
            for dir_name in glob.glob('DataDir/RX*-%d' % run_num):
                dir_name_full = dir_name
                dir_name = dir_name.split('/')[1] # get rid of DataDir/
                rx_num = int(re.match('RX([0-9]+)-',dir_name)[1])
                config_filename = all_configs[config_ID][0]
                # BUGFIX: was config_filename.strip('.xml'), which strips
                # the character set {'.','x','m','l'} from BOTH ends and
                # mangles names like 'xtest.xml' -> 'test'.  Use splitext()
                # to drop only the extension, matching how
                # analyze_single_rx() builds its link names.
                base_xml = os.path.splitext(
                    os.path.basename(config_filename))[0]
                link_name = 'DataDir/latest-RX%d-%s' % (rx_num, base_xml)
                #print(dir_name,link_name)
                if not os.path.exists( dir_name_full ):
                    continue
                # Replace any stale symlink with one pointing at this run.
                if os.path.islink( link_name ):
                    os.remove( link_name )
                os.symlink(dir_name, link_name)

def any_changes():
    """Return True if ResultsQueue/ is newer than OutputResults/ """
    # A directory's ctime updates when an entry is added or removed,
    # so comparing the two directories detects new or deleted queue files.
    queue_time = os.path.getctime('ResultsQueue')
    results_time = os.path.getctime('OutputResults')
    return queue_time >= results_time

def get_lock(wait_for_lock):
    """Prevent more than 1 person from running this script at a time.
    wait_for_lock - if True, wait forever until we have access.
    Return True if we got a lock.
    Return False if failed.  Should stop processing..
    """
    # BUGFIX: the docstring used to sit after the import, so it was not
    # actually the function's docstring; it must be the first statement.
    import zc.lockfile
    have_lock = False
    while True:
        # Without holding a reference to our lock somewhere it gets garbage
        # collected when the function exits.  Put a reference in _lock_ref so
        # we don't drop the lock until this script exits.
        try:
            get_lock._lock_ref = zc.lockfile.LockFile('.LockProcessResultsPython')
            have_lock = True
        except zc.lockfile.LockError:
            # BUGFIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit and made Ctrl-C useless while
            # polling in --wait mode.  Only the lock-held case is expected.
            print('Someone is already running this script')
        if have_lock or not wait_for_lock:
            break
        else:
            # Poll once a minute until the other instance releases the lock.
            time.sleep(60)
    return have_lock

if __name__ == "__main__":
    import requests
    import argparse

    # Command-line entry point: parse options, take the run lock, then
    # either reprocess a single run or process everything new.
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
                                         description=usage)
    arg_parser.add_argument('-f','--force',
                            help='If a run is deleted, force update of web page',
                            action="store_true")
    arg_parser.add_argument('-r','--reprocess',
                            help='Force reprocessing on a single dir(e.g., RX17-1234)')
    arg_parser.add_argument('-t','--test_dir',
                            help="Don't update global storage.  Only output results to given directory.  Must be used with --reprocess option.")
    arg_parser.add_argument('-w','--wait',
                            help='If ProcessResults is already running, wait for lock. (Normally we exit immediately)',
                            action="store_true")

    opts = arg_parser.parse_args()

    # Only one instance may run at a time.
    if not get_lock(opts.wait):
        sys.exit()

    if opts.test_dir:
        # Test mode: process a single run into a private directory and
        # exit without touching the global results database.
        if not opts.reprocess:
            print("--test_dir must be used with --reprocess")
            sys.exit()
        nopi_path = opts.test_dir + '/' + opts.reprocess
        plot_path = opts.test_dir + '/plots/' + opts.reprocess
        xml_name = 'ResultsQueue/%s.xml' % opts.reprocess
        xml_tree = ET.ElementTree(file=xml_name)
        rx_info = analyze_single_rx( xml_name, xml_tree,
                                     nopi_path, plot_path )
        print(rx_info)
        sys.exit()

    if opts.reprocess:
        # Deleting the cached JSON makes analyze_all_rx() redo this run.
        stale_json = '%s/%s.json'%(output_dir,opts.reprocess)
        if os.path.isfile(stale_json):
            os.remove(stale_json)
        else:
            print("Warning: Can't find "+stale_json)
    if opts.force or opts.reprocess or opts.wait or any_changes():
        convert_old_format_to_new_if_needed()
        analyze_all_rx()
        create_latest_symlinks()
    else:
        print("No changes")