import numpy as np
import mutils as m
from mutils.PosPacConst import *
import mutils.PosTypeConst as PosTypeConst
from ProcessResults import parse_sampleConfig
from overpasses import find_overpasses
import json
import sys
import datetime
#import pickle
import os
import math

import matplotlib.pyplot as plt
from matplotlib import use,rcParams
from statistics import median
import xml.etree.ElementTree as et
import glob, os
import ITRF as itrf
import pandas as pd

"""
Contains all functions used in generating analysis plots and truth adjustments
"""

def getDateFromDir(workingDir):
    """
    Parse the year, month, and day from a dataset directory name.

    The directory name is expected to start with an ISO-style date, e.g.
    '2021-02-22-SantaNella-van-test'.

    Inputs:
        workingDir: dataset directory name. A full path is also accepted;
            only the final path component is parsed.

    Outputs:
        yr, month, day: the year, month, and day of the dataset returned as ints

    Raises:
        ValueError: if the name does not start with '<year>-<month>-<day>'

    """

    #Use only the final path component so both bare names and full paths work
    name = os.path.basename(os.path.normpath(workingDir))
    dateStr = name.split('-')[:3] #First three dash-separated fields are the date
    if len(dateStr) < 3:
        raise ValueError("Directory name does not start with a 'YYYY-MM-DD' date: %r" % workingDir)
    yr = int(dateStr[0])
    month = int(dateStr[1])
    day = int(dateStr[2])
    return yr,month,day

def getDirFromUser(path = '.'):
    """
    Prompt the user to pick one entry from a directory listing.

    Lists the files/directories under `path` with a numeric index so the user
    can view the options and select one by number.

    Inputs:
        path (optional): directory whose entries are offered. Default is the
            current directory.

    Outputs:
        currDir: the name of the file or directory the user selected

    """

    #Number the directory entries so the user can pick by index
    entries = dict(enumerate(os.listdir(path)))
    print(entries)
    selection = int(input("Enter number of the directory to work with: "))
    return entries[selection]

def editXMLTruth(filepath,truthName):
    """
    Edits the xml file to point to the location of the desired truth file.
    Returns the previous truth name for keeping track of history/checking if it's working correctly

    Inputs:
        filepath: path to the XML file
        truthName: path/name of the truth file to write into the config

    Outputs:
        prevTruth: name of the previous truth file, or None if the XML had no
            <truth><file> element

    """

    xmlTree = et.parse(filepath)
    root = xmlTree.getroot()

    #BUGFIX: initialize prevTruth so a config without a <truth><file> element
    #returns None instead of raising UnboundLocalError at the return below.
    prevTruth = None

    #Find the truth file source and replace it with the requested file
    for elem in root:
        if elem.tag == "truth":
            for sub_elem in elem:
                if sub_elem.tag == "file":
                    prevTruth = sub_elem.text
                    sub_elem.text = truthName 

    xmlTree.write(filepath)
    return prevTruth

def changeXMLFile(currDir):
    """
    Lets the user choose between the legacy (server) truth file and a local
    truth file, and updates the dataset's XML config accordingly.

    Inputs:
        currDir: directory that contains the XML file

    Outputs:
        filepath: path to the XML config file
        truthName: name of the truth file, used to label analysis plots/files
    """

    #Locate the dataset's XML config (the first match in the directory)
    filepath = glob.glob(currDir + '/*.xml')[0]
    pathToTruth = currDir + '/Truth_Files/'

    #Ask the user which truth source to use
    choice = input("Press Enter to use original (server) truth. Press any other key to see the options and select a truth file: ")
    if choice == '':
        #Leave the XML untouched; label the run as the legacy server truth
        return filepath, "server_truth.txt"

    #Let the user pick a local truth file and point the XML at it
    truthName = pathToTruth + getDirFromUser(pathToTruth)
    editXMLTruth(filepath, truthName)
    return filepath, truthName
    
def generateTruth(XMLConfig):
    """
    Parses the XML config with mutils and loads the truth data it points at.

    Inputs:
        XMLConfig: path to the xml config file

    Outputs:
        pp: array with data from the truth file
        RunConfig: RunConfig object from mutils
    """

    #Build the config object, then load the truth file it references
    RunConfig = parse_sampleConfig(XMLConfig)
    pp = m.doload(RunConfig.truth_file)
    return pp, RunConfig

def formatMeas(filename,pp):

    """
    Loads the replay (.T04) data with mutils and time-aligns it with the truth.

    Inputs:
        filename: path to the T04 data (without the .T04 extension)
        pp: truth data array

    Outputs:
        ref_pos: truth rows whose time tags match the replay data
        meas: replay data object, trimmed to the matching epochs

    """
    #Load the replay measurements (yields d,k fields rather than a single "meas")
    meas = m.vd2cls(filename + '.T04','-d35:2')

    #Intersect the truth and DUT time tags; epochs present in only one of the
    #two sets are discarded.
    _, idx_truth, idx_meas = m.intersect(pp[:, dPP_TIME], meas.TIME)
    if len(idx_truth) != len(idx_meas):
        print("Position Error")

    #Keep only the matching epochs on both sides
    ref_pos = pp[idx_truth, :]
    meas = meas[idx_meas, :]

    return ref_pos, meas

def plotHeightError(meas,ref_pos,err_2d,err_3d,filename,plotDirPath):

    """
    Computes the height error and plots its time-history and histogram.

    Inputs:
        meas: meas object of replay data
        ref_pos: truth data array
        err_2d: 2D error array (unused here, kept for a consistent signature)
        err_3d: 3D error array
        filename: name of the dataset for the plot title
        plotDirPath: path to the directory to save the plots in

    Outputs:
        diff: height error array

    """

    #Height dominates the 3D error, so restrict to epochs with 3D error <= 20 cm
    good = m.find(err_3d <= 0.2)
    diff = meas[good, meas.k.LAT + 2] - ref_pos[good, dPP_HGT]

    #Left panel: height error time history
    plt.figure()
    plt.subplot(1, 2, 1)
    plt.plot(meas.TIME[good], diff)
    plt.ylabel('Height error [m]')
    plt.xticks(rotation=45, ha="right")
    plt.grid(True, zorder=0)

    #Right panel: height error histogram
    plt.subplot(1, 2, 2)
    plt.hist(diff)
    plt.xlabel('Height error [m]')
    plt.grid(True, zorder=0)

    #Format and save the figure
    plt.suptitle(filename)
    plt.tight_layout()
    m.save_compressed_png(plotDirPath + '/Height Error.png', dpi=300)
    return diff

def errorBarChart(dE,dN,dU,filename,plotDirPath):
    """
    Computes the 2D and 3D error and plots the percentage of epochs under each
    threshold as side-by-side stacked bar charts.

    Inputs:
        dE: east error array
        dN: north error array
        dU: up error array
        filename: name of the dataset for the plot title
        plotDirPath: path to the directory to save the plots in

    Outputs:
        err_2d: 2D error array
        err_3d: 3D error array

    """

    #2D and 3D error magnitudes from the ENU components
    err_2d = np.sqrt(dE**2 + dN**2)
    err_3d = np.sqrt(dE**2 + dN**2 + dU**2)

    thresholds = [0.05, 0.1, 0.15]
    denom = len(dE) #Total number of epochs collected

    #Row 0 = 2D error, row 1 = 3D error. Start with cumulative percentages
    #under each threshold, then difference them into stacked-bar segments.
    data = np.empty((2, 3))
    for row, err in enumerate((err_2d, err_3d)):
        data[row, :] = [(100.0 * len(m.find(err <= t))) / denom for t in thresholds]
        data[row, 2] -= data[row, 1]
        data[row, 1] -= data[row, 0]

    #Draw the two stacked bar charts
    plt.figure()
    for row, label in enumerate(('2D', '3D')):
        plt.subplot(1, 2, row + 1)

        plt.bar(filename, data[row, 0], label='<=5cm', zorder=3)
        bottom = data[row, 0]
        plt.bar(filename, data[row, 1], bottom=bottom, label='<=10cm', zorder=3)
        bottom += data[row, 1]
        plt.bar(filename, data[row, 2], bottom=bottom, label='<=15cm', zorder=3)
        bottom += data[row, 2]

        plt.xticks(rotation=45, ha='right')
        plt.yticks(np.arange(0, 100, 10))
        plt.ylabel(label + ' Error <= Threshold [%]')
        plt.title(label + ' Error')
        plt.legend(loc='lower left')
        xmin, xmax, ymin, ymax = plt.axis()
        if ymax > 100.0:
            plt.ylim([ymin, 100.0])
        plt.grid(True, zorder=0)
        plt.tight_layout()

    #Save plot
    m.save_compressed_png(plotDirPath + '/2D_3D Error.png', dpi=300)
    return err_2d, err_3d

def getFreewayData(RunConfig,ref_pos,meas):

    """
    Modified from dataSummary.py. Finds overpasses and freeway sections and splits the
    data into "clear freeway" (on the freeway, not under an overpass) and overpass subsets.

    Inputs:
        RunConfig: object from mutils; RunConfig.span holds (description, start/stop list) event pairs
        ref_pos: truth data array
        meas: meas object of replay data

    Outputs:
        meas_clear: meas object with only "clear freeway" data
        meas_overpasses: meas object with only overpass data
        ref_clear: truth data array with only "clear freeway" data
        ref_overpasses: truth data array with only overpass data

    """

    #Boolean per measurement epoch: True while on a freeway span
    spans = np.zeros(len(meas),dtype=bool)

    #Iterate through all stop and start points of events and find the freeway sections
    for desc,start_stops in RunConfig.span:
    # For now, we only want to process the freeway data.
        if desc!='Freeways' and desc!='Freeway': #Some datasets have this labeled with the singular, and some with the plural
            continue
        for start,stop in start_stops: #For each start and stop during the freeway collection
            i_d = m.find( (meas.TIME>=start) & (meas.TIME<=stop) ) #Get the epochs of the start and stop time for freeway
            spans[i_d] = True #Boolean indicating where the stretches of freeway are
    

    #Find overpasses so we can separate them from the clear freeway data
    # Get the overpass data (overpass_pos is unused here; only the events are needed)
    overpass_pos, events = find_overpasses(ref_pos,False)


    # Boolean per measurement epoch, set to True for all epochs whose time
    # tags match an overpass event (events come from find_overpasses above;
    # presumably they cover +/-200m around each overpass — TODO confirm)
    overpasses = np.zeros(len(meas),dtype=bool)
    for thisEvent in events:
        _,i_event,i_d = m.intersect( thisEvent.TIME, meas.TIME )
        overpasses[i_d] = True


    # On the freeway (spans == True) AND under an overpass
    i = m.find(overpasses & spans) #Intersect overpasses and freeway
    meas_overpasses = meas[i]

    # On the freeway, but NOT under an overpass
    i = m.find((overpasses == False) & spans) #Intersect NOT overpasses and freeway (i.e. freeway only)

   
    meas_clear = meas[i]

   # Align the truth data with the overpass measurement epochs
    _,i_pp,i_d = m.intersect( ref_pos[:,dPP_TIME], meas_overpasses.TIME )
    if len(i_pp) != len(i_d):
        print("Position Error")


    ref_overpasses = ref_pos[i_pp,:]

    # Align the truth data with the clear-freeway measurement epochs
    _,i_pp,i_d = m.intersect( ref_pos[:,dPP_TIME], meas_clear.TIME )
    if len(i_pp) != len(i_d):
        print("Position Error")

    ref_clear = ref_pos[i_pp,:]


    return meas_clear, meas_overpasses, ref_clear, ref_overpasses
   
def enuErrorPlots(dE,dN,dU,ref_clear,meas,filename,plotDirPath):
    """
    Plots the time history of the ENU error and the heading.

    Inputs:
        dE: east error array
        dN: north error array
        dU: up error array
        ref_clear: truth data array
        meas: meas object of replay data
        filename: name of the dataset for the plot title
        plotDirPath: path to the directory to save the plots in

    No Outputs

    """

    #Only show epochs with <= 20cm error per axis. These plots are a quick look
    #for along/cross-track error or a per-direction bias, so the coarse per-axis
    #threshold is good enough; a 3D <= 20cm cut could be used for finer analysis.
    plt.figure()
    for panel, (err, axis_name) in enumerate(zip((dE, dN, dU), ('East', 'North', 'Up'))):
        keep = m.find(abs(err) <= 0.2)
        plt.subplot(2, 2, panel + 1)
        plt.plot(meas.TIME[keep], err[keep])
        plt.xlabel('Time')
        plt.xticks(rotation=45, ha="right")
        plt.ylabel(axis_name + ' [m]')
        plt.grid(True, zorder=0)

    #Fourth panel: the POSLV heading, wrapped to [0, 360) degrees
    plt.subplot(2, 2, 4)
    plt.plot(meas.TIME, np.mod(ref_clear[:, 12], 360))
    plt.xlabel('Time')
    plt.xticks(rotation=45, ha="right")
    plt.ylabel('Heading [deg]')
    plt.grid(True, zorder=0)

    #Format the figure
    plt.tight_layout()
    plt.suptitle(filename + ' ENU Error')
    plt.subplots_adjust(top=0.9)

    #Save plot
    m.save_compressed_png(plotDirPath + '/ENU Error Time History' + '.png', dpi=300)

def enuHistograms(dE,dN,dU,filename,plotDirPath):

    """
    Plots histograms of the East, North, and Up error.

    Inputs:
        dE: east error array
        dN: north error array
        dU: up error array
        filename: name of the dataset for the plot title
        plotDirPath: path to the directory to save the plots in

    No Outputs

    """

    #Only show epochs with <= 20cm error per axis; these histograms are a quick
    #look for a per-direction bias, so the coarse per-axis threshold suffices.
    plt.figure()
    for panel, (err, axis_name) in enumerate(zip((dE, dN, dU), ('East', 'North', 'Up'))):
        keep = m.find(abs(err) <= 0.2)
        plt.subplot(1, 3, panel + 1)
        plt.hist(err[keep])
        plt.yscale('log')
        plt.xlabel('Error [m]')
        plt.ylabel('Num Epochs')
        plt.title(axis_name)
        plt.grid(True, zorder=0)

    #Format the figure
    plt.tight_layout()
    plt.suptitle(filename + ' ENU Error')
    plt.subplots_adjust(top=0.9)

    #Save plot
    m.save_compressed_png(plotDirPath + '/ENU Histograms' + '.png', dpi=300)

def medianAdjust(llh,llh_ref,adjust = 'enu'):

    """
        Does an ENU adjustment based on the median of the error in each direction

        Inputs:
            llh: array with lat,long,height information of the replay data
            llh_ref: array with lat,long,height information of the reference data
            adjust (default: 'enu'): string naming the directions to adjust; any
                combination of the characters 'e', 'n', 'u' (i.e., 'en' would
                indicate East and North adjustments only). Axes not named — and
                any unrecognized string — get a zero correction.

        Outputs:
            llh_corrected: reference llh corrected with the selected adjustment directions
            enuCorr: nEpochs x 3 array of the ENU adjustment applied

        """
    #Get the e,n,u error in each direction
    (e,n,u) = m.llh2enu(llh,llh_ref)

    #BUGFIX: start from zeros rather than ones, so unselected axes (and any
    #unrecognized `adjust` string) apply no correction instead of a stray 1 m
    #shift in every direction.
    enuCorr = np.zeros((len(llh),3))
    print(adjust) #Print the input adjustment so the user can verify that the correct adjustment is being made

    #Per-character selection covers all the previously accepted combinations
    #('enu', 'e', 'n', 'u', 'en', 'eu', 'nu') with identical results, plus any
    #other ordering of those characters.
    if 'e' in adjust:
        enuCorr[:,0] = median(e)
    if 'n' in adjust:
        enuCorr[:,1] = median(n)
    if 'u' in adjust:
        enuCorr[:,2] = median(u)

    #Separate out lat,lon,height to do an enu adjustment and return the corrected llh and its parameters
    lat = llh_ref[:,0]
    lon = llh_ref[:,1]
    hgt = llh_ref[:,2]
    llh_corrected = llh_enu_adjust(lat,lon,hgt,enuCorr)
    return llh_corrected, enuCorr

def itrf2000_itrf2014(pos,epoch):

    """
        Converts llh from ITRF2000 --> ITRF2014. Assumes the input is in ITRF2000.

        Inputs:
            pos: Sunnyvale format POSPac output data
            epoch: current date as a "dateAsYear" number from itrf

        Outputs:
            llhout: llh translated into ITRF2014 from ITRF2000

        """

    #Build the datum transformation for the requested epoch
    tx = itrf.Transformation(to_itrf='ITRF2014', from_itrf='ITRF2000').atDate(epoch)

    #Convert lat/lon/height to ECEF and reshape into [[x,y,z], ...] rows
    xyz = m.conv_lla_ecef(pos[:,dPP_LAT], pos[:,dPP_LON], pos[:,dPP_HGT])
    ecef_rows = [[xyz[0][i], xyz[1][i], xyz[2][i]] for i in range(len(pos))]

    #Transform the ECEF coordinates and convert each row back to lat/lon/height
    transformed = tx.transform(ecef_rows)
    llhout = np.empty((len(pos), 3))
    for i in range(len(pos)):
        lla = m.conv_ecef_lla(transformed[i][0], transformed[i][1], transformed[i][2])
        llhout[i, 0] = lla[0]
        llhout[i, 1] = lla[1]
        llhout[i, 2] = lla[2]
    return llhout

def llh_enu_adjust(lat,lon,hgt,enuCorr):

    """
        Adjusts llh based on an ENU correction matrix.

        Uses a first-order (small-offset) conversion of the metric east/north
        corrections into longitude/latitude offsets on the WGS-84 ellipsoid;
        higher-order terms of the Taylor expansion are ignored. (A nonlinear
        solve was prototyped and dropped as not worth the computational cost.)

        Inputs:
            lat: array of latitude in degrees
            lon: array of longitude in degrees
            hgt: array of height in m
            enuCorr: nEpochs x 3 correction array; columns are east, north, up [m]

        Outputs:
            llh: nEpochs x 3 adjusted array; columns are lat [deg], lon [deg], hgt [m]

    """

    #Work in radians for the curvature computations below
    lat = np.deg2rad(lat)
    lon = np.deg2rad(lon)

    #WGS-84 ellipsoid constants and radii of curvature
    A_WGS84 = 6378137.0          #semi-major axis [m]
    E2_WGS84 = 6.69437999013e-3  #first eccentricity squared
    W = np.sqrt( 1 - E2_WGS84*np.sin(lat)**2)
    N = A_WGS84/W                      #prime-vertical radius of curvature
    M = A_WGS84*(1 - E2_WGS84)/W**3    #meridional radius of curvature

    dllh = np.zeros((len(M),3))

    #Compute metric components (ignores higher order terms from the Taylor expansion)
    dllh[:,1] = enuCorr[:,0]/(N*np.cos(lat))  #east [m] -> dlon [rad]
    dllh[:,0] = enuCorr[:,1]/M                #north [m] -> dlat [rad]
    dllh[:,2] = enuCorr[:,2]                  #up [m] -> dhgt [m]

    #Apply the offsets and convert the angles back to degrees
    llh = np.zeros((len(M),3))
    llh[:,0] = (lat + dllh[:,0])*(180/math.pi)
    llh[:,1] = (lon + dllh[:,1])*(180/math.pi)
    llh[:,2] = hgt + dllh[:,2]

    return llh

def plotCrossAlongError(dE,dN,dU,ref_data,err_2d,err_3d,filename,plotDirPath):

    """
    Plots the along/cross error where the 2D and 3D error <= 50cm. Returns the x,y,z error in the vehicle frame

    Inputs:
        dE: east error array
        dN: north error array
        dU: up error array
        ref_data: truth data array
        err_2d: 2D error array
        err_3d: 3D error array
        filename: name of the dataset for the plot title
        plotDirPath: path to the directory to save the plots in

    Outputs: 
        (x_v, y_v, z_v): x,y,z error in the vehicle frame

    """


    #Epochs where both the 2D and the 3D error are <= 50 cm
    i2 = m.find(err_2d <= 0.5)
    i3 = m.find(err_3d <= 0.5)
    underThresh = m.intersect(i2,i3)[0]

    #Roll, pitch, yaw in radians for the vehicle-to-navigation rotation matrix
    roll = ref_data[:,10]*(math.pi/180)
    pitch = ref_data[:,11]*(math.pi/180)
    yaw = ref_data[:,12]*(math.pi/180)

    cr = np.cos(roll)
    sr = np.sin(roll)
    cp = np.cos(pitch)
    sp = np.sin(pitch)
    cy = np.cos(yaw)
    sy = np.sin(yaw)

    #Vehicle-to-navigation direction cosine matrix (yaw-pitch-roll Euler sequence)
    v2n11 = cp*sy
    v2n12 = cr*cy + sr*sp*sy
    #BUGFIX: was `-sr*cy + sr*sp*sy`; the second term must use cr (compare
    #v2n23 = sr*sy + cr*sp*cy). With the sr typo the matrix is not orthonormal
    #(the third column's norm is not 1), corrupting z_v below.
    v2n13 = -sr*cy + cr*sp*sy
    v2n21 = cp*cy
    v2n22 = -cr*sy + sr*sp*cy
    v2n23 = sr*sy + cr*sp*cy
    v2n31 = sp
    v2n32 = -sr*cp
    v2n33 = -cr*cp

    #Rotate the ENU error into the vehicle frame
    x_v = v2n11*dE + v2n21*dN + v2n31*dU
    y_v = v2n12*dE + v2n22*dN + v2n32*dU
    z_v = v2n13*dE + v2n23*dN + v2n33*dU

    #Plot cross-track, along-track, and height error time histories, then save
    plt.figure()
    plt.subplot(3,1,1)
    plt.plot(ref_data[underThresh,dPP_TIME],x_v[underThresh])
    plt.ylabel('Cross track error [m]')
    plt.grid(True,zorder=0)

    plt.subplot(3,1,2)
    plt.plot(ref_data[underThresh,dPP_TIME],y_v[underThresh])
    plt.ylabel('Along track error[m]')
    plt.grid(True,zorder=0)

    plt.subplot(3,1,3)
    plt.plot(ref_data[underThresh,dPP_TIME],z_v[underThresh])
    plt.ylabel('Height error [m]')
    plt.grid(True,zorder=0)

    plt.tight_layout()
    plt.suptitle(filename + 'Along Cross Track Error')
    plt.subplots_adjust(top=0.9)
    m.save_compressed_png(plotDirPath + '/AlongCrossError.png', dpi=300)

    return (x_v,y_v,z_v)

def alongCrossAdjust(x_v,y_v,z_v,ref_data):

    """
    Adjusts the truth llh according to the median of the along/cross error

    Inputs:
        x_v: x error in the vehicle frame
        y_v: y error in the vehicle frame
        z_v: z error in the vehicle frame
        ref_data: truth data array

    Outputs: 
        llh: array with lat,lon,height information, corrected according to the along/cross track error

    """

    #Median body-frame error: a constant offset in the vehicle frame
    x = median(x_v)
    y = median(y_v)
    z = median(z_v)

    #Yaw-only rotation from the vehicle frame to ENU (roll/pitch ignored)
    yaw = ref_data[:,12]*(math.pi/180)
    cy = np.cos(yaw)
    sy = np.sin(yaw)
    #BUGFIX: these arrays were previously assigned to `z` and `o`, and the
    #`z` assignment clobbered median(z_v) computed above.
    zero = np.zeros(yaw.shape)
    one = np.ones(yaw.shape)
    v2n11 = sy
    v2n12 = cy
    v2n13 = zero
    v2n21 = cy
    v2n22 = -sy
    v2n23 = zero
    v2n31 = zero
    v2n32 = zero
    v2n33 = -one

    #Rotate the median vehicle-frame offset into ENU.
    #BUGFIX: the original used `z` (by then the zeros array) in place of `x`
    #in `second` and `third`, so the x and z medians never contributed.
    first = v2n11*x + v2n21*y + v2n31*z
    second = v2n12*x + v2n22*y + v2n32*z
    third = v2n13*x + v2n23*y + v2n33*z

    #Determine the enu correction matrix and adjust the llh according to the along/cross track error
    enuCorr = np.empty((len(ref_data),3))
    enuCorr[:,0] = first
    enuCorr[:,1] = second
    enuCorr[:,2] = third
    llh = llh_enu_adjust(ref_data[:,dPP_LAT],ref_data[:,dPP_LON],ref_data[:,dPP_HGT],enuCorr)
    return llh

def plotCDF(err_2d, err_3d,filename,plotDirPath):

    """
    Plots the CDF of the 2D and 3D error for errors <= 20 cm.

    Inputs:
        err_2d: 2D error array
        err_3d: 3D error array
        filename: name of the dataset for the plot title
        plotDirPath: path to the directory to save the plots in

    No Outputs

    """

    #One CDF curve per error array, restricted to errors <= 20 cm
    plt.figure()
    for err, label in ((err_2d, "2D"), (err_3d, "3D")):
        keep = m.find(err <= 0.2)
        cdf_x, cdf_y = m.docdf(err[keep])
        plt.plot(cdf_x, cdf_y, label=label)

    #Axis labels and titles (the dataset name replaces the 'Clear data' title)
    plt.xlabel('Error [m]')
    plt.ylabel('CDF')
    plt.title('Clear data')
    plt.grid(True)
    plt.title(filename)

    #Format and save
    plt.tight_layout()
    plt.legend(loc = "lower right")
    m.save_compressed_png(plotDirPath + '/2D_3D CDF.png', dpi=300)