#!/usr/bin/env python

######################################################################
# Copyright Trimble 2017-2019
#
# This script is meant to work in conjunction with web/RFapp/RFapp.py
# The web app sends requests over a socket to us, and we respond with
# the status of the Spirent RF logger or regression system.
#
# Start at "run_server()" (and then handle_req()) to get an idea of
# what the system can do.
######################################################################

import os, sys, traceback, signal, re
import pickle
import time
import RXTools as rx
import ReplayJamControl
import SpirentTools as ST
import subprocess
from datetime import datetime
import multiprocessing
import multiprocessing.pool
import multiprocessing.connection as mp_con
import json
import smtplib, email.mime.text, email.mime.multipart, email.mime.application
import glob
import replayCorr
import replayIMU
import random
import shutil
import copy
from RunConfig import get_config_xml
from functools import partial, total_ordering
import psutil
import inspect
import paramiko
import urllib.request

try:
  import xml.etree.cElementTree as ET
except ImportError:
  import xml.etree.ElementTree as ET

try:
  import AGRXTools as agrx
except:
  print("Error. Import AGRXTools failed")


# Module-level diagnostic flag: when True, fn() returns the caller's name
# for log prefixes (see fn() below).
verbose = False
# Behavior flags consumed elsewhere in this file (not in every chunk shown).
run_build_install_in_background = True
run_setup_in_parallel = True
DCOL_port = 5017 # This port should be read/write on all receivers
DCOL_port3 = 5018 # This port should be read/write on all receivers
DCOL_port2 = 28001 # This port should be read/write on all receivers
agrx_access_lock = multiprocessing.Lock() # Global lock for Ag receiver. Tsip_core allows one connection at a time

# Output file for jammer diagnostics written by run_jammer().
jam_diag_filename = 'jam_diags.txt'


def fn(forcePrt=False):
  '''Return the calling function's name formatted as "name()::".

  Inputs: forcePrt = True/False - when True, return the name even if the
          module-level `verbose` flag is off.
  Returns "" when neither verbose nor forcePrt is set.
  '''
  if not verbose and not forcePrt:
    return ""
  # Frame 0 is fn() itself; frame 1 is whoever called fn().
  outer = inspect.getouterframes(inspect.currentframe())
  return outer[1].function + "()::"

def wrap_use_agrxtool(func, *args, **kwargs ):
  '''AGRXTools uses tsip_core which allow one serial connection at a time. Lock is used to keep thread safe.
     Inputs:  func = function to be called after getting lock
              loginfo = ST.SpirentTools.LogInfo = where to log diagnostics
              device = rx object read from config.xml
              try_time = how long does it keep trying to acquire lock
              dt = delta time between each try
     Return func() if success
     Raises RuntimeError if the lock is never acquired within try_time, or
     (wrapping the original exception) if func itself raises.
     eg. wrap_use_agrxtool(f, (1,2,3), loginfo=loginfo, device=device, try_time=10, dt=2)
  '''
  # All four keyword arguments are required; a missing one raises KeyError.
  loginfo = kwargs['loginfo']
  device = kwargs['device']
  try_time = int(kwargs['try_time'])
  dt = int(kwargs['dt'])

  num_try = int(try_time/dt)
  acq_res = None

  # Poll the global lock (non-blocking) every dt seconds, up to try_time total.
  while num_try > 0:
    if loginfo is not None: loginfo.print_(fn()+"Device #",device.n,"Acquiring agrx lock ...")
    acq_res = agrx_access_lock.acquire(block=False)
    if acq_res:
      break
    time.sleep(dt)
    num_try -= 1
  if acq_res:
    if loginfo is not None: loginfo.print_(fn()+"Device #",device.n,"Acquired agrx lock")
    try:
      # NOTE(review): passes the collected positional args as ONE tuple
      # argument (func(args)); func(*args) may have been intended --
      # confirm against callers before changing.
      rt = func(args)
    except Exception as e:
      # Always release the lock, then surface the failure to the caller.
      if loginfo is not None: loginfo.print_(fn()+"Device #",device.n,"Release agrx lock after failure")
      agrx_access_lock.release()
      raise RuntimeError(e)
    if loginfo is not None: loginfo.print_(fn()+"Device #",device.n,"Release agrx lock after success")
    agrx_access_lock.release()
    return rt
  else:
    if loginfo is not None:
      loginfo.print_(fn()+"Device #",device.n,"Acquiring agrx lock failed")
    else:
      print( fn()+"Device #",device.n,"Acquiring agrx lock failed" )
    raise RuntimeError(fn()+"Device #",device.n,"Acquiring agrx lock failed")

def email_file(subject,csv_filename):
  '''Email csv_filename as an attachment to the configured recipients.

  Inputs: subject = email subject
          csv_filename = attached file name
  No-op when the config lists no email recipients.
  '''
  cfg = get_config_xml(None,True)
  if len(cfg.email_recipients) == 0:
    return

  sender = 'RFregression@fermion.eng.trimble.com'
  msg_text = "see attached file: {}".format(csv_filename)

  msg = email.mime.multipart.MIMEMultipart()
  msg['Subject'] = subject
  msg['From'] = sender
  msg['To'] = ', '.join(cfg.email_recipients)
  msg.attach(email.mime.text.MIMEText(msg_text, 'plain'))
  with open(csv_filename,'rb') as file:
    msg.attach(email.mime.application.MIMEApplication(file.read(), Name=csv_filename))

  # Context manager guarantees the SMTP connection is closed (QUIT sent)
  # even if sendmail raises; the original leaked the socket on failure.
  with smtplib.SMTP( cfg.email_host ) as s:
    s.sendmail(sender, cfg.email_recipients, msg.as_string())

def getSSDWearLevelingCount(MOUNTPOINT='/media/GSS6450_Data'):
  '''Query SSD wear-leveling counters for the drive(s) behind a mount point.

  Inputs: MOUNTPOINT = mount point string, e.g. '/media/GSS6450_Data'
  Returns: list of dicts with device model, serial number, wear-out counter
           and device file path, or None on any failure.
  ssh to spirent, find corresponding device file path and run smartctl.
  '''
  # Resulting CSV row shape, e.g.:
  #     date,       device name,              device serial number, wear out ctr
  #     11/23/2021, Samsung SSD 850 PRO 2TB,  S3D4NX0J708814R,      099
  loc_st = ST.SpirentTools(False)
  paths = []
  output = None

  # Map the mount point back to its physical /dev/sdX device(s) via lsblk.
  cmd = "lsblk"
  try:
    output = loc_st.do_ssh_cmd(cmd, True)
  except Exception:
    print("Ssh-cmd failed:",cmd)
    return None

  try:
    stk = []
    for l in output:
      if MOUNTPOINT in l:
        arr = l.split()
        if len(arr) != 7 or arr[-1].strip() != MOUNTPOINT:
          print("Bad return", l)
          return None
        # Walk back up the lsblk listing to the parent 'sdX' device line.
        while stk:
          prev = stk.pop()
          if prev[:2] == 'sd':
            paths.append('/dev/' + prev.split()[0])
            break
        # No early break: RAID volumes can appear under two mount lines.
      stk.append(l)
  except Exception:
    print("Retrive disk files path failed:",output)
    return None

  # Pull model / serial / wear counter out of each smartctl report.
  ds = []
  for path in paths:
    cmd = "smartctl -d sat -a " + path
    # (search key, token index after split, split delimiter or whitespace)
    keys = [("Device Model:", 1, ':'), ("Serial Number:",1,':'), ("Wear_Leveling_Count",3,None)]
    d = {"Path": path}
    try:
      output = loc_st.do_ssh_cmd(cmd, True)
      for line in output:
        for key, col, delimiter in keys:
          if key in line:
            # r'\s+' avoids the invalid-escape warning raised by '\s'
            d[key] = re.sub(r'\s+',' ', line.split(delimiter)[col]).strip()
    except Exception:
      print("failed. Exit.")
      return None
    ds.append(d)
  return ds

def logSSDWearLevelingCount(csv_filename='SSDWearLeveling.csv', MOUNTPOINTS=None, send_email=True):
  '''Append today's SSD wear-leveling counters to a CSV and optionally email it.

  Inputs:
      csv_filename = csv file to record Wear Leveling Count
      MOUNTPOINTS = list of mount points for the drives; defaults to
                    ['/media/GSS6450_Data', '/home', '/media/RAID5']
      send_email = True/False whether to send email to cfg.email_recipients
  Aborts early (without emailing) if any mount point cannot be queried.
  '''
  import csv
  from datetime import date

  # None default avoids the shared-mutable-default-argument pitfall.
  if MOUNTPOINTS is None:
    MOUNTPOINTS = ['/media/GSS6450_Data', '/home', '/media/RAID5']

  header = ["Date","Device Model","Serial Number","Wear_Leveling_Count", "Device Files", "Mount Point"]
  mmddyy = date.today().strftime("%m/%d/%Y")

  def get_line(fname, n):
    # Return line n of fname without its trailing newline.
    with open(fname) as f:
      return f.readlines()[n].rstrip()

  for MOUNTPOINT in MOUNTPOINTS:
    ds = getSSDWearLevelingCount(MOUNTPOINT)
    if ds is None:
      return

    # Create the CSV with a header row on first use.
    if not os.path.exists(csv_filename):
      with open(csv_filename, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(header)
    with open(csv_filename,'a', newline='\n') as csvfile:
      spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
      for d in ds:
        spamwriter.writerow( [mmddyy, d["Device Model:"], d["Serial Number:"], d["Wear_Leveling_Count"], d['Path'], MOUNTPOINT])

    # Legacy files lacked the "Mount Point" column: rewrite the header row once.
    if "Mount Point" not in get_line(csv_filename,0):
      print("Update first row in csv file")
      with open(csv_filename, 'r') as csvfile:
        line_list = list(csv.reader(csvfile))
      with open(csv_filename,'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for idx, row in enumerate(line_list):
          spamwriter.writerow(header if idx == 0 else row)

  if send_email:
    subject = 'RF regression weekly SSD wear leveling count report'
    email_file(subject, csv_filename)

def get_elev_mask_from_receiver(device):
  '''Query the receiver itself for its current elevation mask.'''
  ip, user, pw = device.ip, device.user, device.pw
  return rx.GetElev(ip, user, pw)

def get_elev_mask_from_xml(device):
  '''Read the elevation mask from the receiver's settings clone XML.

  Inputs: device = object whose .settingsClone is the clone XML path.
  Returns the mask as int (third whitespace-separated token of the
  "GeneralControls" APP_RECORD text), or None if that record is absent.
  '''
  raw = ET.parse(device.settingsClone).getroot()
  # Single lookup (original called find() twice) and identity None test.
  rec = raw.find('./APP_RECORD[@name="GeneralControls"]')
  if rec is None:
    return None
  return int(rec.text.split()[2])

def getTestFileInfo(st, get_file_stats, single_file=None):
  """Get information about all (or one) XML scenario file(s).
  Returns:  dictionary with list of info on all scenarios.
  Input: st = SpirentTools-like object (file info / copy-speed queries).
         get_file_stats = True -> Return info on all files & sizes (slower).
                          False -> Only return basic scenario info (fast!).
         single_file = if set, only analyze this one XML scenario file."""
  copy_percent = 0.0
  if get_file_stats:
    f_db = st.get_file_info()

  # copy_status.json (written by whatever drives the copy) names the scenario
  # currently being copied; used below to compute a copy-progress percentage.
  curr_copy_filename = ""
  if os.path.isfile("copy_status.json"):
    with open("copy_status.json") as f:
      curr_copy_filename = json.load( f )['filename']

  file_list = sorted(glob.glob("Samples/*.xml"))
  if single_file is not None:
    file_list = ["Samples/" + single_file]
  info = []
  usb_device_with_most_space = st.get_usb_device_with_most_space()
  for filename in file_list:
    raw = ET.parse(filename).getroot()
    curr = {}
    curr['filename'] = filename.replace('Samples/','')
    curr['desc'] = raw.find('desc').text
    curr['hours'] = float(raw.find('gss6450_playback/hours').text)
    reg_runs = raw.find('regression_runs')
    if reg_runs is not None:
      curr['regression_runs'] = reg_runs.text
    curr['RF_files'] = []
    # Per-location byte totals of the scenario's RF files.
    sum_server = sum_usb = sum_ssd_ext = sum_ssd_int = sum_play = 0
    usb_device = None  # Are the files already on a USB device?
    if get_file_stats:
      curr['locked'] = False
      curr['play_drive'] = ""
    for rf in raw.findall('gss6450_playback/file'):
      RF_filename = rf.text
      base_RF_filename = os.path.basename(RF_filename)
      file_info = {'name':RF_filename}
      if get_file_stats:
        # Record the file's size at each location where it exists; the
        # location with the largest total is treated as the complete copy.
        if os.path.isfile(RF_filename):
          file_info['server'] = os.stat(RF_filename).st_size
          sum_server += file_info['server']
        if base_RF_filename in f_db['usb']:
          file_info['usb'] = f_db['usb'][base_RF_filename].size
          sum_usb += file_info['usb']
          usb_device = f_db['usb'][base_RF_filename].device
        if base_RF_filename in f_db['play_usb']:
          file_info['play_usb'] = f_db['play_usb'][base_RF_filename].size
          curr['locked'] = f_db['play_usb'][base_RF_filename].locked
          sum_play += file_info['play_usb']
        if base_RF_filename in f_db['ext']:
          file_info['ssd_ext'] = f_db['ext'][base_RF_filename].size
          curr['locked'] = f_db['ext'][base_RF_filename].locked
          sum_ssd_ext += file_info['ssd_ext']
        if base_RF_filename in f_db['int']:
          file_info['ssd_int'] = f_db['int'][base_RF_filename].size
          curr['locked'] = f_db['int'][base_RF_filename].locked
          sum_ssd_int += file_info['ssd_int']
      curr['RF_files'].append( file_info )
    # Get spirent recorded start time, stop time and observed drifting offset
    start_time_offset = 0 if raw.find('start_time_offset') == None else int( raw.find('start_time_offset').text )
    def get_start_stop_time_from_scn(raw):
      '''get start, stop time from scn file
      Returns (start, stop) as GPS seconds-of-week, or (-1, -1) when the
      referenced .scn file cannot be opened.'''
      StartGPSTime = 604800  # one GPS week in seconds, i.e. "later than any time-of-week"
      StopGPSTime = 0
      RF_filename = ""
      for rf in raw.findall('gss6450_playback/file'):
        if '.scn' in rf.text:
          RF_filename = rf.text

        def toGPSTime(time_str):
          # Convert to GPS time
          # NOTE(review): 'leapseconds' is a local/third-party helper --
          # assumed to provide utc_to_gps(datetime); confirm.
          import leapseconds
          utc_date = datetime.strptime(time_str.strip(),'%H:%M:%S %d/%m/%y')
          gps_epoch = datetime(1980,1,6)
          gps_time = leapseconds.utc_to_gps(utc_date) - gps_epoch
          return gps_time.total_seconds()

        if not os.path.exists(RF_filename):
          print("Warning! get_start_stop_time_from_scn() can't open ",RF_filename)
          return -1,-1
        f_scn = open(RF_filename, 'r')
        fs = f_scn.readlines()
        prev_l = ""
        for l in fs:
          if '<Start Time>' in l:
            tmp = l.split('<Start Time>')[1].replace('\n','').replace('GMT','')
            StartGPSTime = min(StartGPSTime, toGPSTime(tmp)%604800)
          if '<Stop Time>' in prev_l:
            # The stop timestamp appears on the line AFTER the <Stop Time> tag.
            tmp = l.replace('\n','').replace('Local time','')
            StopGPSTime = max(StopGPSTime, toGPSTime(tmp)%604800)
          prev_l = l
        f_scn.close()
      return StartGPSTime, StopGPSTime

    StartGPSTime, StopGPSTime = get_start_stop_time_from_scn(raw)
    if StartGPSTime >= StopGPSTime:
      if raw.find('skip_regression') == None:
        print("Warning! StartGPSTime > StopGPSTime.", StartGPSTime, StopGPSTime, filename)
        # No stop time is provided in .scn. Use stop time from .xml.
        StopGPSTime_from_xml = int(raw.find('timespan/stop').text)
        if StopGPSTime == 0:
          print("Warning! No <Stop Time> found in .scn file. Use stop time from xml file for this scenario.")
          StopGPSTime = StopGPSTime_from_xml

    curr['StartGPSTime'] = StartGPSTime + start_time_offset
    curr['StopGPSTime'] = StopGPSTime + start_time_offset
    if get_file_stats:
      max_sum = max( (sum_server, sum_usb, sum_ssd_ext, sum_ssd_int, sum_play) )
      curr['play_copy_hours'] = 0.
      curr['backup_hours'] = 0.
      # A location whose byte total equals max_sum is considered complete.
      if sum_usb == max_sum:
        curr['RF_usb'] = 'OK'
      else:
        curr['RF_usb'] = 'missing'
        curr['needs_copy'] = 1
      if sum_play == max_sum:
        curr['RF_ssd'] = 'play_usb'
        curr['play_drive'] = "U"
        if curr['locked'] and curr['RF_usb'] == 'missing':
          # If a file is locked, don't require 2 copies.
          curr['RF_usb'] = 'OK'
          curr['needs_copy'] = 0
      elif sum_ssd_int == max_sum:
        curr['RF_ssd'] = 'internal'
        curr['play_drive'] = "I"
      elif sum_ssd_ext == max_sum:
        curr['RF_ssd'] = 'external'
        curr['play_drive'] = "E"
      else:
        curr['RF_ssd'] = 'missing'
        curr['needs_copy'] = 1
      if sum_server == max_sum:
        curr['RF_server'] = 'OK'
      else:
        curr['RF_server'] = 'missing'
        curr['needs_copy'] = 1

      # If data is not on the USB, then any backup will go to the device with the most space
      # If no USB drive is present, we'll copy from the network.
      if usb_device_with_most_space is None:
        usb_backup_copy_bytes_per_sec = st.copy_speed['network']
      else:
        usb_backup_copy_bytes_per_sec = st.copy_speed[usb_device_with_most_space]
      if curr['RF_server'] == 'missing':
        # Need to copy to server over the network
        curr['backup_hours'] += max_sum / st.copy_speed['network'] / (60.*60.)
      if curr['RF_usb'] == 'missing' and curr['RF_ssd'] != 'missing':
        # Need to copy from SSD to USB
        curr['backup_hours'] += max_sum / usb_backup_copy_bytes_per_sec / (60.*60.)
      if curr['RF_usb'] == 'missing' and curr['RF_ssd'] == 'missing':
        # Need to copy from server to USB/SSD over the network
        curr['backup_hours'] += max_sum / st.copy_speed['network'] / (60.*60.)
      if curr['play_drive'] == '':
        # Calculate time just to copy to SSD and play, not copy file to all locations..
        if usb_device is None:
          usb_copy_bytes_per_sec = st.copy_speed['network']
        else:
          usb_copy_bytes_per_sec = st.copy_speed[usb_device]
        curr['play_copy_hours'] += max_sum / usb_copy_bytes_per_sec / (60.*60.)

      if curr['filename'] == curr_copy_filename:
        # Progress of the in-flight copy: fraction of the complete size
        # already present at the destination(s).
        if sum_server == max_sum:
          copy_percent = sum_usb*1.0 / max_sum
        elif sum_usb == max_sum:
          copy_percent = sum_server*1.0 / max_sum
        else:
          copy_percent = (sum_usb + sum_server) / (2.0*max_sum)
    curr['segs'] = []
    # De-duplicated list of timespan segment types declared by the scenario.
    for segs in raw.findall('timespan/type'):
      if segs.text not in curr['segs']:
        curr['segs'].append(segs.text)
    info.append(curr)
  all_info = {}
  all_info['tests'] = info
  all_info['regression'] = get_regression_status()
  # GPS week number since the GPS epoch (Jan 6 1980).
  all_info['week_num'] = int((datetime.now() - datetime(1980,1,6)).days/7)
  if get_file_stats:
    all_info['copy'] = {}
    all_info['copy']['filename'] = curr_copy_filename
    all_info['copy']['percent'] = copy_percent*100.0
  return all_info


def run_jammer( ref_rcvr_addr, RF_control_addr ):
  '''Inputs: ref_rcvr_addr = receiver IP address string.  Should not be jammed.
             RF_control_addr = RF jammer address:port string, e.g. "10.1.151.98:10001"
  Turns on/off jamming based on reference position.
  Logs diagnostics to 'jam_diag_filename'
  '''
  parts = RF_control_addr.split(':')
  jam_ip, jam_port = parts[0], int(parts[1])
  with open(jam_diag_filename,'w') as f_out:
    ReplayJamControl.loopUpdateJammer( jam_ip, jam_port, ref_rcvr_addr, f_out=f_out )

def replay_IMU( IPAddr, IMU_file, user_pass ):
  '''Play back IMU data to the receiver.

  Inputs: IPAddr = receiver IP address string
          IMU_file = x.T04 with with IMU rec 35:3/4
          user_pass = web login, e.g. "admin:password"
  '''
  target = "%s:%d" % (IPAddr, DCOL_port2)
  player = replayIMU.ReplayIMU(IMU_file, [target], user_pass)
  player.do_loop()

def replay_corrs( IPAddr, IPPort, corr_file, _log_failed_zda_msg_filename=None ):
  '''Play back RTCM/CMR rec98 corrections to the receiver's TCP/IP port.

  Inputs: IPAddr = receiver IP address string, IPPort = TCP port
          corr_file = x.DAT with with rec98
  '''
  dest = "socket://%s:%d" % (IPAddr, IPPort)
  player = replayCorr.ReplayCorr(corr_file, dest, verbose=False,
                                 log_failed_zda_msg_filename=_log_failed_zda_msg_filename)
  player.do_loop()


def start_logging( proc_q, loginfo, device, info, log_failed_zda_msg_filename=None, run_rtx=False, run_both=False ):
  '''Inputs: proc_q = Queue() - list of background processes started
             loginfo = ST.SpirentTools.LogInfo = where to log diagnostics
             device = Device object - info on current device
             info = ScenarioInfo object - info on current scenario
             log_failed_zda_msg_filename = optional log for failed ZDA msgs
             run_rtx = play the RTX correction file instead of the RTK one
             run_both = play both RTK and RTX files (GNSS receivers only)
  Start logging activities:
    - make sure no T04 files are present
    - set GPS ref week
    - playback any CMR/RTCM data
    - clear error log
    - start logging
  Returns the list of background multiprocessing.Process objects started;
  their PIDs are also pushed onto proc_q.
  '''
  def start_logging_for_GNSS_receiver( proc_q, loginfo, device, info, log_failed_zda_msg_filename, run_rtx=False, run_both=False):
    # Full setup path for Trimble GNSS receivers (web + DCOL interfaces).
    IPAddr = device.ip
    user = device.user
    password = device.pw
    user_pass = '%s:%s' % (user,password)
    proc_list = []
    loginfo.print_("start_logging for %s" % IPAddr)

    if info.IMU_corr_file and device.do_IMU:
      # If dev.imu_preference is set, change imu_corr_file to that value if it is found
      if device.imu_preference == 'HPC1' and info.HPC1_IMU_corr_file:
        selected_imu_file = info.HPC1_IMU_corr_file
      else:
        selected_imu_file = info.IMU_corr_file

      # update IMU calibration - reboots receiver
      loginfo.print_("Overwrite IMU calibration for %s" % IPAddr)
      r = replayIMU.ReplayIMU(selected_imu_file,
                              ["%s:%d"%(IPAddr,DCOL_port2)],
                              user_pass )
      try:
        r.best_calibration_update()
        loginfo.print_("Done with IMU calibration for %s" % IPAddr)
        time.sleep(30) # RX0 sometimes raises Error by rx.DisableDefaultLogging(). Try sleep longer here
      except:
        loginfo.print_("Couldn't update IMU calibration for %s" % IPAddr)
        raise Exception("Couldn't update IMU calibration for "+str(IPAddr))


    # Receiver housekeeping before playback: stop logging, delete old T04
    # files, configure iono mitigation, reset the RTC and (optionally) the
    # GPS reference year.
    rx.DisableDefaultLogging(device.ip,device.user,device.pw)
    loginfo.print_("DisableDefaultLogging for %s" % IPAddr)
    rx.getFileListAndDelete(device.ip,device.user,device.pw,'/Internal/','T04')
    loginfo.print_("getFileListAndDelete for %s" % IPAddr)
    rx.EnableIonoMitigation(device.ip, device.user, device.pw, info.ionoMitigation)
    loginfo.print_("EnableIonoMitigation=%d for %s" % (info.ionoMitigation,IPAddr))
    rx.ResetRTC(device.ip,device.user,device.pw)
    if info.set_gps_year:
      rx.sendDColRefWeek( IPAddr, DCOL_port, info.set_gps_year )
      loginfo.print_("Set GPS year %d for %s:%d" % (info.set_gps_year,IPAddr,DCOL_port))
    if info.IMU_corr_file and device.do_IMU:
      time.sleep(5.0)
      loginfo.print_("start IMU for %s:%d" % (IPAddr,DCOL_port))
      proc = multiprocessing.Process(target=replay_IMU, args=(IPAddr,selected_imu_file,user_pass,))
      proc.start()
      time.sleep(1.0)
      proc_q.put(proc.pid)
      proc_list.append( proc )
    if info.RTK_corr_file and device.do_RTK:
      time.sleep(5.0)
      loginfo.print_("start RTCM/CMR for %s:%d" % (IPAddr,DCOL_port))
      all_corr_file = [info.RTK_corr_file]
      if run_both and info.RTX_corr_file is not None :
        loginfo.print_("use RTK and RTX correction file for %s:%d/%d" % (IPAddr, DCOL_port, DCOL_port3))
        all_corr_file = [info.RTK_corr_file, info.RTX_corr_file]
      elif run_rtx and info.RTX_corr_file is not None :
        loginfo.print_("use RTX correction file for %s:%d" % (IPAddr,DCOL_port))
        all_corr_file = [info.RTX_corr_file]

      # First correction file goes to DCOL_port, second (if any) to DCOL_port3.
      for corr_file,IPPort in zip(all_corr_file,[DCOL_port,DCOL_port3]):
        loginfo.print_(f"replay_corrs {IPAddr} {IPPort} {corr_file}")
        proc = multiprocessing.Process(target=replay_corrs, args=(IPAddr,IPPort,corr_file,log_failed_zda_msg_filename,))
        proc.start()
        time.sleep(0.5)
        proc_q.put(proc.pid)
        proc_list.append( proc )

    def clean_proc_list(proc_list, loginfo):
      # Kill every background process started above (and its children).
      loginfo.print_("Clean background processes: replay_IMU and/or replay_corrs")
      for proc in proc_list:
        if proc is not None:
          loginfo.print_("Stop PID {}".format(proc.pid))
          process = psutil.Process(proc.pid)
          for proc in process.children(recursive=True):
            proc.kill()
          process.kill()

    # dynamic model could be overwritten by ReplayIMU.__init__() and replay_IMU()
    # Change dynamic model with web GUI after IMU setting
    # Test shows sleep 10 sec or more will significantly avoid ChangeDynamicModel() running unsuccessfully
    try:
      time.sleep(30)
      rx.ChangeDynamicModel(device.ip,device.user,device.pw,info.dynamicModel)
      loginfo.print_("Sent ChangeDynamicModel(%s) for %s" % (info.dynamicModel, IPAddr))
      time.sleep(30)
      # Read the setting back to verify the change actually took effect.
      GET_dynamicModel = rx.GetDynamicModel(device.ip,device.user,device.pw)
      if info.dynamicModel != GET_dynamicModel:
        loginfo.print_("info.dynamicModel != rx.GetDynamicModel()")
        clean_proc_list(proc_list, loginfo)
        raise Exception("ChangeDynamicModel() unsuccessfully")
      else:
        loginfo.print_("ChangeDynamicModel(%s) successfully for %s" % (GET_dynamicModel, IPAddr))
    except:
      # On any failure, stop the background playback processes before raising.
      loginfo.print_("Couldn't ChangeDynamicModel(%s) for %s" % (info.dynamicModel, IPAddr))
      clean_proc_list(proc_list, loginfo)
      raise Exception("ChangeDynamicModel() failed")

    # Clear the error/warning log
    rx.ClearErrorLog(IPAddr,user,password)
    loginfo.print_("ClearErrorLog for %s" % IPAddr)
    # Enable logging
    rx.EnableDefaultLogging(IPAddr,user,password)
    loginfo.print_("EnableDefaultLogging for %s" % IPAddr)
    return proc_list

  def start_logging_for_Ag_receiver( proc_q, loginfo, device, info, log_failed_zda_msg_filename, run_rtx=False, run_both=False):
    # Reduced setup path for Ag receivers: correction playback only, using
    # the IO ports configured on device.ag.
    IPAddr = device.ip
    io_tsip = device.ag.io_TSIP
    io_rtk = device.ag.io_RTK_input
    io_zda = device.ag.io_NMEA_ZDA_output

    proc_list = []
    loginfo.print_(fn()+"Device #", device.n, "Start_logging for Ag receiver %s" % IPAddr)
    loginfo.print_(fn()+"Device #", device.n, IPAddr,"io_TSIP %s" % io_tsip)
    loginfo.print_(fn()+"Device #", device.n, IPAddr,"io_RTK_input %s" % io_rtk)
    loginfo.print_(fn()+"Device #", device.n, IPAddr,"io_NMEA_ZDA_output %s" % io_zda)
    loginfo.print_(fn()+"Device #", device.n, IPAddr,"device.do_RTK", device.do_RTK)

    if device.do_RTK:
      time.sleep(5.0)
      loginfo.print_(fn()+"Start RTCM/CMR for Ag receiver ",IPAddr,"at",io_rtk)

      def replay_corrs_ag( io_rtk, corr_file, io_zda ):
        '''Inputs: io_rtk = port for RTK corrections input (device.ag.io_RTK_input)
                  corr_file = x.DAT with with rec98 (or a list of files)
                  io_zda = port emitting NMEA ZDA (device.ag.io_NMEA_ZDA_output)
        Plays back RTCM/CMR rec98 to receiver TCP/IP port
        '''
        try:
          verbose_print = False
          log_nmea_filename = "log_nmea_nav900.txt" if verbose else None
          if isinstance(corr_file, list):
            # NOTE(review): the list case constructs and runs the playback
            # twice back-to-back; looks intentional but confirm.
            rc = replayCorr.ReplayCorr(corr_file, io_rtk, verbose_print, io_zda, log_nmea_filename, 115200)
            rc.do_loop()
            rc = replayCorr.ReplayCorr(corr_file, io_rtk, verbose_print, io_zda, log_nmea_filename, 115200)
            rc.do_loop()
          else:
            rc = replayCorr.ReplayCorr(corr_file, io_rtk, verbose_print, io_zda, log_nmea_filename, 115200)
            rc.do_loop()
        except:
          loginfo.print_(fn()+"Device #", device.n, "ReplayCorr() failed")

      corr_file = None
      # 'if False': the RTK+RTX dual-file path is deliberately disabled here,
      # so run_both is effectively ignored for Ag receivers.
      if False and run_both and info.RTX_corr_file is not None and info.RTK_corr_file is not None:
        corr_file = [info.RTK_corr_file, info.RTX_corr_file]
        loginfo.print_(fn()+"Device #", device.n,"use RTK/RTX correction file", corr_file)
      elif run_rtx and info.RTX_corr_file is not None:
        corr_file = info.RTX_corr_file
        loginfo.print_(fn()+"Device #", device.n,"use RTX correction file", corr_file)
      elif not run_rtx and info.RTK_corr_file is not None:
        corr_file = info.RTK_corr_file
        loginfo.print_(fn()+"Device #", device.n,"use RTK correction file", corr_file)
      else:
        loginfo.print_(fn()+"Device #", device.n, "Abort. RTX/RTK setting is incorrect.",device.do_RTK,run_rtx,info.RTX_corr_file,info.RTK_corr_file)
        raise RuntimeError("use RTX/RTX failed")

      proc = multiprocessing.Process(target=replay_corrs_ag, args=(io_rtk,corr_file,io_zda))
      proc.start()
      time.sleep(0.5)
      proc_q.put(proc.pid)
      proc_list.append( proc )

    return proc_list

  # Dispatch on receiver type (device.ag set => Ag receiver). Note the Ag
  # path is not passed run_both (it defaults to False there).
  if device.ag != None:
    if verbose: loginfo.print_(fn()+"Device #", device.n ,"is Ag receiver")
    return start_logging_for_Ag_receiver( proc_q, loginfo, device, info, log_failed_zda_msg_filename, run_rtx)
  else:
    if verbose: loginfo.print_(fn()+"Device #", device.n ,"is GNSS receiver")
    return start_logging_for_GNSS_receiver( proc_q, loginfo, device, info, log_failed_zda_msg_filename, run_rtx, run_both)

  
def setup_install_alm(loginfo, device, info, cfg):
  """It's be nice to use a clone file to store almanac data, but the format
  changes more than we'd like. So.. try to use a DCOL packet to set the
  almanac data.
  In the scenario file, the clone file might be Clone/20180205_ALM16.xml .
  First look for Clone/20180205_ALM.pkl to load (by stripping last 6 chars).
  Clone/20180205_ALM.pkl can be generated from an almanac file by using
  clone_alm_to_pkl.py
  Falls back to installing an almanac clone file; if neither path works,
  emails a warning to cfg.email_recipients.
  """
  if len(info.alm_eph_clone) == 0:
    return

  if int(device.StingerAPI) >= 16 and info.set_gps_year:
    # Use Trimcomm to install almanac
    filename = info.alm_eph_clone[0]
    filename = filename + '.pkl'
    if os.path.isfile(filename):
      loginfo.print_(f"using DCOL alm {device.ip} (unknown)")
      rx.clearGNSSReset(device.ip,device.user,device.pw)
      loginfo.print_(f" wait 70s for clear+reset")
      time.sleep(70)
      # Using the DCOL interface requires the ref week be set correctly
      loginfo.print_(f" {device.ip} sending ref year {info.set_gps_year}")
      time.sleep(1)
      rx.sendDColRefWeek( device.ip, DCOL_port2, info.set_gps_year )
      with open(filename,'rb') as f_data:
        # The .pkl holds (almanac payload, week/seconds time stamp); see
        # clone_alm_to_pkl.py per the docstring above.
        alm_data,wk_sec = pickle.load(f_data)
        time.sleep(1)
        rx.sendDColStartupData(device.ip,DCOL_port2,alm_data,time_wk_sec=wk_sec)
        loginfo.print_(f" {device.ip} sent startup data - wait for storage..")
        time.sleep(1)
        # Read back the stored almanac counts to confirm the upload.
        data = rx.getDColStartupData( device.ip, DCOL_port2 )
        loginfo.print_(f" {device.ip} gps={len(data['gps_alm'])}"
                       f" gln={len(data['gln_alm'])}"
                       f" gal={len(data['gal_alm'])}"
                       f" bds={len(data['bds_alm'])}"
                       )
        # Wait for files to get stored to BBFFS within 70s.
        time.sleep(70)
        count_almanacs( loginfo, [device], None )
        # NOTE(review): both operands test 'gps_alm'; the second was likely
        # meant to be another constellation (e.g. 'gln_alm') -- confirm.
        if len(data['gps_alm'])!=0 and len(data['gps_alm'])!=0:
          return

  # Fallback to using an almanac clone file
  almEph = f"{info.alm_eph_clone[0]}{device.StingerAPI}.xml"
  if not os.path.isfile(almEph):
    almEph = None

  if almEph is not None:
    loginfo.print_("using clone file alm "+almEph)
    rx.UploadClone(device.ip,device.user,device.pw,almEph)
    rx.InstallClone(device.ip,device.user,device.pw,almEph)
    return

  # If we get here, almanac upload failed...
  if len(cfg.email_recipients) > 0:
    sender = 'RFregression@fermion.eng.trimble.com'
    msg_text = f"No almanac for RX#{device.n} run {cfg.GlobalRunNum}"
    msg = email.mime.text.MIMEText(msg_text)
    msg['Subject'] = 'RF regression missing almanac'
    msg['From'] = sender
    msg['To'] = ', '.join(cfg.email_recipients)
    s = smtplib.SMTP( cfg.email_host )
    s.sendmail(sender, cfg.email_recipients, msg.as_string())
    s.quit()
  

def run_setup(loginfo, device, info, cfg, run_rtx=False):
  '''Set up a receiver before the RF playback starts.
  1. Delete T0x and almanac/eph data
  2. Configure receiver settings
  3. Install scenario almanac data
  4. Reboot the receiver

  Inputs: loginfo = ST.SpirentTools.LogInfo - where to log diagnostics
          device = Device() from config.xml - the receiver to set up
          info = ScenarioInfo() - the scenario about to be played
          cfg = ConfigXml() - overall config.xml info
          run_rtx = if True, configure Ag receivers for RTX corrections
                    rather than RTK (Ag path only)
  Dispatches to the Ag-receiver path (TSIP, via wrap_use_agrxtool) when
  device.ag is set, otherwise to the GNSS-receiver path.
  Raises RuntimeError when a configuration step fails verification.
  '''

  def run_setup_for_GNSS_receiver(loginfo, device, info):
    # Reset a GNSS (non-Ag) receiver to a clean state:
    # wipe logs/alm/eph, install the settings clone, verify the
    # elevation mask took effect, install the scenario almanac,
    # then reboot.
    #
    # rx.sendDColAntennaOffCmd() is not available in
    # released firmware, so we can't rely on it.
    # The Spirent simulator should be connected and the RF signal
    # off, so we shouldn't get more GNSS data.

    loginfo.print_("run_setup "+device.ip)

    # Disable T0X logging
    rx.DisableDefaultLogging(device.ip,device.user,device.pw)
    time.sleep(5)

    # Some commands below only work in test mode
    rx.EnableTestMode(device.ip,device.user,device.pw)
    loginfo.print_("run_setup-delete files "+device.ip)
    # Delete the GNSS alm/eph etc
    rx.DeleteFile(device.ip,device.user,device.pw,'/ffs','950') # Titan INS cal
    rx.DeleteGNSSData(device.ip,device.user,device.pw)
    # Delete all the files in /Internal
    rx.getFileListAndDelete(device.ip,device.user,device.pw,'/Internal/','T04')
    # Install clone file - receiver settings only
    loginfo.print_("run_setup-install clone "+device.ip)
    rx.UploadClone(device.ip,device.user,device.pw,device.settingsClone, n_retries=5)
    rx.InstallClone(device.ip,device.user,device.pw,device.settingsClone,clear=True)

    # Delay seems to be needed for some devices, otherwise following commands can
    # be overwritten.  It'd be nice to fix the receiver.
    time.sleep(30)

    # Confirm elev_mask: read the mask back from the receiver and compare
    # with the value the clone file should have installed; a mismatch
    # means the clone install failed.
    elev_mask_from_receiver = get_elev_mask_from_receiver(device)
    elev_mask_from_xml = get_elev_mask_from_xml(device)
    # NOTE(review): 'raw' is never used below - confirm this parse can be dropped.
    raw = ET.parse(device.settingsClone).getroot()
    if elev_mask_from_xml == None:
      loginfo.print_(device.ip,": Elev_mask not defined in clone file", device.settingsClone,'SKIP')
      raise RuntimeError("run_setup-install Elev_mask not defined in clone file.")
    else:
      if elev_mask_from_xml != elev_mask_from_receiver:
        loginfo.print_("run_setup-install failed.","GetElev(",device.ip,") vs. elev_mask from clone file(",device.settingsClone,") =",elev_mask_from_receiver,"vs.",elev_mask_from_xml)
        loginfo.print_("run_setup-reboot "+device.ip)
        # Reboot anyway so the receiver is left in a sane state before failing.
        rx.SoftReset(device.ip,device.user,device.pw)
        time.sleep(70)
        raise RuntimeError("run_setup-install Elev_mask inconsistent.")
      else:
        loginfo.print_("run_setup-install Elev_mask check pass", device.ip)

    # set dynamicModel later due to IMU will overwrite it
    # Change dynamic model with web GUI
    #rx.ChangeDynamicModel(device.ip,device.user,device.pw,info.dynamicModel)

    # EnableIpFilter is buggy with older firmware and causes units to drop off net
    #rx.EnableIpFilter( device.ip, device.user, device.pw, 31, this_server_IP )

    # Find alm/eph file with correct API
    loginfo.print_("run_setup-install alm/eph "+device.ip)
    setup_install_alm(loginfo, device, info, cfg)

    # Antenna type only matters when RTK corrections are in play.
    if device.do_RTK and info.antenna_ID is not None:
      loginfo.print_("run_setup-antenna ID '%s' %s"%(info.antenna_ID,device.ip))
      rx.sendAntennaType( device.ip, DCOL_port, info.antenna_ID )

    ##################################################
    # Reboot the receiver
    ##################################################
    loginfo.print_("run_setup-reboot "+device.ip)
    rx.SoftReset(device.ip,device.user,device.pw)


  def run_setup_for_Ag_receiver(args):
    # Ag (agriculture) receivers are configured over a TSIP serial port.
    # NOTE: this unpacking shadows the outer run_setup() parameters of
    # the same names; 'args' is the tuple passed to wrap_use_agrxtool.
    loginfo, device, cfg, run_rtx = args[0]
    loginfo.print_(fn()+"#",device.n,"run_setup "+device.ip+", using TSIP at "+device.ag.io_TSIP+", skip_setup = "+str( device.ag.skip_setup ))
    if device.ag.skip_setup == 'yes':
      loginfo.print_(fn()+"#",device.n,"Skip run_setup "+device.ip)
      return

    # Short settle delay (seconds) used between every TSIP command below.
    dt = 2
    ######################################
    # Reset: clean Battery-backed memory #
    # No need to teardown()              #
    ######################################
    loginfo.print_(fn()+"#",device.n,"Resetting ...")

    port = device.ag.io_TSIP
    dev = agrx.AgDevice(port)
    dev.open_connection()
    time.sleep(dt)

    try:
      rt = dev.reset()
      loginfo.print_(fn()+"#",device.n,"Reset() returns "+rt)
    except:
      # The reset can drop the serial connection; that is expected.
      # dev.close_connection()
      loginfo.print_(fn()+"#",device.n,"Lost connection after reset().")

    loginfo.print_(fn()+"#",device.n,"Sleep 30 sec after reset")
    time.sleep(30)

    try:
      loginfo.print_(fn()+"Try to close connection ...")
      dev.close_connection()
    except:
      loginfo.print_(fn()+"Close connection failed.")

    # remove previous .tsip .t0x file
    # Logs land on the TNFS server named "RXnn*"; delete any from earlier runs.
    loginfo.print_(fn()+"#",device.n,"Remove previous .tsip .t0x file")
    if cfg.TNFS_server_root == None:
      loginfo.print_(fn()+"cfg.TNFS_server_root = None")
      raise RuntimeError("cfg.TNFS_server_root = None")
    TNFS_server_path = cfg.TNFS_server_root + "/gpsData/"
    prefix = "RX" + str(device.n).zfill(2)
    for fname in os.listdir(TNFS_server_path):
      if len(fname)>=4 and fname[:4] == prefix:
        loginfo.print_(fn()+"#",device.n,"Remove old log file", fname)
        os.remove(TNFS_server_path + fname)

    # Reconnect
    loginfo.print_(fn()+"#",device.n,"Connecting ... ")
    port = device.ag.io_TSIP
    dev = agrx.AgDevice(port)
    dev.open_connection()
    time.sleep(dt)

    # Check logging
    loginfo.print_(fn()+"#",device.n,"Check logging:", dev.get_logging())
    time.sleep(dt)

    # Set Port Protocol.  Setting strings are "port,<n>,<input>,<output>".
    # When the NMEA output and RTK input share a port, configure it once;
    # otherwise configure port 1 for NMEA out and port 2 for corrections in.
    if device.ag.io_NMEA_ZDA_output == device.ag.io_RTK_input:
      loginfo.print_(fn()+"#",device.n,"Setting port ...")
      setting = "port,2,RTCM_Input_Protocol,NMEA_Output_Protocol"
      loginfo.print_(fn()+"#",device.n,"Setting port protocol: "+setting)
      _, port_n, input, output = setting.split(',')
      dev.set_port(int(port_n), input, output)
      time.sleep(dt)
    else:
      loginfo.print_(fn()+"#",device.n,"Setting port ...")
      setting = "port,1,No_Input_Protocol,NMEA_Output_Protocol"
      loginfo.print_(fn()+"#",device.n,"Setting port protocol: "+setting)
      _, port_n, input, output = setting.split(',')
      dev.set_port(int(port_n), input, output)
      time.sleep(dt)

      setting = "port,2,RTCM_Input_Protocol,No_Output_Protocol"
      if device.do_RTK and run_rtx:
        setting = "port,2,AutomaticRTK_Input_Protocol_-_RTCM/CMR(+)/CMRx/CMRxe,No_Output_Protocol"
        datum_id = 248
        loginfo.print_(fn()+"#",device.n,"Setting datum id for centerpoint RTX: "+str(datum_id))
        dev.set_centerpoint_rtx_output_settings(datum_id)

      loginfo.print_(fn()+"#",device.n,"Setting port protocol: "+setting)
      _, port_n, input, output = setting.split(',')
      dev.set_port(int(port_n), input, output)
      time.sleep(dt)

    # Config DGPS using RTK
    if device.do_RTK:
      if run_rtx:
        loginfo.print_(fn()+"#",device.n,"Call set_DGPS_source_control() to enable RTX")
        rt = dev.set_DGPS_source_control('RTX')
      else:
        loginfo.print_(fn()+"#",device.n,"Call set_DGPS_source_control() to enable RTK")
        rt = dev.set_DGPS_source_control('RTK')
      if rt != None:
        loginfo.print_(fn()+"#",device.n,"set_DGPS_source_control() pass")
      else:
        loginfo.print_(fn()+"#",device.n,"set_DGPS_source_control() fail")
        raise RuntimeError("run_setup_for_Ag_receiver fail.")
      time.sleep(dt)

    # Config GPS Pos Rate 10Hz
    loginfo.print_(fn()+"#",device.n,"Call set_primary_receiver_configuration() to set Pos Rate = 10Hz")
    rt = dev.set_primary_receiver_configuration()
    if rt != None:
      loginfo.print_(fn()+"#",device.n,"set_primary_receiver_configuration() pass")
    else:
      loginfo.print_(fn()+"#",device.n,"set_primary_receiver_configuration() fail")
      raise RuntimeError("run_setup_for_Ag_receiver fail.")
    time.sleep(dt)

    # Enable logging + set logging filename (4-char "RXnn" prefix)
    prefix_4char = "RX" + str(device.n).zfill(2)
    rt = dev.set_logging(True, prefix_4char)
    loginfo.print_(fn()+"#",device.n,"set_logging(",prefix_4char,") returns", rt)
    time.sleep(dt)

    # Set TNFS server IP so the receiver pushes its logs to us
    if cfg.TNFS_server_ip_address == None:
      raise RuntimeError("cfg.TNFS_server_ip_address = None")
    rt = dev.set_tnfs(cfg.TNFS_server_ip_address)
    loginfo.print_(fn()+"#",device.n,"set_tnfs returns", rt)
    time.sleep(dt)

    # Print final setting
    loginfo.print_(fn()+"#",device.n,"All setting is done:")
    # arr = dev.get_all_port_protocol()
    # time.sleep(dt)
    # for tmp in arr:
    #   tmp = '\n'.join(tmp.split(';'))
    #   loginfo.print_(fn()+device.ip+": "+str(tmp))

    # Final check
    # check RTK: read the DGPS source mode back and verify it matches
    # the mode we just requested (mode 14 = RTX, mode 6 = RTK).
    if device.do_RTK:
      rt = dev.get_DGPS_source_control()
      arr = rt.split(',')
      dgps_src_mode = arr[0].strip()
      if dgps_src_mode == "DGPSSrcMode-14" and run_rtx:
        loginfo.print_(fn()+"#",device.n,"Check DGPS config using RTX pass.")
      elif dgps_src_mode == "DGPSSrcMode-6" and not run_rtx:
        loginfo.print_(fn()+"#",device.n,"Check DGPS config using RTK pass.")
      else:
        loginfo.print_(fn()+"#",device.n,"Check DGPS config using RTK/RTX fail. dgps_src_mode =",dgps_src_mode)
        raise RuntimeError(fn()+"#",device.n,"Check DGPS config using RTK/RTX fail.")
      time.sleep(dt)

    # check output frequency (last field of the config string)
    rt = dev.get_primary_receiver_configuration()
    arr = rt.split(',')
    if arr[-1].strip() == 'PosFixRate-2':
      loginfo.print_(fn()+"#",device.n,"Check GPS config Pos Rate pass.")
    else:
      loginfo.print_(fn()+"#",device.n,"Check GPS config Pos Rate fail.")
      raise RuntimeError(fn()+"#",device.n,"Check GPS config Pos Rate fail.")
    time.sleep(dt)

    # check logging
    loginfo.print_(fn()+"#",device.n,"Check logging:", dev.get_logging())
    time.sleep(dt)

    # close connection
    loginfo.print_(fn()+"#",device.n,"Close connection...")
    dev.close_connection()
    time.sleep(dt)

    # No need to Reboot

  # Dispatch: <ag> tag in config.xml selects the TSIP path.
  if device.ag != None:
    if verbose: loginfo.print_(fn()+"Device #", device.n, "is Ag receiver")
    # wrap_call_agrxtool(run_setup_for_Ag_receiver, loginfo, device, cfg, run_rtx, 180,15) # try every 15 secs for 3 mins
    wrap_use_agrxtool(run_setup_for_Ag_receiver, (loginfo, device, cfg, run_rtx), loginfo=loginfo, device=device, try_time=180, dt=15)
  else:
    if verbose: loginfo.print_(fn()+"Device #", device.n ,"is GNSS receiver")
    run_setup_for_GNSS_receiver(loginfo, device, info)

def disable_all_IP_filters():
  """Re-open receiver access once a test finishes.

  While a regression test runs, each receiver blocks packets from every
  IP address except this server; this best-effort sweep removes those
  filters so the receivers are reachable by everyone again.
  """
  try:
    devices = get_config_xml(None,True).devices
    for device in devices:
      rx.DisableIpFilter( device.ip, device.user, device.pw )
  except:
    # Best effort only: any failure aborts the sweep with a note.
    print("Couldn't disable IP filtering on all receivers...")

def check_runq(loginfo, proc_q, cmd, log=None, exit_on_err=True, run_in_bg=False):
  """Inputs: loginfo = ST.SpirentTools.LogInfo = where to log diagnostics
             proc_q = Queue() - list of background processes started
             cmd = shell command to run (executed via 'bash -c')
             log = if not None, redirect stdout+stderr to Builds/log.txt
             exit_on_err = if True, raise RuntimeError on non-zero exit
             run_in_bg = if True, don't block (status reported as 0)
  Run the shell command 'cmd'.  Put the child's PID in 'proc_q' in case
  the user sends a 'Kill' signal.
  Raises: RuntimeError when the command fails and exit_on_err is set.
  """
  loginfo.print_("Running shell cmd: %s" % cmd)
  fout = None
  try:
    if log is not None:
      # Each logged command truncates and rewrites Builds/log.txt.
      fout = open('Builds/log.txt', 'w')
      p = subprocess.Popen(['bash','-c',cmd],stdout=fout,stderr=fout)
    else:
      p = subprocess.Popen(['bash','-c',cmd])
    proc_q.put(p.pid)
    # Background commands report success immediately; the caller owns them.
    ret = 0 if run_in_bg else p.wait()
  finally:
    if fout is not None:
      # The child holds its own inherited descriptor, so closing our
      # copy is safe even when the command is still running.
      fout.close()
  if ret != 0 and exit_on_err:
    raise RuntimeError('Error in cmd',cmd)


def get_receiver_info():
  """Describe every receiver in the regression system.

  The data comes from config.xml and is returned as:
   'devs' : [{ 'ip':'10.1.2.3', 'long_desc':'Some desc..'}, ...]
  """
  cfg = get_config_xml(None,True)
  # NOTE(review): the return value is unused; the call is kept in case
  # get_regression_status() has side effects - confirm.
  status = get_regression_status()

  result = {'devs':[]}
  for dev in cfg.devices:
    result['devs'].append({'ip':dev.ip, 'long_desc':dev.long_desc,
                           'n':dev.n, 'ag':dev.ag})
  return result


def get_ag_receiver_info(ip='10.1.149.183'):
  ''' Return Ag receiver info, queried over TSIP, as a dictionary.
      eg. {'product_name':'NAV 900', 'product_fw_version':'12.0.20.0', ...}
      Errors are reported through dict keys ('tsip connection',
      'acquire lock', 'error msg') rather than by raising; returns None
      when the matching device has no <ag> tag, and {} when no device
      in config.xml matches 'ip'.
  '''
  cfg = get_config_xml(None,True)
  d = {}

  for n,dev in enumerate(cfg.devices):
    if dev.ip == ip:
      if dev.ag == None:
        print("No <ag> tag defined")
        return None
      else:
        # If the optional AGRXTools import failed at module load time,
        # we cannot talk TSIP at all.
        if 'AGRXTools' not in sys.modules:
          return{'AGRXTool import':'failed'}

        def get_ag_info_all(args):
          # NOTE: 'dev' (the config Device) is immediately shadowed by
          # the AgDevice connection object below.
          dev = args[0]
          try:
            port = dev.ag.io_TSIP
            dev = agrx.AgDevice(port)
            dev.open_connection()
            d['product_name'] = dev.get_product_name()
            d['product_fw_version'] = dev.get_product_fw_version()
            d['product_sn'] = dev.get_sn()
            d['fw_date'] = dev.get_fw_date()
            d['ip_address'] = dev.get_ip_address()
            d['tnfs'] = dev.get_tnfs()
            d['logging'] = dev.get_logging()
            d['port_protocol'] = dev.get_all_port_protocol()
            d['DGPS_source'] = dev.get_DGPS_source_control()
            # presumably selects which IO option flags the following
            # queries report - confirm against AGRXTools
            dev.e35cmd_e55rsp()
            d['gps_position_info'] = dev.get_gps_position_info()
            dev.e35cmd_e55rsp(0x1, 0x1, 0x10, 0x2)
            d['gps_dgps_corr_info'] = dev.get_dgps_corr_info()
            d['io_option_flag_request'] = dev.e35req_e55rsp() + " vs.(0x1, 0x1, 0x10, 0x2)"
            dev.close_connection()
            return d
          except:
            # Any TSIP failure is folded into the result dict.
            d['tsip connection'] = 'failed'
            return d

        try:
          # Serialize access: the receiver's TSIP core allows only one
          # connection at a time (see agrx_access_lock).
          rt = wrap_use_agrxtool(get_ag_info_all, dev, loginfo=None, device=dev, try_time=1, dt=1)
          return rt
        except Exception as e:
          d['acquire lock'] = 'failed'
          d['error msg'] = str(e)
          return d
  return d

def stop_ag_receiver_log(ip = '0.0.0.0'):
  '''Stop logging on the Ag receiver whose config.xml entry matches 'ip'.

  Returns a dict describing the final log settings, e.g.
  {"logging":"logging status:0, filenameLen:0, ...", "tnfs":"..."}.
  Errors are reported through dict keys ('error msg', 'tsip connection',
  'acquire lock') rather than by raising; {} means no matching device.
  '''
  cfg = get_config_xml(None,True)
  result = {}
  for device in cfg.devices:
    if device.ip != ip:
      continue
    if device.ag == None:
      result['error msg'] = 'No <ag> tag defined'
      return result
    # Without the optional AGRXTools module we cannot talk TSIP at all.
    if 'AGRXTools' not in sys.modules:
      return{'AGRXTool import':'failed'}

    def stop_log(args):
      # args[0] is the config Device; open its TSIP port, turn logging
      # off, and point TNFS at 0.0.0.0 so it stops pushing files.
      target = args[0]
      info = {}
      try:
        conn = agrx.AgDevice(target.ag.io_TSIP)
        conn.open_connection()
        conn.set_logging(False)
        info['logging'] = conn.get_logging()
        info['tnfs'] = conn.set_tnfs('0.0.0.0')
        conn.close_connection()
        return info
      except:
        info['tsip connection'] = 'failed'
        return info

    try:
      # Serialized: only one TSIP connection is allowed at a time.
      return wrap_use_agrxtool(stop_log, device, loginfo=None, device=device, try_time=1, dt=1)
    except Exception as e:
      result['acquire lock'] = 'failed'
      result['error msg'] = str(e)
      return result
  return result


# Using __slots__ lets pylint check for typos
class ScenarioInfo(object):
  '''All info from a single scenario XML file.

  __slots__ makes typos in attribute names raise AttributeError (and
  lets pylint catch them) instead of silently creating new attributes.
  Fields start out unset; parse_XML_scenarios() assigns them.
  '''
  __slots__ = ('fileName',            # scenario XML file name
               'play_secs',           # total playback seconds
               'scn_file',            # Spirent .scn scenario file
               'alm_eph_clone',       # list of alm/eph clone file prefixes
               'RF_jam',              # RF jamming flag text ('yes'/'no')
               'dynamicModel',        # receiver dynamic model name
               'ionoMitigation',      # iono mitigation setting (int, 0 = off)
               'set_gps_year',        # reference GPS year, or None
               'RTK_corr_file',       # RTK corrections file, or None
               'RTX_corr_file',       # RTX corrections file, or None
               'antenna_ID',          # antenna type identifier, or None
               'IMU_corr_file',       # IMU corrections file, or None
               'HPC1_IMU_corr_file',  # HPC1 IMU corrections file, or None
               'NoPI_base',           # NoPI <base> value, or None
               'NoPI_ini',            # NoPI <ini> value, or None
               'file_list',           # playback files to upload
               'file_len_list' )      # lengths of the playback files
  # (The previous no-op __init__ was removed; object() needs no help.)
  def __repr__(self):
    # Unset slots print as None rather than raising AttributeError.
    mystr = 'ScenarioInfo:\n  '
    mystr += '\n  '.join("%s: %s" % (item,getattr(self,item,None)) for item in self.__slots__)
    mystr += '\n'
    return mystr


def is_build_needed(loginfo):
  '''Input: loginfo - ST.SpirentTools.loginfo
  Returns: (True/False - is a firmware build needed?,
            fw_time - the start/check time as 'YYYY-mm-dd HH:MM:SS')
  A build is skipped when Builds/install.txt was modified within the
  last 15 minutes, i.e. a build/install cycle finished very recently.
  '''
  fw_time = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
  try:
    install_age_secs = time.time() - os.path.getmtime('Builds/install.txt')
  except OSError:
    # Builds/install.txt probably doesn't exist, so a build is needed.
    # (Catching only OSError - the old bare 'except' also hid real
    # errors from the code below and swallowed KeyboardInterrupt.)
    return True, fw_time
  if install_age_secs < 15*60:
    loginfo.print_("Skipping builds because they are recent")
    return False, fw_time
  return True, fw_time


def parse_XML_scenarios(st, cfg, xml_scenarios, skipped_devices_list=None, n_loops=1, do_subset=False, start_time=-1, stop_time=-1):
  '''Inputs: st = ST.SpirentTools instance
             cfg = ConfigXml() = config.xml info
             xml_scenarios = None for everything, or list of XML scenarios
             skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
             n_loops = number of times the scenario set will be played
             do_subset = if True, only play [start_time, stop_time] seconds
             start_time, stop_time = subset playback window (seconds)
  Returns: (list of ScenarioInfo(), total build + playback seconds)
  Returns info on all scenarios or a specified list of scenarios.
  For non-regression tests, any XML scenario is valid.
  For manual regression tests, only XML scenarios without 'skip_regression' are valid.
  For full regression tests, check 'regression_runs' to see if it is time to run a scenario.
  '''
  # None sentinel avoids the shared-mutable-default-argument pitfall;
  # callers that passed [] explicitly see identical behavior.
  if skipped_devices_list is None:
    skipped_devices_list = []

  sample_file_list = []
  sample_file_total_secs = 0.0
  full_regression_run = False
  if xml_scenarios is None:
    full_regression_run = True
    xml_scenarios = [os.path.basename(p) for p in glob.glob(cfg.SampleConfig+"/*.xml")]

  if verbose: st.loginfo.print_(fn()+"skipped_devices_list=",skipped_devices_list)
  # Estimate about 5 minutes per build (if enabled)
  if cfg.allow_builds and not cfg.disable_regression:
    do_build, _ = is_build_needed(st.loginfo)
    if do_build:
      sample_file_total_secs += (len(cfg.devices) - len(skipped_devices_list)) * 60. * 5

  # Note: set-up takes 6 mins each time
  sample_file_total_secs += 60.*6*n_loops

  for fileName in xml_scenarios:
    curr_copy_len = 0
    curr_info = ScenarioInfo()
    tree = ET.ElementTree(file="%s" % (cfg.SampleConfig + '/' + fileName))
    if not cfg.disable_regression:
      if tree.find('skip_regression') is not None:
        # It's impossible to analyze this data because we don't have truth
        continue

      if full_regression_run:
        # Full regression runs follow a schedule.
        regression_runs = tree.find('regression_runs').text
        gps_week_num = int((datetime.now() - datetime(1980,1,6)).days/7)
        if regression_runs == 'all':
          pass
        elif regression_runs == 'none':
          continue
        else:
          # Handle formats like "week % 2 == 0"
          # SECURITY NOTE: eval() executes arbitrary code from the
          # scenario XML; acceptable only because these files are
          # trusted, locally maintained configuration.
          week = gps_week_num
          if eval(regression_runs):
            pass
          else:
            continue

    # Get the files we need to upload from the XML
    scn_file = None
    alm_eph_clone = []
    for elem in tree.iterfind('almEphClone'):
      alm_eph_clone.append(elem.text)
    play_secs = 0.
    if do_subset:
      play_secs += float(stop_time-start_time)
    else:
      for elem in tree.iterfind('gss6450_playback/hours'):
        play_secs += float(elem.text)*60*60
    curr_info.play_secs = play_secs
    sample_file_total_secs += play_secs * n_loops

    # Note: there's some overhead for every file playback, so add some
    # (arbitrary) time here
    if cfg.disable_regression:
      sample_file_total_secs += 60. * 2
    else:
      sample_file_total_secs += 60. * len(cfg.devices)

    file_len_list = []
    for elem in tree.iterfind('gss6450_playback/file_len'):
      file_len_list.append( int(elem.text) )

    file_list = []
    for elem in tree.iterfind('gss6450_playback/file'):
      file_list.append( elem.text )
      if elem.text.endswith(".scn") and scn_file is None:
        scn_file = elem.text
      # Approx file copy time:
      # NOTE(review): curr_copy_len is accumulated but never read here;
      # copy time is estimated later by st.prepare_runs() - confirm.
      if os.path.isfile(elem.text):
        curr_copy_len += os.stat(elem.text).st_size
    if scn_file is None:
      raise RuntimeError("No Spirent .scn file found in {}".format(fileName))
    curr_info.fileName = fileName
    curr_info.scn_file = scn_file
    curr_info.alm_eph_clone = alm_eph_clone
    test_type = tree.find('type').text
    if test_type == "Mobile":
      curr_info.dynamicModel = "OffRoadVehicleMovingStart"
    elif test_type == "Static":
      curr_info.dynamicModel = "Static"
    else:
      raise RuntimeError("Unknown test type {}".format(test_type))
    # <dynamicModel> tag if exists will overwrite <type> tag on info.dynamicModel
    if tree.find('dynamicModel') is not None:
      dynModNum = int(tree.find('dynamicModel').text)
      DynamicModelKeyValue = rx.GetDynamicModelDict()
      st.loginfo.print_(fn()+"info.dynamicModel is overwrittern by tag <dynamicModel>"+str(dynModNum)+"</dynamicModel> from "+str(fileName))
      if dynModNum in DynamicModelKeyValue.keys():
        curr_info.dynamicModel = DynamicModelKeyValue[dynModNum]
      else:
        st.loginfo.print_(fn()+"Unknown <dynamicModel>"+str(dynModNum)+"</dynamicModel> from "+str(fileName)+". Exit.")
        raise RuntimeError("Unknown <dynamicModel>{}</dynamicModel>".format(dynModNum))
    if tree.find('RF_jam') is not None:
      curr_info.RF_jam = tree.find('RF_jam').text
    elif tree.find('skip_regression') is not None:
      curr_info.RF_jam = 'no'
    else:
      raise RuntimeError("No RF_jam value found in {}".format(fileName))
    curr_info.ionoMitigation = 0 # default to off unless specified
    for elem in tree.iterfind('ionoMitigation'):
      curr_info.ionoMitigation = int(elem.text)
    curr_info.set_gps_year = None
    for elem in tree.iterfind('set_gps_year'):
      curr_info.set_gps_year = int(elem.text)
    curr_info.RTK_corr_file = None
    for elem in tree.iterfind('RTK_corr_file'):
      curr_info.RTK_corr_file = elem.text
    curr_info.RTX_corr_file = None
    for elem in tree.iterfind('RTX_corr_file'):
      curr_info.RTX_corr_file = elem.text
    curr_info.antenna_ID = None
    for elem in tree.iterfind('antenna_ID'):
      curr_info.antenna_ID = elem.text
    curr_info.IMU_corr_file = None
    for elem in tree.iterfind('IMU_corr_file'):
      curr_info.IMU_corr_file = elem.text
    curr_info.HPC1_IMU_corr_file = None
    for elem in tree.iterfind('HPC1_IMU_corr_file'):
      curr_info.HPC1_IMU_corr_file = elem.text
    curr_info.NoPI_base = None
    for elem in tree.iterfind('NoPI/base'):
      curr_info.NoPI_base = elem.text
    curr_info.NoPI_ini = None
    for elem in tree.iterfind('NoPI/ini'):
      curr_info.NoPI_ini = elem.text

    curr_info.file_list = file_list
    curr_info.file_len_list = file_len_list
    sample_file_list.append(curr_info)

  return sample_file_list, sample_file_total_secs

def prepare_XML_scenarios( st, sample_file_list, sample_file_total_secs ):
  '''Order the runs and return the estimated finish time as a string.

  st.prepare_runs() computes the fastest run order and the approximate
  file copy time; that is added to the playback estimate and the
  expected end time is returned, e.g. "Mon, 01 Jan 2024 12:00:00".
  '''
  copy_secs = st.prepare_runs( sample_file_list )
  total_secs = sample_file_total_secs + copy_secs
  finish = time.localtime(time.time() + total_secs)
  return time.strftime("%a, %d %b %Y %H:%M:%S", finish)

def build_cvs_firmware(device,loginfo,cfg,proc_q,build_dir_name):
  '''Check firmware source out of CVS and build it.
      device = Device() to build firmware for
      loginfo = LogInfo() for diagnostics
      cfg = ConfigXml() overall config
      proc_q = Queue() - list of background processes started
      build_dir_name = string path to coreBuild/
  Returns the glob list of built .timg firmware image paths.
  '''
  # Optional CVS tag: "-..." is passed through verbatim, anything else
  # becomes a "-r<tag>" revision argument.
  tag_cmd = ""
  if device.tag:
    tag_cmd = (" " + device.tag) if device.tag.startswith("-") else (" -r" + device.tag)

  loginfo.print_("CVS checkout Build #%d" % device.n)
  if cfg.clean_builds or not os.path.isdir("Builds/"+build_dir_name):
    # Fresh checkout into Builds/<build_dir_name>
    check_runq(loginfo,proc_q, f'rm -rf Builds/{build_dir_name}')
    check_runq(loginfo,proc_q, f'cd Builds ; cvs -q co -d {build_dir_name}{tag_cmd} coreBuild',log=True)
  else:
    # Incremental update: stash locally added/modified/conflicted files
    # aside as *.orig, then bring the working tree up to date.
    check_runq(loginfo,proc_q, f"cd Builds/{build_dir_name}; cvs -qn up | egrep '^[AMC] ' | cut -d' ' -f2 | xargs -t -i mv {{}} {{}}.orig")
    check_runq(loginfo,proc_q, f'cd Builds/{build_dir_name}; cvs -q up')

  if device.patchFile:
    check_runq(loginfo,proc_q, f'export BASE_DIR=$PWD; cd Builds/{build_dir_name}; patch -p0 < $BASE_DIR/{device.patchFile}')

  frozen = "DATEFROZEN=1" if device.frozen is not None else ""
  check_runq(loginfo,proc_q, f'cd Builds/{build_dir_name} ; ./config -f {device.build} ; {frozen} make -j8',log=True)

  return glob.glob(f'Builds/{build_dir_name}/{device.build}_output/{device.build}.timg*')


def reboot_linux_receiver_via_ssh(device,loginfo):
  '''Reboot a Linux-based receiver by running 'reboot' over SSH.

  Best effort: any failure is deliberately ignored, because the
  connection dropping is the expected outcome once the receiver
  actually reboots.  The SSH client is always closed so the socket
  does not leak.
  '''
  ssh = paramiko.SSHClient()
  ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  try:
    ssh.connect(device.ip,
                username='root',
                password='root',
                timeout=60)

    # Short timeouts: the receiver won't answer once 'reboot' takes hold.
    chan = ssh.get_transport().open_session(timeout=0.5)
    chan.settimeout(0.5)
    loginfo.print_(f"Rebooting device with address {device.ip} over SSH")
    chan.exec_command('reboot')
  except Exception:
    # SSH should not get a response, because the receiver is rebooting.
    pass
  finally:
    ssh.close()  # release the socket even on the expected failure path

  
def build_git_firmware(device,loginfo,cfg,proc_q,build_dir_name,build_branch):
  '''Checkout code from git 'colossal' and build
      device = Device() to build firmware for
      loginfo = LogInfo() for diagnostics
      cfg = ConfigXml() overall config
      proc_q = Queue() - list of background processes started
      build_dir_name = string path to coreBuild/
      build_branch = git branch to build when device.tag is not set
  Returns the glob list of built firmware image paths (.img* for Linux
  container builds, .timg* otherwise).
  Raises RuntimeError when 'git pull' fails repeatedly.
  '''
  num_pull_attempts = 2
  loginfo.print_("Git checkout Build #%d" % device.n)
  # A per-device <tag> overrides the requested branch.
  tag_cmd = f"git checkout {build_branch}"
  if device.tag:
    tag_cmd = f"git checkout {device.tag}"

  # Set up the build directory
  if not os.path.isdir("Builds/"+build_dir_name):
    # Sparse, blobless clone; $COLOSSAL_RD supplies the bearer token.
    check_runq(loginfo,proc_q, f'cd Builds/ ; git clone --filter=blob:none --sparse -c http.extraHeader="Authorization: Bearer $COLOSSAL_RD" https://bitbucket.trimble.tools/scm/sgnss/colossal.git {build_dir_name}')
    check_runq(loginfo,proc_q, f'cd Builds/{build_dir_name} && {tag_cmd} && ./set_project.sh coreBuild')
  else:
    # Existing clone: discard local changes, then switch to the target ref.
    check_runq(loginfo,proc_q, f'cd Builds/{build_dir_name} && git reset --hard HEAD && git clean -fd && {tag_cmd} && ./set_project.sh coreBuild')

  # Retry the pull a couple of times to paper over transient network errors.
  successful_pull = False
  for i in range(num_pull_attempts):
    try:
      check_runq(loginfo,proc_q, f'cd Builds/{build_dir_name} && git pull')
      successful_pull = True
      break
    except:
      loginfo.print_(f"Git pull attempt {i+1} failed for device #{device.n}")

  if not successful_pull:
    raise RuntimeError(f"Git pull failed after {num_pull_attempts} attempts for device #{device.n}")

  if device.patchFile:
    check_runq(loginfo,proc_q, f'export BASE_DIR=$PWD; cd Builds/{build_dir_name}/coreBuild; patch -p0 < $BASE_DIR/{device.patchFile}' )
  frozen = ""
  if device.frozen is not None:
    frozen = "DATEFROZEN=1"
  cb_dir = f'Builds/{build_dir_name}/coreBuild'
  check_runq(loginfo,proc_q, f'cd {cb_dir} && ./config -f {device.build}',log=True)

  # We originally checked the target.mk file to see if it was a Linux build, but
  # now we have a linux tag in the config.xml file. It should be more reliable 
  # outside of build_git_firmware because it does not depend on the build
  # directory being initialized.
  if device.linux:
    # Linux targets build inside a container; probe which
    # start_container.sh flavor is installed first.
    is_new_start_container = False
    try:
      # newer start_container.sh requires "-e" before commands
      check_runq(loginfo,proc_q, f'cd {cb_dir} && ./start_container.sh exit')
    except RuntimeError:
      is_new_start_container = True
    if is_new_start_container:
      check_runq(loginfo,proc_q, f'cd {cb_dir} && ./start_container.sh -m -e "make -j8"')
    else:
      check_runq(loginfo,proc_q, f'cd {cb_dir} && ./start_container.sh "make -j8 && make bundle"')
    firmware_name = glob.glob(f'{cb_dir}/{device.build}_output/{device.build}.img*')
  else:
    check_runq(loginfo,proc_q, f'cd {cb_dir} && {frozen} make -j8')
    firmware_name = glob.glob(f'{cb_dir}/{device.build}_output/{device.build}.timg*')
  return firmware_name

def ag_fw_install(args):
  '''Download and install Ag firmware.

  args = ((loginfo, device, cfg, proc_q),) as passed by the agrxtool
  wrapper.  Downloads the build for device.ag.job_key over TSIP, stages
  it under Builds/RX<n>-coreBuild, and - when cfg.allow_firmware_install
  is set and device.ag.skip_install is not 'yes' - flashes it.
  Raises RuntimeError on download or install failure.
  '''
  loginfo, device, cfg, proc_q = args[0]
  port = device.ag.io_TSIP
  dev = agrx.AgDevice(port)
  dev.open_connection()

  loginfo.print_("#", device.n, ": device TSIP serial port = " + str(device.ag.io_TSIP) )
  loginfo.print_("#", device.n, ": device product name = " + dev.get_product_name() )
  loginfo.print_("#", device.n, ": device product fw version = " + dev.get_product_fw_version() )
  loginfo.print_("#", device.n, ": device sn = " + dev.get_sn() )
  loginfo.print_("#", device.n, ": device fw date = " + dev.get_fw_date() )

  # Download firmware
  loginfo.print_("#", device.n, ": Downloading build ...")
  try:
    build_names = dev.download(device.ag.job_key)
    loginfo.print_("#", device.n, ": Succeeded in downloading builds for device")
  except Exception as e:
    loginfo.print_("#", device.n, ": Failed to download builds.",str(e))
    raise RuntimeError(e)
  # (A second, redundant dev.download() call used to sit here; it
  # doubled the download time for no benefit and was removed.)
  build_dir_name = "RX" + str(device.n) + "-coreBuild"
  loginfo.print_("#", device.n, ": Done. build_names = ",build_names)
  # Stage the downloaded images in a clean per-receiver directory.
  check_runq(loginfo,proc_q, 'rm -rf Builds/' + build_dir_name)
  check_runq(loginfo,proc_q, 'mkdir Builds/' + build_dir_name)
  check_runq(loginfo,proc_q, 'mv shifter_*.img Builds/' + build_dir_name)

  # Upgrade the firmware
  if cfg.allow_firmware_install:
    loginfo.print_("#", device.n, ": Installing Build", 'Builds/' + build_dir_name + '/' + build_names[0])
    if device.ag.skip_install == 'yes':
      loginfo.print_("#", device.n, ": Skip install.")
    else:
      loginfo.print_("#", device.n, ": Installing ...... (It takes ~30 mins. Be cautious to kill. This may brick Ag device.)")
      try:
        dev.install('Builds/' + build_dir_name + '/' + build_names[0])
        loginfo.print_("#", device.n, ": Succeeded in installing builds for device")
      except Exception as e:
        loginfo.print_("#", device.n, ": Failed to install builds.")
        raise RuntimeError(e)
  dev.close_connection()
  loginfo.print_("Disconnect Ag device #%d" % device.n)


def build_install_firmware( st, proc_q, cfg, build_branch, skipped_devices=None):
  '''Inputs: st = ST.SpirentTools instance
             proc_q = Queue() - list of background processes started
             cfg = ConfigXML = info from config.xml
             build_branch = branch name used for colossal (git) builds
             skipped_devices = list of device numbers (<n> in config.xml) to leave untouched
     Returns: fw_time = text string of when firmware was built
  Builds and installs firmware on all units.  GNSS installs run as background
  processes; Ag devices are handled serially (Tsip connection is a singleton).
  Devices whose build or install fails are removed via remove_one_bad_device().
  '''
  # Avoid the shared mutable-default-argument pitfall.
  if skipped_devices is None:
    skipped_devices = []
  loginfo = st.loginfo
  if(os.path.isdir("Builds") == False):
    os.mkdir("Builds") # Make sure the directory exists

  do_build, fw_time = is_build_needed(loginfo)
  if not do_build:
    loginfo.print_("Skipping builds because they are recent")
    return fw_time

  if verbose: loginfo.print_(fn(),"skipped devices=",skipped_devices)
  proc_list = []
  # sort devices and put GNSS device first to run build_install in backgound
  # Ag devices need to be build and install in forground one by one due to Class Tsip() is singleton
  devices = cfg.devices
  devices.sort(key=lambda x: 0 if x.ag==None else 1, reverse=False)
  # Iterate over a copy: remove_one_bad_device() mutates cfg.devices (the same
  # list object as 'devices'); removing while iterating would skip a device.
  for device in list(devices):
    if device.n in skipped_devices:
      continue
    if device.ag == None:
      build_dir_name = "RX" + str(device.n) + "-coreBuild"

      try:
        if device.useTerrasatBuild:
          pass # nothing to build
        elif device.colossal_build:
          firmware_name = build_git_firmware(device,loginfo,cfg,proc_q,"git-coreBuild",build_branch)
        else:
          firmware_name = build_cvs_firmware(device,loginfo,cfg,proc_q,build_dir_name)
      except Exception:
        loginfo.print_("Bad repository for {}".format(device.ip))
        remove_one_bad_device( st, cfg, device, "CVS or build problem" )
        continue

      # Upgrade the firmware
      if cfg.allow_firmware_install:
        loginfo.print_("install Build #%d" % device.n)
        if device.useTerrasatBuild:
          loginfo.print_("Get prebuilt Terrasat Build #%d" % device.n)
          check_runq(loginfo,proc_q, 'rm -rf Builds/' + build_dir_name)
          check_runq(loginfo,proc_q, "mkdir Builds/" + build_dir_name)
          try:
            firmware_name = ['Builds/' + build_dir_name + '/' + device.build + '_tmp.timg']
            urllib.request.urlretrieve(device.useTerrasatBuild, firmware_name[0])
            loginfo.print_("saving Terrasat fw img succeeded!")
          except Exception:
            firmware_name = ""
            loginfo.print_(f"failed to get Terrasat build from {device.useTerrasatBuild}")

        if not firmware_name:
          # Empty firmware name (e.g. failed Terrasat download).  Installing
          # would be guaranteed to fail, so drop the device now instead of
          # spawning a doomed upgrade process.
          remove_one_bad_device( st, cfg, device, "CVS or build problem" )
          continue
        firmware_name = firmware_name[0]
        args = (device.ip,
                device.user,
                device.pw,
                firmware_name,
                True) # use the failsafe install
        kwargs = {'wait':True}
        # The device object is stored as the process 'name' so the join loop
        # below can report proc.name.ip and remove the device on failure.
        proc = multiprocessing.Process( target=rx.upgradeFW, name=device, args=args, kwargs=kwargs )
        proc.start()
        proc_list.append( proc )
    else:
      # for ag receivers, install() using tsip() can only connect to receivers one at a time. No need to add the proc to proc_list.
      st.loginfo.print_("This is Ag receicer.","#",device.n)

      wrap_use_agrxtool(ag_fw_install, (loginfo, device, cfg, proc_q), loginfo=loginfo, device=device, try_time=10, dt=2)

  for proc in proc_list:
    proc.join()

    if proc.exitcode != 0:
      loginfo.print_("Bad install for {}".format(proc.name.ip))
      remove_one_bad_device( st, cfg, proc.name, "CVS or build problem" )

  if len(cfg.devices) == 0:
    raise RuntimeError("build_install_firmware() removed everything!")

  check_runq(loginfo,proc_q, "touch Builds/install.txt")
  check_runq(loginfo,proc_q, "chmod a+w Builds/install.txt")
  time.sleep(120) # allow time for reboot to fully finish and mount filesystem
  return fw_time

def wrap_download( device, cfg_GlobalRunNum ):
  '''Inputs: device = a device entry from ConfigXML
             cfg_GlobalRunNum = integer run number (names the DataDir/RX<n>-<run> folder)
     Returns: True on success (or when nothing needs downloading), False on failure.
  Best-effort download of logged files from one GNSS receiver.  Used as a
  multiprocessing.Pool worker, so failures are reported through the return
  value rather than by raising.
  '''
  if device.ag is not None:
    # Ag receviers write data directly to server using TNFS. No need to download.
    return True
  try:
    rx.GetLoggedFilesViaHttpNoDisable( device.ip,
                                      device.user,
                                      device.pw,
                                      "DataDir/RX%d-%d"% (device.n,cfg_GlobalRunNum),
                                      False, # don't clear existing data
                                      checkLocal=True )
    return True
  except Exception:
    # Deliberate best-effort: the caller logs the failing device and retries
    # on the next polling pass.
    return False

def check_for_t04_issue( st, cfg, selected_devices ):
  """Sometimes we see a DataDir/RX?-????/ with no T04 files. Try to detect
  this and log diagnostics.

  Inputs: st = ST.SpirentTools = control handle (used for logging)
          cfg = ConfigXML = info from config.xml
          selected_devices = devices currently under test
  For each GNSS device whose run directory contains no *.T04 files, a one-time
  logging_issue.txt diagnostic dump is written and (if configured) an alert
  email is sent listing the affected unit numbers.
  """
  bad_units = []
  for device in selected_devices:
    if device.ag is not None: # only check GNSS receivers
      continue
    curr_datadir = "DataDir/RX%d-%d"%(device.n,cfg.GlobalRunNum)
    diag_filename = "%s/logging_issue.txt"%curr_datadir
    if os.path.isfile(diag_filename):
      continue # diagnostics already captured for this run
    n_t04 = len(glob.glob("%s/*.T04"%curr_datadir))
    if n_t04 != 0:
      continue # at least one T04 exists - nothing to report
    st.loginfo.print_("Warning - no T04s for: %s. See %s"%(device.ip,diag_filename))
    bad_units.append( device.n )
    try:
      sv_txt = rx.SendHttpGet( device.ip, '/xml/dynamic/svData.xml', device.user, device.pw )
      dl_txt = rx.SendHttpGet( device.ip, '/xml/dynamic/dataLogger.xml', device.user, device.pw )
      ps_txt = rx.SendHttpGet( device.ip, '/xml/dynamic/posData.xml', device.user, device.pw )
      cf_txt = rx.SendHttpGet( device.ip, '/xml/dynamic/configData.xml', device.user, device.pw )
    except Exception:
      # Receiver may be completely unresponsive; still write the diag file so
      # we don't re-alert on every polling pass.
      sv_txt = 'Error: not found'
      dl_txt = 'Error: not found'
      ps_txt = 'Error: not found'
      cf_txt = 'Error: not found'
    with open(diag_filename, "w" ) as f_out:
      f_out.write("----------- svData.xml output:\n")
      f_out.write(sv_txt)
      f_out.write("\n----------- dataLogger.xml output:\n")
      f_out.write(dl_txt)
      f_out.write("\n----------- posData.xml output:\n")
      f_out.write(ps_txt)
      f_out.write("\n----------- configData.xml output:\n")
      f_out.write(cf_txt)
  if len(cfg.email_recipients) > 0 and len(bad_units) > 0:
    sender = 'RFregression@fermion.eng.trimble.com'
    msg_text = "No T04s for units {} run {}".format(bad_units,cfg.GlobalRunNum)
    msg = email.mime.text.MIMEText(msg_text)
    msg['Subject'] = 'RF regression device problem'
    msg['From'] = sender
    msg['To'] = ', '.join(cfg.email_recipients)
    # Context manager issues QUIT/closes the session even if sendmail raises.
    with smtplib.SMTP( cfg.email_host ) as s:
      s.sendmail(sender, cfg.email_recipients, msg.as_string())

def count_almanacs( loginfo, selected_devices, last_lens ):
  """loginfo = logging function
     last_lens = previous count of almanacs (gps,gln,gal,bds,qzs) or None
     selected_devices = list of devices to check
  Count the # of healthy SVs in almanac and return a dict:
    sat_lens[device #] = [list of healthy counts with same length as sat_types]
  On the first call (last_lens is None) the per-constellation counts are
  logged; on later calls a warning is logged whenever a count drops.
  Returns None if any device could not be queried.
  """
  sat_types = ('gps','glonass','galileo','beidou','qzss')
  sat_lens = {}
  try:
    for device in selected_devices:
      if device.ag is not None: # Ag receivers have no merge.xml endpoint
        continue
      rsp = rx.SendHttpGet( device.ip,
                            '/xml/dynamic/merge.xml?gps=&glonass=&galileo=&qzss=&beidou=&irnss=',
                            device.user, device.pw )
      raw = ET.fromstring(rsp)
      all_lens = []
      dbg_str = f" alm RX #{device.n}"
      for pos,sat in enumerate(sat_types):
        # Count only almanac entries flagged healthy.
        curr_len = len(raw.findall(f'.//{sat}/sv[Health="TRUE"]'))
        all_lens.append(curr_len)
        # Bug fix: the literal "32,860" had clobbered the {curr_len}
        # interpolation, making the debug/warning output meaningless.
        dbg_str += f" {sat}={curr_len}"
        if last_lens is not None and device.n in last_lens:
          last_len = last_lens[device.n][pos]
          if last_len > curr_len:
            loginfo.print_(f" warning: alm RX #{device.n} {sat}={last_len}->{curr_len}")
      if last_lens is None:
        loginfo.print_(dbg_str)
      sat_lens[device.n] = all_lens
    return sat_lens
  except Exception as e:
    loginfo.print_("warning: Couldn't get alms",e)
  return None
    
def run_spirent_and_download_files( st, proc_q, cfg, info, skipped_devices_list=[], do_subset=False, start_time=-1, stop_time=-1 ):
  '''Inputs: st = ST.SpirentTools instance
             proc_q = Queue() - list of background processes started
             cfg = ConfigXML = info from config.xml
             info = ScenarioInfo = info from current scenario XML
             skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
             do_subset = True to play only the [start_time, stop_time] window of the scenario
             start_time = playback start offset in seconds (partial run only)
             stop_time = playback stop offset in seconds (partial run only)
     Returns: proc = multiprocessing.Process if started something, None otherwise
              try_again = True if RF replay stopped because of a drive glitch.  False normally
  Starts Spirent RF playback and waits until it is done.
  Also downloads T0x files about once a minute for regression tests.
  '''
  proc = None
  try_again = False
  # Start each playback with a clean jammer-diagnostics file.
  if os.path.isfile(jam_diag_filename):
    os.remove(jam_diag_filename)
  if cfg.jam_ref_rcvr is not None:
    if info.RF_jam == "grid":
      st.loginfo.print_("Kick off RF jammer script")
      proc = multiprocessing.Process(target=run_jammer, args=(cfg.jam_ref_rcvr,cfg.jam_RF_control,))
      proc.start()
      proc_q.put(proc.pid)
    else:
      st.loginfo.print_("Disable RF jammer")
      # jam_RF_control is "<ip>:<port>" of the jammer control box.
      RF_IP = cfg.jam_RF_control.split(':')[0]
      RF_port = int(cfg.jam_RF_control.split(':')[1])
      ReplayJamControl.setupJammer( RF_IP, RF_port )

  st.loginfo.print_("Start Spirent playback for run {} scenario {}".format(cfg.GlobalRunNum,info.fileName))
  selected_devices =[x for x in cfg.devices if x.n not in skipped_devices_list]
  # Baseline almanac counts; later polls warn if any count drops.
  last_count = count_almanacs( st.loginfo, selected_devices, None )

  if not do_subset:
    # Full Run
    st.start_spirent_test(info.scn_file)
    st.loginfo.print_("Wait for Spirent playback to finish...")
  else:
    # Partial Run
    offset_sec_start = max(0, start_time)
    offset_sec_stop = stop_time
    dt_play = offset_sec_stop - offset_sec_start

    scn_files = []
    for f in info.file_list:
      if f.endswith(".scn"):
        scn_files.append(f)

    if len(scn_files) == 1:
      # Partial Run with single .scn file
      # No need to recalculate time offset
      st.start_spirent_test(info.scn_file, offset_sec_start)
      st.loginfo.print_("Start time offset[sec]:", offset_sec_start, "for file", info.scn_file)
      st.loginfo.print_("Wait for Spirent playback to finish(in",dt_play,"sec)...")
    else:
      # Partial Run with multiple .scn files
      # Recalculate time offset
      # Reselect .scn file
      # Files are named <number>.scn; sort them by that number.
      scn_files.sort(key=lambda x: int(x.split('/')[-1].split('.')[0]), reverse=False)

      def toGPSTime(time_str):
        # Convert string to GPS time
        # Expected format e.g. "12:34:56 01/02/19" (GMT); returns seconds
        # since the GPS epoch (1980-01-06), leap-second corrected.
        import leapseconds
        utc_date = datetime.strptime(time_str.strip(),'%H:%M:%S %d/%m/%y')
        gps_epoch = datetime(1980,1,6)
        gps_time = leapseconds.utc_to_gps(utc_date) - gps_epoch
        return gps_time.total_seconds()

      def get_start_time_tow(RF_filename):
        # Return start time in GPS sec from .scn file
        if not os.path.exists(RF_filename):
          msg = "scn file "+str(RF_filename)+" does not exsit."
          st.loginfo.print_(msg)
          raise RuntimeError(msg)

        # NOTE(review): handle is never closed explicitly; relies on CPython
        # refcounting to close it.
        f_scn = open(RF_filename, 'r')
        fs = f_scn.readlines()

        # 604800 = seconds per GPS week; sentinel means "not found".
        StartGPSTime = 604800
        for l in fs:
          if '<Start Time>' in l:
            tmp = l.split('<Start Time>')[1].replace('\n','').replace('GMT','')
            # %604800 converts absolute GPS seconds to time-of-week.
            # NOTE(review): the 'break' exits after the first match, so min()
            # only ever sees one value - confirm later entries never matter.
            StartGPSTime = min(StartGPSTime, toGPSTime(tmp)%604800)
            break
        if StartGPSTime == 604800:
          msg = "No <Start Time> in scn file "+str(RF_filename)+". Cannot calcualte time offset."
          st.loginfo.print_(msg)
          raise RuntimeError(msg)

        return StartGPSTime

      # Walk the sorted .scn files and pick the last one that starts at or
      # before the requested offset; the offset is then re-based to that file.
      new_scn_file = info.scn_file
      new_offset_sec_start = offset_sec_start
      t0_start_time = 0
      for i,f in zip(range(len(scn_files)), scn_files):
        RF_filename = f
        if i == 0:
          t0_start_time = get_start_time_tow(RF_filename)
          st.loginfo.print_("Start time[sec] from #1 .scn file:", t0_start_time)
        else:
          ti_start_time = get_start_time_tow(RF_filename)
          st.loginfo.print_("Start time[sec] from #"+str(i+1)+" .scn file:", ti_start_time)
          if ti_start_time - t0_start_time <= offset_sec_start:
            new_scn_file = RF_filename
            new_offset_sec_start = offset_sec_start - (ti_start_time - t0_start_time)
          else:
            break

      st.start_spirent_test(new_scn_file, new_offset_sec_start)
      st.loginfo.print_("Start time offset[sec]:", new_offset_sec_start, "for file", new_scn_file.split('/')[-1])
      st.loginfo.print_("Wait for Spirent playback to finish(in",dt_play,"sec)...")

  # Check when it stops
  last_download_time = time.time()
  start_time = time.time()

  # Poll playback status every 30s until it stops (or errors out).
  while True:
    if do_subset and time.time() - start_time > dt_play:
      st.loginfo.print_("Stop Spirent playback")
      st.stop_any_spirent_test()

    status = st.spirent_test_status()
    if status == "PLAYING":
      pass
    elif status == "STOPPED":
      break
    elif status == "ERROR_RECOVERED":
      # Drive glitch - tell the caller to retry the whole playback.
      try_again = True
      break
    else:
      raise RuntimeError("Bad Spirent RF status")

    time.sleep(30)

    if cfg.disable_regression:
      continue

    # check for new files and download once every ~minute(+download time).
    if time.time() - last_download_time > 1*60:
      # Download in parallel so devices with limited memory don't
      # have to wait very long between polls.
      last_download_time = time.time()
      selected_devices =[x for x in cfg.devices if x.n not in skipped_devices_list]
      if verbose: st.loginfo.print_(fn(),"Runtime skipped_devices_list: %s"%str(skipped_devices_list))
      if verbose:
        # check elev mask and almanac
        for device in selected_devices:
          if device.ag == None:
            elev_mask_from_receiver = get_elev_mask_from_receiver(device)
            elev_mask_from_xml = get_elev_mask_from_xml(device)
            if elev_mask_from_receiver != elev_mask_from_xml:
              st.loginfo.print_("Warning! GetElev("+device.ip+") vs. clone file(",device.settingsClone,") =", elev_mask_from_receiver,"vs.", elev_mask_from_xml)
            else:
              st.loginfo.print_("PASS GetElev("+device.ip+") vs. clone file(",device.settingsClone,") =", elev_mask_from_receiver,"vs.", elev_mask_from_xml)
      # For the first few minutes, show almanac debug
      if time.time() - start_time < 5*60:
        last_count = count_almanacs( st.loginfo, selected_devices, last_count )
      with multiprocessing.Pool(len(selected_devices)) as p:
        results = p.map( partial(wrap_download, cfg_GlobalRunNum=cfg.GlobalRunNum),
                        selected_devices )
        for result,device in zip(results,selected_devices):
          if not result:
            st.loginfo.print_("Runtime download err: %s"%device.ip)
      if time.time() - start_time > 30*60:
        # check to make sure we have some T04 files within 30 minutes, otherwise log diagnostics
        check_for_t04_issue(st, cfg, selected_devices)


  return proc, try_again

def turn_ag_dev_logging_off(args):
  '''Inputs: args = sequence whose first element is (loginfo, device):
             loginfo = ST.SpirentTools.LogInfo = where to log diagnostics
             device = Ag device whose logging should be turned off
     Turn off logging, then TNFS streaming, verify, and close the connection.
  '''
  loginfo, device = args[0]
  pause = 0.2
  port = device.ag.io_TSIP
  handle = agrx.AgDevice(port)

  def note(*details):
    # Every diagnostic line shares the same "Device #<n> : <port>" prefix.
    loginfo.print_("Device #", device.n, ":", port, *details)

  note("open connection")
  handle.open_connection()
  time.sleep(pause)

  # turn off log
  note("turn off logging returns", handle.set_logging(False))
  time.sleep(pause)

  # turn off tnfs
  note("turn off tnfs", handle.set_tnfs("0.0.0.0"))
  time.sleep(pause)

  # check log
  note("check", handle.get_logging())
  time.sleep(pause)

  # close connection
  handle.close_connection()
  note("close connection")


def remove_one_bad_device( st, cfg, device, reason ):
  '''Inputs: st = ST.SpirentTools = control handle for Spirent commands
             cfg = ConfigXML = info from config.xml
             device = device to remove
             reason = text describing removal reason
     Remove a bad device from list to test.  Optionally emails an alert, and
     for Ag receivers also turns logging off (raising RuntimeError on failure).
  '''
  st.loginfo.print_("Removing device {}".format(device.ip))
  cfg.devices.remove( device )
  if len(cfg.email_recipients) > 0:
    sender = 'RFregression@fermion.eng.trimble.com'
    msg_text = "{} - removing unit {}".format(reason,device.ip)
    msg = email.mime.text.MIMEText(msg_text)
    msg['Subject'] = 'RF regression device problem'
    msg['From'] = sender
    msg['To'] = ', '.join(cfg.email_recipients)
    # Context manager issues QUIT/closes the session even if sendmail raises.
    with smtplib.SMTP( cfg.email_host ) as s:
      s.sendmail(sender, cfg.email_recipients, msg.as_string())
  # turn logging off if Ag receiver
  if device.ag is not None:
    st.loginfo.print_("Turn Ag device #"+str(device.n)+" logging off")
    try:
      # wrap_call_agrxtool(turn_ag_dev_logging_off, st.loginfo, device, None, None, 60,5)
      wrap_use_agrxtool(turn_ag_dev_logging_off, (st.loginfo, device), loginfo=st.loginfo, device=device, try_time=180, dt=15)
      st.loginfo.print_("Device # "+str(device.n)+" Turn off logging successfully")
    except Exception:
      st.loginfo.print_("Device # "+str(device.n)+" Turn off logging error!")
      raise RuntimeError("Cannot turn off logging")

def remove_bad_devices( st, cfg ):
  '''Inputs: st = ST.SpirentTools = control handle for Spirent commands
             cfg = ConfigXML = info from config.xml
     Check all cfg.devices and remove any that are not responding.
  GNSS receivers are probed over HTTP; Ag receivers are judged by whether any
  T04/t0x log files have appeared for them.
  Raises RuntimeError if TNFS_server_root is unset or all devices get removed.
  '''
  st.loginfo.print_("Removing bad devices...")
  bad_devices = []
  for device in cfg.devices:
    if device.ag == None: # only check GNSS receivers
      try:
        # Probe the web root - any response means the unit is alive.
        rx.SendHttpGet( device.ip, '/', device.user, device.pw )
        if device == cfg.current_config_device:
          # NOTE(review): still being marked as the in-progress config device
          # here means a configuration attempt never completed, so the unit is
          # treated as bad even though it answered HTTP - confirm intent.
          bad_devices.append( device )
      except:
        # No HTTP response at all.
        bad_devices.append( device )
    else:
      # check T04 exist
      have_t04 = False
      # 1) check TNFS gpsData folder
      if cfg.TNFS_server_root == None:
        st.loginfo.print_(fn()+"cfg.TNFS_server_root = None")
        raise RuntimeError("cfg.TNFS_server_root = None")
      TNFS_server_path = cfg.TNFS_server_root + "/gpsData/"
      # prefix is always 4 chars (e.g. "RX01"), matching the fname[:4] compare.
      prefix = "RX" + str(device.n).zfill(2)
      for fname in os.listdir(TNFS_server_path):
        if len(fname)>=4 and fname[:4] == prefix and fname[-4:]=='.t0x':
          have_t04 = True
      # 2) check RX_-_/data folder
      out_dir = "DataDir/RX%d-%d/data"% (device.n,cfg.GlobalRunNum)
      if os.path.isdir(out_dir):
        for f in os.listdir(out_dir):
          if f.split('.')[-1] == 'T04' or f.split('.')[-1] == 't0x':
            have_t04 = True
        # NOTE(review): the no-T04 verdict is only reachable when out_dir
        # exists - an Ag device with no data directory at all is never
        # flagged here; confirm this is intended.
        if not have_t04:
          bad_devices.append( device )

  # Removal is deferred until after the scan so we don't mutate cfg.devices
  # while iterating it.
  for device in bad_devices:
    remove_one_bad_device( st, cfg, device, "Unresponsive" )
  if len(cfg.devices) == 0:
    raise RuntimeError("remove_bad_devices() removed everything!")

def download_files_at_end_of_test( st, proc_q, cfg, skipped_devices_list=[] ):
  '''Inputs: st = ST.SpirentTools = control handle for Spirent commands
              proc_q = Queue() - list of background processes started
             cfg = ConfigXML = info from config.xml
             skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
  Download T0x files at end of a single RF playback.
  GNSS receivers are downloaded over HTTP (with one retry); Ag receivers have
  their TNFS-written files moved from the server share into DataDir, renamed
  to .T04, and concatenated into a single rcvr.T04.
  Raises RuntimeError when a device cannot be downloaded/stopped after retry.
  '''
  if verbose: st.loginfo.print_(fn(),"Runtime skipped_devices_list: %s"%str(skipped_devices_list))
  for device in cfg.devices:
    if device.n in skipped_devices_list:
      continue
    st.loginfo.print_("download files.. #",device.n)
    # Mark which device is in progress so other checks can see an interrupted
    # download; cleared again at the bottom of the loop.
    cfg.current_config_device = device

    if device.ag == None:
      # Two attempts: the second one runs after a 70s crash-recovery sleep.
      try_cnt = 2
      while try_cnt > 0:
        try:
          out_dir = "DataDir/RX%d-%d"% (device.n,cfg.GlobalRunNum)
          rx.DisableDefaultLogging(device.ip,device.user,device.pw)
          rx.GetLoggedFilesViaHttpNoDisable( device.ip, # IP Addr
                                            device.user, # User name
                                            device.pw, # Password
                                            out_dir,
                                            False, # don't remove existing files
                                            checkLocal=True )

          try:
            rx.DownloadSystemErrlog(device.ip,device.user,device.pw,
                                    out_dir+'/SysLog.bin')
            rx.CloneGNSSConfig(device.ip,device.user,device.pw,'ALM.xml')
            rx.DownloadClone(device.ip,device.user,device.pw,out_dir+'/ALM.xml')
          except:
            # this info is not critical.  Ignore if error...
            st.loginfo.print_("Couldn't get ALM.xml/Syslog.bin for {}".format(device.ip))
            time.sleep(70)

          try_cnt = 0
        except:
          st.loginfo.print_("Final download error! Sleep 70s - time for reboot...")
          try_cnt -= 1
          if try_cnt == 0:
            raise RuntimeError("Cannot download files at end of test")
          time.sleep(70) # give time for crash recovery
    else:
      st.loginfo.print_("This is Ag receicer. #",device.n)
      st.loginfo.print_("Try turn off logging. #",device.n)

      # Two attempts at stopping the Ag logger, with crash-recovery sleeps.
      try_cnt = 2
      while try_cnt > 0:
        try:
          # wrap_call_agrxtool(turn_ag_dev_logging_off, st.loginfo, device, None, None)
          wrap_use_agrxtool(turn_ag_dev_logging_off, (st.loginfo, device), loginfo=st.loginfo, device=device, try_time=180, dt=15)
          try_cnt = 0
        except:
          st.loginfo.print_("Turn off logging error! Sleep 70s - time for reboot...")
          try_cnt -= 1
          if try_cnt == 0:
            raise RuntimeError("Cannot turn off logging at end of test")
          time.sleep(70) # give time for crash recovery

      # create folder
      out_dir = "DataDir/RX%d-%d/data"% (device.n,cfg.GlobalRunNum)
      if os.path.isdir(out_dir)  == False:
        cmd = "mkdir -p " + out_dir
        check_runq(st.loginfo, proc_q, cmd)

      # move files from tnfs folder to DataDir
      if cfg.TNFS_server_root == None:
        st.loginfo.print_(fn()+"cfg.TNFS_server_root = None")
        raise RuntimeError("cfg.TNFS_server_root = None")
      TNFS_server_path = cfg.TNFS_server_root + "/gpsData/"
      # prefix is always 4 chars ("RX" + 2-digit unit number).
      prefix = "RX" + str(device.n).zfill(2)
      for fname in os.listdir(TNFS_server_path):
        if len(fname)>=4 and fname[:4] == prefix:
          cmd = "mv "+TNFS_server_path + fname + " " + out_dir
          check_runq(st.loginfo, proc_q, cmd)

      # rename t0x to T04
      fs = os.listdir(out_dir)
      for f in fs:
        if f.split('.')[-1] == 't0x':
          old_name = out_dir + "/" + f
          new_name = out_dir + "/" + f.split('.')[0]+".T04"
          st.loginfo.print_("Rename files", old_name, new_name)
          os.rename(old_name, new_name)

      # combine T04 files to a single file
      # same prefix will fix the order of files to be read
      # combined file will fix the decode error between 2 files
      # AG cases, sort by file name _XXX.T04
      out_dir_parent = out_dir.split('/data')[0]
      tmp_all_T04 = out_dir_parent + '/rcvr.T04'
      all_t04_files = glob.glob( out_dir + '/*.T0?' )
      if len(all_t04_files) == 0:
          st.loginfo.print_("#",device.n,". Final download error! No T04/t0x file")
          raise RuntimeError("Cannot download files at end of test")
      # NOTE(review): assumes every matched file ends in _<number>.T04; any
      # other .T0? name would make int() raise - confirm the naming contract.
      all_t04_files.sort(key = lambda x: int(x.split('_')[-1].replace('.T04','')))
      st.loginfo.print_("Combine",all_t04_files,"to",tmp_all_T04)
      with open(tmp_all_T04, "wb" ) as f_rcvr:
        for fin_name in all_t04_files:
          # NOTE(review): source file objects are never closed explicitly;
          # relies on CPython refcounting.
          shutil.copyfileobj( open(fin_name,'rb'), f_rcvr )

    cfg.current_config_device = None

def startup_receivers( proc_q, st, cfg, info, proc_list, skipped_devices_list=[], log_failed_zda_msg_filename=None, run_rtx=False, run_both=False ):
  '''Inputs: proc_q = Queue() - list of background processes started
             st = ST.SpirentTools = control handle for Spirent commands
             cfg = ConfigXML = info from config.xml
             info = ScenarioInfo = info from current scenario XML
             proc_list = list of processes started by this function
             skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
             log_failed_zda_msg_filename = file collecting failed ZDA messages (or None)
             run_rtx = True to configure receivers for RTX (requires info.RTX_corr_file)
             run_both = passed through to start_logging()
  Start receivers for testing:
   - configure receivers
   - start logging
  Each phase is attempted twice; devices that still fail are removed from
  cfg.devices and recorded in cfg.removed_devices.
  '''
  total_devices = copy.copy(cfg.devices)
  do_devices = [x for x in total_devices if x.n not in skipped_devices_list]
  if verbose: st.loginfo.print_("startup_receivers::do_devices =",do_devices)
  # RTX only applies when the scenario supplies a correction file.
  run_rtx = run_rtx and info.RTX_corr_file is not None

  # Phase 1: configure devices (up to two attempts per device).
  for n_attempt in range(2):
    # do_devices_gnss = [x for x in do_devices if x.ag is None]
    # do_devices_ag   = [x for x in do_devices if x.ag is not None]

    def wrap_run_setup( device ):
      # Pool worker: map run_setup() exceptions to a False result.
      try:
        run_setup( st.loginfo, device, info, cfg, run_rtx )
        return True
      except Exception as e:
        st.loginfo.print_(f"run_setup() failed. {device.ip} {e}")
        return False

    if run_setup_in_parallel:
      st.loginfo.print_("Run setup in parallel.")
      # NOTE(review): ThreadPool(0) raises if do_devices is empty; presumably
      # callers guarantee at least one selected device - confirm.
      with multiprocessing.pool.ThreadPool(len(do_devices)) as p:
        results = p.map( wrap_run_setup, do_devices )
    else:
      st.loginfo.print_("Run setup in serial.")
      results = []
      for dev in do_devices:
        results.append( wrap_run_setup(dev) )

    st.loginfo.print_("Sleep 70s.  Then double check elev mask and alms.")
    time.sleep(70) # give time for reboot/crash recovery
    count_almanacs( st.loginfo, do_devices, None )
    with multiprocessing.pool.ThreadPool(len(do_devices)) as p:
      def wrap_check_elev_mask( device ):

        def check_elev_mask( loginfo, device ):
          ''' check elev mask
              return True/False
          '''
          def check_elev_mask_for_GNSS(loginfo,device):
            # Compare the mask reported by the receiver with the one the
            # settings-clone XML requested.
            elev_mask_from_receiver = get_elev_mask_from_receiver(device)
            elev_mask_from_xml = get_elev_mask_from_xml(device)
            if elev_mask_from_xml == None:
              # Nothing requested in the XML - nothing to verify.
              return True
            if elev_mask_from_receiver != elev_mask_from_xml:
              loginfo.print_("Warning! GetElev("+device.ip+") vs. clone file(",device.settingsClone,") =", elev_mask_from_receiver,"vs.", elev_mask_from_xml,".")
              return False
            else:
              loginfo.print_("Double check elev mask:",device.ip,"pass")
              return True

          def check_elev_mask_for_Ag():
            # Ag receivers have no elevation-mask check; always pass.
            return True

          if device.ag == None:
            if verbose: loginfo.print_(fn()+"Device #", device.n ,"is GNSS receiver")
            return check_elev_mask_for_GNSS(loginfo, device)
          else:
            if verbose: loginfo.print_(fn()+"Device #", device.n ,"is Ag receiver")
            return check_elev_mask_for_Ag()

        try:
          return check_elev_mask(st.loginfo, device)
        except Exception as e:
          st.loginfo.print_("Double check elev mask:",device.ip,"fail",e)
          return False
      results_elev = p.map( wrap_check_elev_mask, do_devices )

    # Keep only the devices that failed setup or the elev-mask check; they
    # get one more pass around the n_attempt loop.
    tmp = []
    for i in range(len(do_devices)):
      if verbose: st.loginfo.print_("#",do_devices[i].n,"run_setup(), check_elev_mask():", results[i], results_elev[i])
      if results[i] == False or results_elev[i] == False:
        tmp.append(do_devices[i])
    do_devices = tmp

    if len(do_devices) == 0:
      break
    st.loginfo.print_("Need to retry run_setup for {}".format(do_devices))

  # Anything left undone is bad:
  for device in do_devices:
    remove_one_bad_device( st, cfg, device, "run_setup error" )
    cfg.removed_devices.append(device)

  # Phase 2: start logging on the surviving devices (up to two attempts).
  total_devices = copy.copy(cfg.devices)
  do_devices = [x for x in total_devices if x.n not in skipped_devices_list]
  for n_attempt in range(2):
    with multiprocessing.pool.ThreadPool(len(do_devices)) as p:
      def wrap_start_logging( device ):
        # Pool worker: map start_logging() exceptions to the 'error' marker.
        try:
          return start_logging( proc_q, st.loginfo, device, info, log_failed_zda_msg_filename, run_rtx, run_both )
        except Exception as e:
          st.loginfo.print_(f'start_logging error:{e}')
          return 'error'
      results = p.map( wrap_start_logging, do_devices )


    # Get devices that failed:
    bad_devices = []
    for dev,proc in zip(do_devices,results):
      if proc == 'error':
        # Get devices that failed:
        bad_devices.append( dev )
      else:
        # Update proc_list with list of processes started
        proc_list.extend( proc )
    do_devices = bad_devices  # retry bad devices

    if len(do_devices) == 0:
      break
    st.loginfo.print_("Sleep 70s.  Need to retry start_logging for {}".format(do_devices))
    time.sleep(70) # time for crash recovery

  # Anything left undone is bad:
  for device in do_devices:
    remove_one_bad_device( st, cfg, device, "start_logging error" )
    cfg.removed_devices.append(device)

  if len(cfg.devices) == 0:
    raise RuntimeError("startup_receivers() removed everything!")

def run_single_test( proc_q, st, cfg, info, first_info, fw_time, custom_fw, n_loops, proc_build_install_fw, skipped_devices_list=[], do_subset=False, start_time=-1, stop_time=-1, run_rtx=False, run_both=False, skip_post=False):
  '''Inputs: proc_q = Queue() - list of background processes started
             st = ST.SpirentTools = control handle for Spirent commands
             cfg = ConfigXML = info from config.xml
             info = ScenarioInfo = info from current scenario XML
             first_info = True when this is the first scenario of the playback group;
                          triggers a background pre-copy of the next scenario's RF samples
             fw_time = text string of when firmware was built
             custom_fw = user has custom firmware?  Marks each output dir with custom_fw.txt
             n_loops = # of times to run this scenario
             proc_build_install_fw = background process running firmware build/install on
                                     receivers (joined here before configuring receivers),
                                     or None if no build was started
             skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
                                    NOTE(review): mutable default argument - harmless here since
                                    this function only reads it, but callers should pass a list
             do_subset = True/False, if do partial run
             start_time = int, start time for partial run
             stop_time = int, stop time for partial run
             run_rtx = True/False, try to run rtx or not
             run_both = True/False, passed through to startup_receivers()
             skip_post = True/False, <skip_post> flag will be saved in ResultsQueue/RX...xml file
  Perform RF playback of a single XML scenario file:
   - starts RF playback
   - at end, downloads T0x files and writes out cfg.ResultsQueue summary file
  Raises RuntimeError if the firmware build fails or if Spirent playback fails
  twice in a row.
  '''
  # if log_failed_zda_msg_filename is not None & exists & is not empty, it will be emailed at the end of single test & renamed for reviewing
  log_failed_zda_msg_filename = 'failed_ZDA_msg.txt'

  st.loginfo.info['TestFilename'] = info.fileName
  if verbose: st.loginfo.print_(fn(),"st.loginfo.info['TestFilename'] =",st.loginfo.info['TestFilename'])

  # Make sure no previous playback is still running before touching anything.
  st.stop_any_spirent_test()
  if not cfg.disable_regression:
    st.reset_offset()

  # Copy any scenario sample files that are not yet on the Spirent SSD.
  n_copies_needed = st.calc_num_files_to_copy()
  if n_copies_needed > 0:
    # Copying is slow, so process results in background
    check_runq(st.loginfo, proc_q, "./ProcessResults.py",
               exit_on_err=False,
               run_in_bg=True)
    st.copy_needed_files()

  if first_info:
    # Once we're done copying the current scenario RF samples, try to
    # pre-copy the next scenario's RF samples in the background.
    bg_copy_pid = st.bg_copy_next_run_in_advance()
    st.loginfo.print_(fn()+"bg_copy_pid =",bg_copy_pid,"Add to proc_q")
    if bg_copy_pid is not None:
      proc_q.put(bg_copy_pid)

  # After scn files are copied to Spirent SSD, wait for build and install firmware to finish before configure receviers
  st.loginfo.print_(fn()+"scenario files uploading to Spirent is skipped or finished.")
  if proc_build_install_fw is not None:
    st.loginfo.print_(fn()+"waiting for build_install_firmware() pid={} to finish".format(proc_build_install_fw.pid))
    proc_build_install_fw.join()
    if proc_build_install_fw.exitcode != 0:
      raise RuntimeError("Error in firmware build")

  # Run this set of samples multiple times? Based on user request.
  # We potentially run each data set multiple times to get an idea of
  # how much variability we get (tasking and analog noise adds
  # variations).
  n_errors = 0     # consecutive Spirent failures; give up after 2
  fileRunNum = 0   # completed runs of this scenario (only bumped on success)
  while True:
    if fileRunNum >= n_loops:
      break

    # Reactivate any receivers disabled due to runtime failures
    cfg.devices += cfg.removed_devices
    cfg.removed_devices = []

    st.loginfo.info['runs'] = '%d / %d' % (fileRunNum+1, n_loops)

    st.loginfo.print_("run num = %d / %d" % (fileRunNum+1, n_loops))
    start_time_YmdHMS = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')

    # While we're waiting we might as well do some useful work in the background:
    # - process any old results
    check_runq(st.loginfo, proc_q, "./ProcessResults.py",
               exit_on_err=False,
               run_in_bg=True)

    # Clear any output directory
    if not cfg.disable_regression:
      for device in cfg.devices:
        out_dir = "DataDir/RX%d-%d"% (device.n,cfg.GlobalRunNum)
        if os.path.exists(out_dir):
          st.loginfo.print_("Clear dir %s for: %s"%(out_dir,device.ip))
          shutil.rmtree( out_dir )

    # Samples are available now set up the receiver by clearing from
    # the last run
    st.loginfo.print_("Clear, reset and configure receivers...")
    proc_list = []  # background logging processes started by startup_receivers()
    if not cfg.disable_regression:
      startup_receivers( proc_q, st, cfg, info, proc_list, skipped_devices_list, log_failed_zda_msg_filename, run_rtx, run_both)

    #####################################################
    proc,try_again = run_spirent_and_download_files( st, proc_q, cfg, info, skipped_devices_list, do_subset, start_time, stop_time)
    proc_list.append(proc)

    # Playback is over - stop all background logging/download processes.
    for proc in proc_list:
      if proc is not None:
        st.loginfo.print_("Stop PID {}".format(proc.pid))
        proc.terminate()
        proc.join()

    # Rename and Email bad ZDA log
    if log_failed_zda_msg_filename is not None and os.path.isfile(log_failed_zda_msg_filename):
      if os.path.getsize(log_failed_zda_msg_filename) != 0:
        st.loginfo.print_("Email non-empty zda log file:",log_failed_zda_msg_filename)
        email_file("RF regression bad zda msg",log_failed_zda_msg_filename)
      # Keep the last log around (with a .last suffix) for manual review.
      os.rename(log_failed_zda_msg_filename, log_failed_zda_msg_filename+'.last')

    if try_again:
      # Spirent reported a problem - retry this run once before giving up.
      # Note fileRunNum is NOT incremented, so the same run is repeated.
      n_errors += 1
      if n_errors >= 2:
        raise RuntimeError("Too many errors.  Giving up")
      st.loginfo.print_("Some problem with Spirent.  Try one more time...")
      continue

    if cfg.disable_regression:
      # All the stuff below is only for the regression test
      return

    # The test has completed
    #  - Keep any jammer diagnostics
    #  - Download and archive the data
    #  - Write an XML file with details
    #  - kick off in a seperate process analysis
    jam_diag_dir = "DataDir/jam-%d/" % (cfg.GlobalRunNum)
    if os.path.isfile(jam_diag_filename):
      os.makedirs(jam_diag_dir, exist_ok=True)
      os.rename(jam_diag_filename,
                jam_diag_dir + jam_diag_filename )

    # If a receiver is crashed at the end of a test, we may not be able
    # to download files.  Try once, but if there is a problem then
    # remove any bad devices and move on.
    try:
      download_files_at_end_of_test( st, proc_q, cfg, skipped_devices_list)
    except:
      remove_bad_devices( st, cfg )
      download_files_at_end_of_test( st, proc_q, cfg, skipped_devices_list )

    # Write one ResultsQueue summary XML per (non-skipped) device so the
    # downstream analysis (ProcessResults.py) can find the data.
    for device in cfg.devices:
      if device.n in skipped_devices_list:
        continue

      out_dir = "DataDir/RX%d-%d"% (device.n,cfg.GlobalRunNum)

      # Note if this is custom firmware
      if custom_fw:
        open(out_dir+"/custom_fw.txt", 'a').close()

      if do_subset:
        # Record the partial-run time window alongside the data.
        partial_file = open(out_dir+"/partial.txt", 'a')
        partial_file.write("<offsetStartGPSTime>" + str(start_time) + "</offsetStartGPSTime>")
        partial_file.write("<offsetStopGPSTime>" + str(stop_time) + "</offsetStopGPSTime>")
        partial_file.close()

      # Create symbolic link to jam diags in output directory
      if os.path.isfile( jam_diag_dir + jam_diag_filename ):
        os.symlink( "../../" + jam_diag_dir + jam_diag_filename,
                    "%s/%s" % (out_dir,jam_diag_filename) )

      end_time = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
      # Now write a results file that point to the results and truth
      # information
      if(os.path.isdir(cfg.ResultsQueue) == False):
        os.makedirs( cfg.ResultsQueue ) # Make sure the directory exists
      fid = open(cfg.ResultsQueue + '/RX' + str(device.n) + '-' + str(cfg.GlobalRunNum) + '.xml','w');
      fid.write("<data>");
      fid.write("<IP>" + device.ip + "</IP>");
      fid.write("<desc>" + device.desc + "</desc>");
      fid.write("<ScriptDir>%s</ScriptDir>" % cfg.ScriptRoot);
      fid.write("<SampleConfig>" + cfg.SampleConfig + '/' + info.fileName + "</SampleConfig>");
      fid.write("<Data>DataDir/RX" + str(device.n) + "-" + str(cfg.GlobalRunNum) + "</Data>");
      fid.write("<BuildTime>" + fw_time + "</BuildTime>");
      fid.write("<StartTime>" + start_time_YmdHMS + "</StartTime>");
      fid.write("<EndTime>" + end_time + "</EndTime>");
      for x in info.alm_eph_clone:
        fid.write("<almEphClone>" + x + "</almEphClone>");
      if device.settingsClone:
        fid.write("<settingsClone>" + device.settingsClone + "</settingsClone>");
      fid.write("<dynamicModel>" + info.dynamicModel + "</dynamicModel>");
      if device.patchFile:
        fid.write("<patchFile>" + device.patchFile + "</patchFile>");
      if device.tag:
        fid.write("<tag>" + device.tag + "</tag>");
      if device.frozen:
        fid.write("<frozen>" + device.frozen + "</frozen>");
      if device.do_RTK:
        fid.write("<do_RTK>" + device.do_RTK + "</do_RTK>");
      if run_rtx and device.do_RTK and info.RTX_corr_file is not None:
        fid.write("<do_RTX>" + str(1) + "</do_RTX>")
      if do_subset:
        fid.write("<partialRun>" + str(1) + "</partialRun>")
      if device.ag != None:
        fid.write("<agRun>" + str(1) + "</agRun>")
      if skip_post == True:
        fid.write("<skipPost>1</skipPost>")
      # ToDo - any other meta data we need for the analysis?
      fid.write("</data>");
      fid.close()

    # Bump and persist the global run number so restarts keep counting up.
    cfg.GlobalRunNum = cfg.GlobalRunNum + 1
    fid = open('RunNum.txt','w')
    fid.write(str(cfg.GlobalRunNum))
    fid.close()
    st.loginfo.info['GlobalRunNum'] = cfg.GlobalRunNum
    fileRunNum += 1



def Run(proc_q, st, custom_fw, n_loops, manual_test_files=None, skipped_devices_list=None, do_subset=False, start_time=-1, stop_time=-1, run_rtx=False, run_both=False, skip_post=False, build_branch='main'):
  """Inputs: proc_q = Queue() - list of background processes started
             st = ST.SpirentTools instance - for controlling Spirent
             custom_fw = user has custom FW
             n_loops = # of times to run each scenario
             manual_test_files = for manual testing, XML scenario(s) of test(s) to run
             skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
                                    (None means "none skipped"; this function appends to it)
             do_subset = True/False, if do partial run
             start_time = int, start time for partial run
             stop_time = int, stop time for partial run
             run_rtx = True/False, try to run rtx or not
             run_both = True/False, passed through to run_single_test()
             skip_post = True/False, <skip_post> flag will be saved in ResultsQueue/RX...xml file
             build_branch = firmware branch name for build_install_firmware()
  Run an RF playback.
  For a normal regression we'll build and upload firmware before running the XML scenario list.
  This handles copying data from the USB backup to the SSD (but not from the remote server).
  If manual_test_files == "a_single_file.xml", then the receivers won't be touched at all.
  If manual_test_files == "file1.xml,file2.xml,...", then the normal full regression test will
    run on the listed XML files.
  If manual_test_files == "file1.xml,", then the normal full regression test will run on
    the single XML scenario.
  """
  # BUG FIX: skipped_devices_list used to default to a mutable [].  Since this
  # function appends to it (disabled devices, ag receivers without AGRXTools),
  # device numbers accumulated across calls that relied on the default.
  if skipped_devices_list is None:
    skipped_devices_list = []

  if verbose: st.loginfo.print_(fn(),"manual_test_files=",manual_test_files)
  # Decode manual_test_files into a list of files to run, and decide if we
  # want to run the full regression.
  disable_regression = False
  manual_list = None
  if manual_test_files is not None:
    if manual_test_files.find(',') < 0:
      # Single file, no comma: manual run - don't touch the receivers.
      disable_regression = True
      manual_list = [manual_test_files]
    else:
      manual_list = [x for x in manual_test_files.split(',') if x]

  # Make sure USB is mounted for any copy/playback
  st.mount_usb(True)

  # Get the test system config file
  cfg = get_config_xml(st, disable_regression)

  # remove ag receivers if AGRXTools does not exist
  if 'AGRXTools' not in sys.modules:
    st.loginfo.print_("Import module AGRXTools failed. Remove ag receviers")
    total_devices = copy.copy(cfg.devices)
    for x in total_devices:
      if x.ag is not None and x.n not in skipped_devices_list:
        skipped_devices_list.append(x.n)

  # remove disabled devices from builds/runs
  total_devices = copy.copy(cfg.devices)
  for x in total_devices:
    if x.disable_build_run:
      skipped_devices_list.append(x.n)

  selected_devices_id_list =[x.n for x in cfg.devices if int(x.n) not in skipped_devices_list]
  st.loginfo.print_("Selected devices list: RX-{}".format(str(selected_devices_id_list)))
  st.loginfo.info['NumReceivers'] = f"{len(selected_devices_id_list)}/{len(cfg.devices)}"
  st.loginfo.info['GlobalRunNum'] = cfg.GlobalRunNum
  st.loginfo.print_("Stop any pending test...")

  st.stop_any_spirent_test() # commands run faster if playback is stopped

  # Parse the scenario XMLs and stage their RF sample files.
  file_list, file_total_secs = parse_XML_scenarios(st, cfg, manual_list, skipped_devices_list, n_loops, do_subset, start_time, stop_time)
  end_time_str = prepare_XML_scenarios(st, file_list, file_total_secs)

  for info_group in st.get_sorted_scenario_groups():
    for info in info_group:
      st.loginfo.print_("Scenario: %s" % info.fileName)

  st.loginfo.info['ExpectedEndTime'] = end_time_str
  st.loginfo.print_("Expected end time: %s" % end_time_str)

  #####################################################
  # Reboot via SSH on linux in case GDB was opened
  #####################################################
  devices = cfg.devices
  for device in devices:
    if device.n not in skipped_devices_list and device.linux:
      st.loginfo.print_(f"Attempt to reboot Linux device #{device.n} over SSH")
      reboot_linux_receiver_via_ssh(device, st.loginfo)
      time.sleep(70)

  #####################################################
  # Build the firmware - we do this before each
  # time we run through all the samples. That way we
  # get the latest firmware
  # need to call .join() before config receivers
  #####################################################
  proc = None
  if cfg.allow_builds and not cfg.disable_regression:
    proc = None
    if run_build_install_in_background == True:
      st.loginfo.print_(fn()+"Run build_install_firmware() in background. Later .join() when 1st RF scenario copy is finished.")
      proc = multiprocessing.Process(target=build_install_firmware, args=(st, proc_q, cfg, build_branch, skipped_devices_list, ))
      proc.start()
      proc_q.put(proc.pid)
    else:
      st.loginfo.print_(fn()+"Run build_install_firmware() in foreground.")
      build_install_firmware(st, proc_q, cfg, build_branch, skipped_devices_list )

  _, fw_time = is_build_needed(st.loginfo)

  # Now read each sample XML config file and run that test
  for info_group in st.get_sorted_scenario_groups():
    for n,info in enumerate(info_group):
      # Add back removed devices due to configure failure
      for removed_device in cfg.removed_devices:
        cfg.devices.append(removed_device)
      cfg.removed_devices = []
      first_info = (n==0)
      run_single_test(proc_q, st, cfg, info, first_info, fw_time, custom_fw, n_loops, proc, skipped_devices_list, do_subset, start_time, stop_time, run_rtx, run_both, skip_post)
    st.done_with_playback_group()

def set_regression_status(proc_q,status):
  """Input: proc_q = Queue() - list of background processes started
            status = Running, Copying, Idle, Idle (Error), Killed, Copy Canceled
  Record the current regression state for the web GUI.
  Returns False (without writing the status file) when the caller tries to
  start a new active operation while one is already in progress; otherwise
  writes regression_status.json and returns True.  When the system goes
  inactive, leftover background processes in proc_q are killed.
  """
  is_active = 1 if status in ("Running", "Copying") else 0
  state = {'status': status, 'active': is_active}

  if status != "Running":
    disable_all_IP_filters()

  if is_active == 1:
    # Refuse to start when something is already running or copying.
    if get_regression_status()['active'] == 1:
      return False
  elif proc_q is not None:
    kill_background_procs(proc_q)

  with open("regression_status.json","w") as f:
    print("Set Regression Status",state)
    json.dump( state, f )
  return True

def get_regression_status():
  """Read "regression_status.json" and return its contents as a dictionary."""
  with open("regression_status.json") as status_file:
    return json.loads(status_file.read())

def email_error_result():
  """Email an error report for the normal regression run.
  Sends the contents of regression_diag.txt to the recipients listed in
  config.xml; does nothing when no recipients are configured."""
  sender = 'RFregression@fermion.eng.trimble.com'

  try:
    with open('regression_diag.txt') as f:
      msg_text = f.read()
  except OSError:
    # BUG FIX: was a bare "except:"; only file-access errors are expected here.
    msg_text = "Diagnostic file not found"

  cfg = get_config_xml(None,True)
  if len(cfg.email_recipients) == 0:
    return  # nobody to notify
  msg = email.mime.text.MIMEText(msg_text)
  msg['Subject'] = 'RF regression failure'
  msg['From'] = sender
  msg['To'] = ', '.join(cfg.email_recipients)
  s = smtplib.SMTP( cfg.email_host )
  s.sendmail(sender, cfg.email_recipients, msg.as_string())
  s.quit()


def kill_all(proc_pid):
  """Input: proc_pid = PID of the root of the process tree to kill.
  os.kill() and proc.terminate() don't handle child processes.
  Use psutil to kill the process and everything it spawned."""
  root = psutil.Process(proc_pid)
  # Kill the children first, then the root itself.
  for child in root.children(recursive=True):
    child.kill()
  root.kill()


def kill_background_procs(proc_q):
  """Input: proc_q = Queue() - list of background processes started
  Kill any background processes still listed in proc_q.
  Best-effort: PIDs that have already exited are ignored.  Never kills the
  calling process itself (it may have put its own PID on the queue).
  """
  while not proc_q.empty():
    pid = proc_q.get()
    if pid != multiprocessing.current_process().pid:
      print('kill',pid)
      try:
        kill_all(pid)
      except Exception:
        # BUG FIX: was a bare "except:" (also swallowed SystemExit /
        # KeyboardInterrupt).  If the process is already finished, don't worry.
        pass

def CopyList(st, test_files):
  """Inputs: st = ST.SpirentTools instance
             test_files = XML scenario file with list of GSS6450 files
             Could be a list of XML files, e.g.: "file1.xml,file2.xml"
  Makes sure all files in XML scenario test_file are on the Linux server and the
  USB backup drives."""
  st.loginfo.print_("CopyList: {}".format(test_files))
  # USB must be writable while copying; restored to read-only at the end.
  st.mount_usb( True, read_only=False )
  for xml_name in test_files.split(','):
    st.loginfo.print_(" Copying: {}".format(xml_name))
    test = getTestFileInfo( st, True, xml_name )['tests'][0]
    st.loginfo.print_(" Info: {}".format(test))
    # Publish what we're copying so the web GUI can show progress.
    with open("copy_status.json","w") as f:
      json.dump( {'filename': test['filename']}, f )

    rf_names = [entry['name'] for entry in test['RF_files']]
    if test['RF_usb'] != 'OK':
      # USB copy missing - pull from whichever source has the files.
      if test['RF_ssd'] == 'internal':
        st.loginfo.print_("copying from internal SSD to USB")
        st.copy_internal_ssd_to_usb( rf_names )
      elif test['RF_ssd'] == 'external':
        st.loginfo.print_("copying from external SSD to USB")
        st.copy_external_ssd_to_usb( rf_names )
      elif test['RF_server'] == 'OK':
        st.loginfo.print_("copying from server to USB")
        st.copy_server_to_usb( rf_names )
    if test['RF_server'] != 'OK':
      st.loginfo.print_("copying from USB to server")
      for entry in test['RF_files']:
        st.copy_usb_to_server( entry['name'] )
    os.remove("copy_status.json")
  st.mount_usb( True, read_only=True )

def WrapCopyList(proc_q,manual_test_files):
  """Input: proc_q = Queue() - list of background processes started
            manual_test_files = XML scenario filename (or list "a.xml,b.xml")
  Same as CopyList(), but catches and reports errors.
  Returns True if an error occurred, else False.  On error the regression
  status is set to 'Idle (Error)' and, for the full regression (empty
  manual_test_files), an error email is sent."""
  proc_q.put(multiprocessing.current_process().pid)
  got_error = False
  st = ST.SpirentTools(True)
  try:
    st.loginfo.print_("Copying files for %s" % manual_test_files)
    CopyList( st, manual_test_files )
    st.loginfo.print_("Done copying")
  except Exception as e:
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt.  Log to both stdout and the diagnostics file.
    got_error = True
    print("Unexpected error: {}".format(type(e)))
    st.loginfo.print_("Unexpected error: {}".format(type(e)))
    st.loginfo.print_(e)
    print(e)
    traceback.print_tb( e.__traceback__, file=st.loginfo.diag_f )
    traceback.print_tb( e.__traceback__ )
  finally:
    st.loginfo.close()
    if got_error:
      set_regression_status(proc_q,'Idle (Error)')
      if not manual_test_files:
        email_error_result()
    else:
      set_regression_status(proc_q,'Idle')
  return got_error

def WrapRun(proc_q, manual_test_files, n_loops, custom_fw, skipped_devices_list=None, do_subset=False, start_time=-1, stop_time=-1, run_rtx=False, run_both=False, skip_post=False,build_branch='main'):
  """Input: proc_q = Queue() - list of background processes started
            manual_test_files = for manual testing, XML scenario(s) of test(s) to run. If None, run full regression system.
            n_loops = # of times to run the scenarios
            custom_fw = if True, don't do a fresh CVS checkout and don't install firmware!
            skipped_devices_list = list of integer numbers defined by <n>?</n> in config.xml file
                                   (None = empty list)
            do_subset = True/False, if do partial run
            start_time = int, start time for partial run
            stop_time = int, stop time for partial run
            run_rtx = True/False, try to run rtx or not
            run_both = True/False, passed through to Run()
            skip_post = True/False, <skip_post> flag will be saved in ResultsQueue/RX...xml file
            build_branch = firmware branch to build
  Run given manual XML file, or full regression system.
  Always closes the logger and updates the regression status; emails an error
  report on failure."""
  # BUG FIX: mutable default replaced with a None sentinel - Run() appends to
  # this list, so a shared [] default accumulated entries across calls.
  if skipped_devices_list is None:
    skipped_devices_list = []
  proc_q.put(multiprocessing.current_process().pid)
  got_error = False
  st = ST.SpirentTools(True)
  try:
    st.loginfo.print_(f"Number of loops {n_loops}.  Skip FW {custom_fw}. run_rtx {run_rtx}. run_both {run_both}. Skip post {skip_post}.")
    st.loginfo.print_("Skipped devices list: RX-{}".format(str(skipped_devices_list)))
    if custom_fw:
      # Touch the install marker so the downstream build step is skipped.
      check_runq(st.loginfo, proc_q, "touch Builds/install.txt")
      check_runq(st.loginfo, proc_q, "chmod a+w Builds/install.txt")
    Run(proc_q, st, custom_fw, n_loops, manual_test_files, skipped_devices_list, do_subset, start_time, stop_time, run_rtx, run_both, skip_post, build_branch)
    # ProcessResults.py runs in the background while the RF
    # playback/copy is running.  If ProcessResults takes too long, it
    # could still be running here.  Use the --wait option to make sure
    # we wait and process the latest results here.
    check_runq(st.loginfo, proc_q, "./ProcessResults.py --wait")
    st.loginfo.print_("Finished regression run.")
  except Exception as e:
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt.
    got_error = True
    print("Unexpected error: {}".format(type(e)))
    st.loginfo.print_("Unexpected error: {}".format(type(e)))
    st.loginfo.print_(e)
    print(e)
    traceback.print_tb( e.__traceback__, file=st.loginfo.diag_f )
    traceback.print_tb( e.__traceback__ )
  finally:
    st.loginfo.close()
    if got_error:
      set_regression_status(proc_q,'Idle (Error)')
      email_error_result()
    else:
      set_regression_status(proc_q,'Idle')

def WrapCopyAndPlaySingle( proc_q, manual_test_file):
  """Input: proc_q = Queue() - list of background processes started
            manual_test_file = single XML scenario filename.
  Copy all files in XML to USB/backup server and then run single XML scenario."""
  copy_failed = WrapCopyList( proc_q, manual_test_file )
  if copy_failed:
    print("Skipping playback because of a copy error")
    return
  # Only start playback when nothing else is currently active.
  if set_regression_status(proc_q,'Running'):
    WrapRun( proc_q, manual_test_file, 1, False )

def handle_req(proc_q,conn, ser):
  """Input: proc_q = Queue() - list of background processes started
            conn = socket for communication with RFapp.py
            ser = ST.SpirentSerial = control serial port toggle
     Handle requests from RFapp.py and send a response back over the 'conn' socket.
     msg[0] names the request ('kill', 'start', 'playSingle', 'playList',
     'copyList', status/info queries, ...); remaining elements are arguments.
     Long-running actions are launched in a separate process; quick queries
     reply directly on 'conn'.  The connection is closed before returning."""
  msg = conn.recv()
  if verbose: print("Got msg:",msg)
  st = ST.SpirentTools(False)
  if msg[0] == 'kill':
    set_regression_status(proc_q, 'Killed')
    st.quick_stop()
    st.mount_usb(True)
    # Force-release the agrx lock so an aborted run cannot leave it held.
    # BUG FIX: the old code called release() in *both* branches of an if/else
    # on the acquire() result - same net effect, but misleading, and the
    # success branch never printed "Released".  multiprocessing.Lock.release()
    # may be called from any process, so a plain acquire/release pair works:
    # either we just took the lock (and free it), or someone else held it
    # (and we force it free).
    print("Releasing agrx_access_lock.")
    agrx_access_lock.acquire(block=False)
    agrx_access_lock.release()
    print("Released agrx_access_lock.")
    # Turn all ag receiver log off
    cfg = get_config_xml(None,True)
    for n,dev in enumerate(cfg.devices):
      if dev.ag != None:
        rt = stop_ag_receiver_log(dev.ip) # For each receiver, try to wait 5sec until acquire lock then turn log & tnfs off
        print("Stop ag receiver #",n,':',dev.ip, "returns",rt)
  elif msg[0] == 'start':
    # Full regression run (only if nothing else is active).
    if set_regression_status(proc_q, 'Running'):
      if verbose: print("Do start message")
      ser.toggle_serial(ser.Serial_Level_Spirent_Receivers)
      proc = multiprocessing.Process(target=WrapRun, args=(proc_q,None,1,False,))
      proc.start()
  elif msg[0] == 'logSSDWearLevelingCount':
    if len(msg) == 4:
      csv_filename = msg[1]
      mountpoints = msg[2].split(',')
      send_email = True if msg[3].lower() == 'true' else False
      if verbose: print("csv_filename:",csv_filename,"mountpoints:",mountpoints,"send_email:",send_email)
      logSSDWearLevelingCount(csv_filename, mountpoints, send_email)
    else:
      logSSDWearLevelingCount()
  elif msg[0] == 'toggleSerial':
    ser.toggle_serial()
  elif msg[0] == 'userInfo':
    if verbose: print("User info message", msg[1])
    with open("user_status.json","w") as f:
      json.dump( msg[1], f )
  elif msg[0] == 'getDetailedDiag':
    cfg = get_config_xml(None, True)
    conn.send( cfg.detailedDiagCmd )
  elif msg[0] == 'getReceiverInfo':
    conn.send( get_receiver_info() )
  elif msg[0] == 'getAgReceiverInfo':
    conn.send( get_ag_receiver_info(msg[1]) )
  elif msg[0] == 'stopAgReceiverLog':
    conn.send( stop_ag_receiver_log(msg[1]) )
  elif msg[0] == 'getUserInfo':
    with open("user_status.json") as f:
      conn.send(json.load( f ))
  elif msg[0] == 'getRegressionInfo':
    conn.send(get_regression_status())
  elif msg[0] == 'getRegressionProgress':
    with open("regression_progress.json") as f:
      conn.send(json.load( f ))
  elif msg[0] == 'getRegressionDiag':
    with open("regression_diag.txt") as f:
      conn.send("".join(f.readlines()))
  elif msg[0] == 'getTestFileInfoAll':
    conn.send(getTestFileInfo(st,True))
  elif msg[0] == 'getTestFileInfoShort':
    conn.send(getTestFileInfo(st,False))
  elif msg[0] == 'playSingle':
    if set_regression_status(proc_q, 'Running'):
      if verbose: print("message: %s %s" % (msg[0],msg[1]))
      ser.toggle_serial(ser.Serial_Level_Spirent_Receivers)
      proc = multiprocessing.Process(target=WrapRun, args=(proc_q,msg[1],1,False,))
      proc.start()
  elif msg[0] == 'playList':
    if set_regression_status(proc_q, 'Running'):
      if verbose: print( "message: {} {} {} {} {} {} {} {} {} {}".format(msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6],msg[7],msg[8],msg[9]) )
      ser.toggle_serial(ser.Serial_Level_Spirent_Receivers)
      # msg layout: [1]=file list, [2]=loops, [3]=skip build, [4]=skipped devices,
      # [5..7]=subset flag + window, [8]=run_rtx, [9]=run_both, [10]=skip_post, [11]=branch
      file_list=msg[1]
      n_loop=msg[2]
      skip_build=msg[3]
      skipped_devices_list = [] if msg[4]==None else msg[4]
      do_subset,start_time,stop_time = bool(msg[5]),int(msg[6]),int(msg[7])
      run_rtx=msg[8]
      run_both=msg[9]
      skip_post=msg[10]
      build_branch=msg[11]
      proc = multiprocessing.Process(target=WrapRun, args=(proc_q,file_list,n_loop,skip_build,skipped_devices_list,do_subset,start_time,stop_time,run_rtx,run_both,skip_post,build_branch))
      proc.start()
  elif msg[0] == 'copyList':
    if set_regression_status(proc_q, 'Copying'):
      proc = multiprocessing.Process(target=WrapCopyList, args=(proc_q, msg[1],))
      proc.start()
  elif msg[0] == 'cancelCopy':
    # kill_background_procs() inside set_regression_status() stops the copy.
    set_regression_status(proc_q, 'Copy Canceled')
  elif msg[0] == 'copyAndPlaySingle':
    if set_regression_status(proc_q, 'Copying'):
      if verbose: print("Do copy & play single message")
      ser.toggle_serial(ser.Serial_Level_Spirent_Receivers)
      proc = multiprocessing.Process(target=WrapCopyAndPlaySingle, args=(proc_q, msg[1],))
      proc.start()
  elif msg[0] == 'mountUsbToggle':
    info = list(st.get_mount_usb_status().values())[0]
    if info == 'MountedRO' or info == 'MountedRW':
      st.mount_usb(False)
    elif info == 'Unmounted' or info == 'UnmountedWrongPath':
      st.mount_usb(True)
    else:
      print("Can't toggle mount on missing USB drive")
    conn.send("done")
  elif msg[0] == 'getMountUsbStatus':
    info = st.get_mount_usb_status()
    conn.send({'status':','.join(info.values())})
  else:
    print("Unknown msg:",msg)
  conn.close()

def run_server(ser):
  """Input: ser = ST.SpirentSerial = control serial port toggle
  Start listening on the control port (read from <control_port> in config.xml)
  for requests from RFapp.py.  Each request is handled in a separate process
  so multiple people can access the server.  Also creates an ST.SpirentPing
  object, which keeps the Spirent RF USB drive awake.  Never returns.
  """
  config_XML = ET.parse("config.xml")
  control_port = int(config_XML.find('control_port').text)
  proc_q = multiprocessing.Queue()  # shared record of background PIDs for later cleanup
  # NOTE(review): 'ping' looks unused but must stay alive - presumably its
  # constructor starts the USB keep-awake worker; confirm in SpirentTools.
  ping = ST.SpirentPing()
  addr = ('localhost', control_port)
  print('Listen on {}'.format(addr))
  listener = mp_con.Listener(addr)
  while True:
    conn = listener.accept()
    # One process per request; handle_req() closes the connection when done.
    proc = multiprocessing.Process(target=handle_req, args=(proc_q,conn,ser))
    proc.start()

def init_json():
  '''Initialize all JSON status files at startup.'''
  # Preserve any existing user info; only seed defaults when missing.
  if not os.path.isfile("user_status.json"):
    default_user = {'name':'', 'desc':'', 'end_time': 'Mon, 06 Jan 1980 00:00:00'}
    with open("user_status.json","w") as f:
      json.dump( default_user, f )
  # Progress and diagnostics are always reset on server restart.
  with open("regression_progress.json","w") as f:
    json.dump( {'detail':'Server restarted'}, f )
  with open("regression_diag.txt","w") as f:
    f.write('\n')
  set_regression_status(None,'Idle')

def init_run_num():
  """Create RunNum.txt (the next global run number) if it is not present.
  The number is recovered from the newest ResultsQueue/RX<n>-<run>.xml file
  (its run number + 1), or 1 when the queue is empty."""
  if os.path.isfile("RunNum.txt"):
    return
  cfg = get_config_xml(None,True)
  files = glob.glob( cfg.ResultsQueue + "/*.xml")
  if len(files) == 0:
    with open('RunNum.txt','w') as fid:
      fid.write("1")
    return
  files.sort(key=os.path.getmtime)
  # BUG FIX: the '.' before 'xml' was unescaped (r'...(\d+).xml'), so it
  # matched any character, e.g. 'RX1-23Zxml' would also have matched.
  num = int(re.findall(r'RX\d+-(\d+)\.xml',files[-1])[0]) + 1
  with open('RunNum.txt','w') as fid:
    fid.write(str(num))


if __name__ == "__main__":
  # Bypass any HTTP proxy for local control-port connections.
  os.environ['no_proxy'] = '*'
  init_json()      # reset the JSON status files the web GUI reads
  init_run_num()   # make sure RunNum.txt exists before the first run
  ser = ST.SpirentSerial()
  run_server(ser)  # never returns
