diff --git a/selfdrive/controls/radard.py b/selfdrive/controls/radard.py
index 98fce1cb26e7b5..4b9279ee94cd6c 100755
--- a/selfdrive/controls/radard.py
+++ b/selfdrive/controls/radard.py
@@ -10,7 +10,7 @@
 from openpilot.common.params import Params
 from openpilot.common.realtime import DT_MDL, Priority, config_realtime_process
 from openpilot.common.swaglog import cloudlog
-from openpilot.common.simple_kalman import KF1D
+from openpilot.common.simple_kalman import KF1D, get_kalman_gain

 # Default lead acceleration decay set to 50% at 1s
@@ -26,6 +26,60 @@
 RADAR_TO_CAMERA = 1.52   # RADAR is ~ 1.5m ahead from center of mesh frame


+class VisionLeadKF:
+  MIN_STD = 1.0   # m, lower clip on the model's reported distance std
+  MAX_STD = 15.0  # m, upper clip on the model's reported distance std
+  SIGMA_A = 2.5   # m/s^2, how much the lead's relative velocity can realistically change (process noise, not a per-step limit)
+  DT = DT_MDL
+
+  def __init__(self, x):
+    self.x = x       # distance state estimate (m)
+    self.vRel = 0.0  # relative velocity state estimate (m/s)
+    self.P = np.diag([10.0 ** 2, 5.0 ** 2])  # covariance of the state estimate: 10 m std on x, 5 m/s std on vRel
+    self.K = 0.0     # Kalman gain, stored for debugging
+
+    # TODO: understand this fully
+    # discrete white-noise acceleration model: Q = var_a * G @ G.T with G = [DT**2 / 2, DT]
+    var_a = self.SIGMA_A ** 2  # acceleration variance
+    self.Q = var_a * np.array([
+      [self.DT ** 4 / 4.0, self.DT ** 3 / 2.0],  # affects x (m^2) and the x/vRel coupling
+      [self.DT ** 3 / 2.0, self.DT ** 2],        # affects vRel ((m/s)^2)
+    ])
+
+  def update(self, x, x_std, a_ego):
+    # predict state: d(x)/dt = vRel, d(vRel)/dt ~= -a_ego since the lead's acceleration is unknown
+    # self.x = self.x + self.vRel * self.DT
+    self.x = self.x + self.vRel * self.DT - 0.5 * a_ego * self.DT * self.DT
+    self.vRel = self.vRel - a_ego * self.DT
+
+    # predict covariance
+    A = np.array([[1.0, self.DT],
+                  [0.0, 1.0]])
+
+    self.P = A @ self.P @ A.T + self.Q
+
+    # update
+    H = np.array([[1.0, 0.0]])
+
+    x_std = np.clip(x_std, self.MIN_STD, self.MAX_STD)
+    R = x_std ** 2
+
+    x_pred = (H @ np.array([[self.x], [self.vRel]]))[0, 0]
+
+    S = (H @ self.P @ H.T)[0, 0] + R
+
+    K = (self.P @ H.T) / S
+    self.K = K  # keep the gain around so the debug script can plot it
+
+    y = x - x_pred  # innovation: how far the measurement is from the prediction
+    self.x = self.x + K[0, 0] * y
+    self.vRel = self.vRel + K[1, 0] * y
+
+    # update covariance
+    I = np.eye(2)
+    self.P = (I - K @ H) @ self.P
+
+    return self.x, self.vRel
+
+
 class KalmanParams:
   def __init__(self, dt: float):
     # Lead Kalman Filter params, calculating K from A, C, Q, R requires the control library.
@@ -138,37 +192,82 @@ def prob(c):
     return None


-def get_RadarState_from_vision(lead_msg: capnp._DynamicStructReader, v_ego: float, model_v_ego: float):
-  lead_v_rel_pred = lead_msg.v[0] - model_v_ego
-  return {
-    "dRel": float(lead_msg.x[0] - RADAR_TO_CAMERA),
-    "yRel": float(-lead_msg.y[0]),
-    "vRel": float(lead_v_rel_pred),
-    "vLead": float(v_ego + lead_v_rel_pred),
-    "vLeadK": float(v_ego + lead_v_rel_pred),
-    "aLeadK": float(lead_msg.a[0]),
-    "aLeadTau": 0.3,
-    "fcw": False,
-    "modelProb": float(lead_msg.prob),
-    "status": True,
-    "radar": False,
-    "radarTrackId": -1,
-  }
-
-
-def get_lead(v_ego: float, ready: bool, tracks: dict[int, Track], lead_msg: capnp._DynamicStructReader,
-             model_v_ego: float, low_speed_override: bool = True) -> dict[str, Any]:
+class VisionTrack:
+  def __init__(self, lead_msg: capnp._DynamicStructReader):
+    self.distances = deque(maxlen=round(0.5 / DT_MDL))
+    d_rel = float(lead_msg.x[0] - RADAR_TO_CAMERA)
+    self.kf = VisionLeadKF(d_rel)
+
+  def get_RadarState(self, lead_msg: capnp._DynamicStructReader, v_ego: float, a_ego: float, model_v_ego: float):
+    lead_v_rel_pred = lead_msg.v[0] - model_v_ego
+
+    d_rel = float(lead_msg.x[0] - RADAR_TO_CAMERA)
+
+    old_v_lead = v_ego + lead_v_rel_pred
+
+    dt = len(self.distances) * DT_MDL
+    if dt > 0.0:
+      kf_dRel, kf_vRel = self.kf.update(d_rel, lead_msg.xStd[0], a_ego)
+      # v_rel = (d_rel - self.distances[0]) / dt
+      v_lead = v_ego + kf_vRel
+    else:
+      kf_dRel = d_rel
+      v_lead = old_v_lead
+
+    self.distances.append(d_rel)
+
+    return {
+      "dRel": float(kf_dRel),
+      "yRel": float(-lead_msg.y[0]),
+      "vRel": float(lead_v_rel_pred),
+      "vLead": float(v_lead),                    # new KF-based vLead
+      "vLeadK": float(v_ego + lead_v_rel_pred),  # old vLead logic, kept for comparison
+      "aLeadK": float(lead_msg.a[0]),            # this seems stable?
+ "aLeadTau": 0.3, + "fcw": False, + "modelProb": float(lead_msg.prob), + "status": True, + "radar": False, + "radarTrackId": -1, + } + + +# def get_RadarState_from_vision(lead_msg: capnp._DynamicStructReader, v_ego: float, model_v_ego: float): +# lead_v_rel_pred = lead_msg.v[0] - model_v_ego +# return { +# "dRel": float(lead_msg.x[0] - RADAR_TO_CAMERA), +# "yRel": float(-lead_msg.y[0]), +# "vRel": float(lead_v_rel_pred), +# "vLead": float(v_ego + lead_v_rel_pred), +# "vLeadK": float(v_ego + lead_v_rel_pred), +# "aLeadK": float(lead_msg.a[0]), +# "aLeadTau": 0.3, +# "fcw": False, +# "modelProb": float(lead_msg.prob), +# "status": True, +# "radar": False, +# "radarTrackId": -1, +# } + + +def get_lead(v_ego: float, a_ego: float, ready: bool, tracks: dict[int, Track], vision_track, lead_msg: capnp._DynamicStructReader, + model_v_ego: float, low_speed_override: bool = True) -> tuple[dict[str, Any], Any | None]: # Determine leads, this is where the essential logic happens if len(tracks) > 0 and ready and lead_msg.prob > .5: track = match_vision_to_track(v_ego, lead_msg, tracks) else: track = None + if low_speed_override and lead_msg.prob <= 0.5: + vision_track = None + lead_dict = {'status': False} if track is not None: lead_dict = track.get_RadarState(lead_msg.prob) elif (track is None) and ready and (lead_msg.prob > .5): - lead_dict = get_RadarState_from_vision(lead_msg, v_ego, model_v_ego) + if vision_track is None and low_speed_override: + vision_track = VisionTrack(lead_msg) + lead_dict = vision_track.get_RadarState(lead_msg, v_ego, a_ego, model_v_ego) if low_speed_override: low_speed_tracks = [c for c in tracks.values() if c.potential_low_speed_lead(v_ego)] @@ -179,7 +278,7 @@ def get_lead(v_ego: float, ready: bool, tracks: dict[int, Track], lead_msg: capn if (not lead_dict['status']) or (closest_track.dRel < lead_dict['dRel']): lead_dict = closest_track.get_RadarState() - return lead_dict + return lead_dict, vision_track class RadarD: @@ -187,9 +286,11 @@ def __init__(self, delay: float = 0.0): self.current_time = 0.0 self.tracks: dict[int, Track] = {} + self.vision_track = None self.kalman_params = KalmanParams(DT_MDL) self.v_ego = 0.0 + self.a_ego = 0.0 self.v_ego_hist = deque([0.0], maxlen=int(round(delay / DT_MDL))+1) self.last_v_ego_frame = -1 @@ -204,6 +305,7 @@ def update(self, sm: messaging.SubMaster, rr: car.RadarData): if sm.recv_frame['carState'] != self.last_v_ego_frame: self.v_ego = sm['carState'].vEgo + self.a_ego = sm['carState'].aEgo self.v_ego_hist.append(self.v_ego) self.last_v_ego_frame = sm.recv_frame['carState'] @@ -239,8 +341,8 @@ def update(self, sm: messaging.SubMaster, rr: car.RadarData): model_v_ego = self.v_ego leads_v3 = sm['modelV2'].leadsV3 if len(leads_v3) > 1: - self.radar_state.leadOne = get_lead(self.v_ego, self.ready, self.tracks, leads_v3[0], model_v_ego, low_speed_override=True) - self.radar_state.leadTwo = get_lead(self.v_ego, self.ready, self.tracks, leads_v3[1], model_v_ego, low_speed_override=False) + self.radar_state.leadOne, self.vision_track = get_lead(self.v_ego, self.a_ego, self.ready, self.tracks, self.vision_track, leads_v3[0], model_v_ego, low_speed_override=True) + # self.radar_state.leadTwo, self.vision_track = get_lead(self.v_ego, self.ready, self.tracks, self.vision_track, leads_v3[1], model_v_ego, low_speed_override=False) def publish(self, pm: messaging.PubMaster): assert self.radar_state is not None diff --git a/voacc_debugging.py b/voacc_debugging.py new file mode 100644 index 00000000000000..287cb83ca9e5cd --- /dev/null +++ 
@@ -0,0 +1,115 @@
+import numpy as np
+from collections import deque
+import matplotlib.pyplot as plt
+
+from tools.lib.logreader import LogReader
+from selfdrive.controls.radard import RADAR_TO_CAMERA, VisionLeadKF
+from openpilot.common.realtime import DT_MDL
+from openpilot.common.simple_kalman import KF1D, get_kalman_gain
+
+plt.ion()
+
+# Camry w/ accurate lead from radar
+# lr = LogReader('https://connect.comma.ai/3f447b402cbe27b6/0000008d--b6a2350b41/351/448', sort_by_time=True)
+lr = LogReader('https://connect.comma.ai/3f447b402cbe27b6/0000008d--b6a2350b41/572/950', sort_by_time=True)
+
+# Tesla with no radar
+# lr = LogReader('https://connect.comma.ai/0f79c454f812791a/000000a3--e1e54de187/1037/1067', sort_by_time=True)
+lr = sorted(lr, key=lambda m: m.logMonoTime)
+
+CS = None
+
+radar_data = []  # from radard/radar
+model_data = []  # straight from the model
+pred_data = []   # kf predictions derived from the model
+kf_data = []     # internal kf state to visualize
+
+# deque of recent model dRel (noisy)
+dRel_deque = deque(maxlen=round(1.0 / DT_MDL))
+
+
+# kf
+kf = None
+
+for msg in lr:
+  if msg.which() == 'carState':
+    CS = msg.carState
+
+  elif msg.which() == 'radarState':
+    RS = msg.radarState
+
+    if RS.leadOne.status:
+      radar_data.append((msg.logMonoTime, RS.leadOne.dRel, RS.leadOne.vLead, RS.leadOne.aLeadK))
+
+  elif msg.which() == 'modelV2':
+    MD = msg.modelV2
+
+    if CS is None:
+      continue
+
+    if not len(MD.leadsV3):
+      continue
+
+    lead = MD.leadsV3[0]
+    if lead.prob > 0.5:
+      dRel = lead.x[0] - RADAR_TO_CAMERA
+      if kf is None:
+        kf = VisionLeadKF(dRel)
+
+      model_data.append((msg.logMonoTime, dRel, lead.v[0], lead.a[0], lead.xStd[0]))
+
+      # simple kf prediction for vLead
+      if len(dRel_deque) == dRel_deque.maxlen:
+        # vLead = CS.vEgo + (dRel - dRel_deque[0]) / (DT_MDL * len(dRel_deque))
+
+        kf_dRel, kf_vRel = kf.update(dRel, lead.xStd[0], CS.aEgo)
+
+        kf_data.append((msg.logMonoTime, kf.P, kf.K))
+        print(dRel, kf_dRel)
+        # kf_dRel = kf.x[0][0]
+        # kf_vLead = CS.vEgo + kf.x[1][0]
+        kf_vLead = CS.vEgo + kf_vRel
+
+        pred_data.append((msg.logMonoTime, kf_dRel, kf_vLead, lead.a[0]))
+
+      dRel_deque.append(dRel)
+    else:
+      dRel_deque.clear()
+      kf = None
+
+fig, ax = plt.subplots(4, 1, sharex=True)
+ax[0].plot([d[0] for d in radar_data], [d[1] for d in radar_data], label='radar dRel')
+ax[0].plot([d[0] for d in model_data], [d[1] for d in model_data], label='model dRel')
+ax[0].plot([d[0] for d in pred_data], [d[1] for d in pred_data], label='predicted dRel')
+ax[0].set_ylabel('dRel (m)')
+ax[0].legend()
+
+ax[1].plot([d[0] for d in radar_data], [d[2] for d in radar_data], label='radar vLead')
+ax[1].plot([d[0] for d in model_data], [d[2] for d in model_data], label='model vLead')
+ax[1].plot([d[0] for d in pred_data], [d[2] for d in pred_data], label='predicted vLead')
+ax[1].set_ylabel('vLead (m/s)')
+ax[1].legend()
+
+ax[2].plot([d[0] for d in radar_data], [d[3] for d in radar_data], label='radar aLeadK')
+ax[2].plot([d[0] for d in model_data], [d[3] for d in model_data], label='model aLead')
+# ax[2].plot([d[0] for d in pred_data], [d[3] for d in pred_data], label='predicted aLead')
+ax[2].set_ylabel('aLead (m/s²)')
+ax[2].legend()
+
+# prob
+# ax[3].plot([d[0] for d in model_data], [d[4] for d in model_data], label='model lead prob')
+# ax[3].set_ylabel('lead prob')
+# ax[3].legend()
+
+# xStd
+ax[3].plot([d[0] for d in model_data], [d[4] for d in model_data], label='model lead xStd')
+ax[3].set_ylabel('lead xStd (m)')
+ax[3].legend()
+
+# # kf internal stats
+# ax[4].plot([d[0] for d in kf_data], [d[1] for d in kf_data], label='kf P')
+# ax[4].plot([d[0] for d in kf_data], [d[2] for d in kf_data], label='kf K')
+# ax[4].set_ylabel('kf stats')
+# ax[4].legend()
+
+# TODO: print some stats about how close model and kf are from radar ground truths
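+
+# A minimal sketch for the TODO above (not part of the original script): it assumes radarState
+# dRel/vLead can be treated as ground truth, and that aligning the model/KF samples onto the
+# radar timestamps with np.interp over logMonoTime is an acceptable comparison. The helper
+# name rms_vs_radar is made up here.
+def rms_vs_radar(radar, other, col):
+  # interpolate `other` onto the radar timestamps, then take the RMS of the difference
+  t_radar = np.array([d[0] for d in radar], dtype=float)
+  t_other = np.array([d[0] for d in other], dtype=float)
+  interp = np.interp(t_radar, t_other, np.array([d[col] for d in other], dtype=float))
+  truth = np.array([d[col] for d in radar], dtype=float)
+  return float(np.sqrt(np.mean((interp - truth) ** 2)))
+
+if len(radar_data) and len(model_data) and len(pred_data):
+  print('dRel RMS vs radar:  model %.2f m,   kf %.2f m' % (rms_vs_radar(radar_data, model_data, 1), rms_vs_radar(radar_data, pred_data, 1)))
+  print('vLead RMS vs radar: model %.2f m/s, kf %.2f m/s' % (rms_vs_radar(radar_data, model_data, 2), rms_vs_radar(radar_data, pred_data, 2)))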