
Merge pull request DeepLearnPhysics#10 from dkoh0207/develop
Analysis tool updates
francois-drielsma authored Mar 8, 2023
2 parents bae6599 + 4ceaa19 commit 798c969
Showing 24 changed files with 1,204 additions and 128 deletions.
146 changes: 102 additions & 44 deletions analysis/algorithms/calorimetry.py
@@ -2,12 +2,17 @@
import numpy as np
import numba as nb
from sklearn.decomposition import PCA
from scipy.interpolate import CubicSpline
import pandas as pd


def compute_sum_deposited(particle : Particle):
    assert hasattr(particle, 'deposition')
    sum_E = particle.deposition.sum()
    return sum_E
# CONSTANTS (MeV)
PROTON_MASS = 938.272
MUON_MASS = 105.7
ELECTRON_MASS = 0.511998
ARGON_DENSITY = 1.396
ADC_TO_MEV = 1. / 350.
ARGON_MASS = 37211
PIXELS_TO_CM = 0.3


def compute_track_length(points, bin_size=17):
@@ -44,6 +49,40 @@ def compute_track_length(points, bin_size=17):
    return length


def get_csda_range_spline(particle_type, table_path):
    '''
    Returns a spline mapping CSDA range in liquid argon (cm) to
    kinetic energy (MeV).
    '''
    if particle_type in ('proton', 'muon'):
        tab = pd.read_csv(table_path,
                          delimiter=' ',
                          index_col=False)
    else:
        raise ValueError("Range based energy reconstruction for particle type "
                         "{} is not supported!".format(particle_type))
    f = CubicSpline(tab['CSDARange'] / ARGON_DENSITY, tab['T'])
    return f


def compute_range_based_energy(particle, f, **kwargs):
    assert particle.semantic_type == 1
    if particle.pid == 4: m = PROTON_MASS
    elif particle.pid == 2: m = MUON_MASS
    else:
        raise ValueError("Range based energy reconstruction is not "
                         "supported for particle type {}!".format(particle.pid))
    if not hasattr(particle, 'length'):
        particle.length = compute_track_length(particle.points, **kwargs)
    kinetic = f(particle.length * PIXELS_TO_CM)
    total = kinetic + m
    return total
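For context, a minimal usage sketch of the range-based reconstruction above (the table path and the SimpleNamespace track are hypothetical stand-ins, not part of this commit):

from types import SimpleNamespace
import numpy as np

# Hypothetical CSDA table location; callers supply the real path.
f_proton = get_csda_range_spline('proton', 'tables/pE_liquid_argon.txt')

# Mock track-like particle: 100 voxels along x, semantic_type 1 (track), pid 4 (proton)
trk = SimpleNamespace(semantic_type=1, pid=4,
                      points=np.linspace([0., 0., 0.], [100., 0., 0.], 101))
total_E = compute_range_based_energy(trk, f_proton)  # spline kinetic energy + PROTON_MASS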


def compute_particle_direction(p: Particle,
                               start_segment_radius=17,
                               vertex=None,
@@ -90,46 +129,26 @@ def compute_particle_direction(p: Particle,
        return direction
    else:
        return direction, pca.explained_variance_ratio_


def load_range_reco(particle_type='muon', kinetic_energy=True):
    """
    Return a function that maps the residual range of a track to the kinetic
    energy of the track. The mapping is based on the Bethe-Bloch formula
    and stored per particle type in TGraph objects. The TGraph::Eval
    function is used to perform the interpolation.

    Parameters
    ----------
    particle_type: A string with the particle name.
    kinetic_energy: If true (false), return the kinetic energy (momentum)

    Returns
    -------
    The kinetic energy or momentum according to Bethe-Bloch.
    """
    output_var = ('_RRtoT' if kinetic_energy else '_RRtodEdx')
    if particle_type in ['muon', 'pion', 'kaon', 'proton']:
        input_file = ROOT.TFile.Open('RRInput.root', 'read')
        graph = input_file.Get(f'{particle_type}{output_var}')
        return np.vectorize(graph.Eval)
    else:
        print(f'Range-based reconstruction for particle "{particle_type}" not available.')


def make_range_based_momentum_fns():
    f_muon = load_range_reco('muon')
    f_pion = load_range_reco('pion')
    f_proton = load_range_reco('proton')
    return [f_muon, f_pion, f_proton]


def compute_range_momentum(particle, f, voxel_to_cm=0.3, **kwargs):
    assert particle.semantic_type == 1
    length = compute_track_length(particle.points,
                                  bin_size=kwargs.get('bin_size', 17))
    E = f(length * voxel_to_cm)
    return E
def compute_track_dedx(p, bin_size=17):
    assert len(p.points) >= 2
    vec = p.endpoint - p.startpoint
    vec_norm = np.linalg.norm(vec)
    vec = (vec / (vec_norm + 1e-6)).astype(np.float64)
    proj = p.points - p.startpoint
    proj = np.dot(proj, vec)
    bins = np.arange(proj.min(), proj.max(), bin_size)
    bin_inds = np.digitize(proj, bins)
    dedx = np.zeros(np.unique(bin_inds).shape[0]).astype(np.float64)
    for i, b_i in enumerate(np.unique(bin_inds)):
        mask = bin_inds == b_i
        if np.count_nonzero(mask) < 2: continue
        # Use the projected extent of the hits within the bin as dx
        dx = proj[mask].max() - proj[mask].min()
        dedx[i] = p.depositions[mask].sum() / dx
    return dedx
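A quick sanity check of compute_track_dedx on a synthetic straight track (the SimpleNamespace stand-in is illustrative, not part of this commit):

from types import SimpleNamespace
import numpy as np

pts = np.zeros((100, 3)); pts[:, 0] = np.arange(100, dtype=np.float64)
trk = SimpleNamespace(points=pts, startpoint=pts[0], endpoint=pts[-1],
                      depositions=np.full(100, 2.0))
# Uniform 2.0 depositions, one per unit of projected length:
# every bin should come out near 2.0
print(compute_track_dedx(trk, bin_size=17))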


def highland_formula(p, l, m, X_0=14, z=1):
@@ -249,4 +268,43 @@ def compute_mcs_muon_energy(particle, bin_size=17,
            einit = i
    lls = np.array(lls)
    Es = np.array(Es)
    return einit, min_ll
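For reference, highland_formula (collapsed above) presumably follows the standard PDG Highland form for the RMS multiple-scattering angle; a sketch of that standard formula, not necessarily the commit's exact implementation:

import numpy as np

def highland_theta0(p, beta, l, X_0=14., z=1):
    # RMS projected scattering angle (rad) for momentum p (MeV/c),
    # velocity beta, segment length l and radiation length X_0 (cm);
    # X_0 = 14 cm is the liquid-argon radiation length, matching the
    # default in the diff above.
    return 13.6 / (p * beta) * z * np.sqrt(l / X_0) * (1. + 0.038 * np.log(l / X_0))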

# def load_range_reco(particle_type='muon', kinetic_energy=True):
#     """
#     Return a function that maps the residual range of a track to the kinetic
#     energy of the track. The mapping is based on the Bethe-Bloch formula
#     and stored per particle type in TGraph objects. The TGraph::Eval
#     function is used to perform the interpolation.

#     Parameters
#     ----------
#     particle_type: A string with the particle name.
#     kinetic_energy: If true (false), return the kinetic energy (momentum)

#     Returns
#     -------
#     The kinetic energy or momentum according to Bethe-Bloch.
#     """
#     output_var = ('_RRtoT' if kinetic_energy else '_RRtodEdx')
#     if particle_type in ['muon', 'pion', 'kaon', 'proton']:
#         input_file = ROOT.TFile.Open('/sdf/group/neutrino/koh0207/misc/RRInput.root', 'read')
#         graph = input_file.Get(f'{particle_type}{output_var}')
#         return np.vectorize(graph.Eval)
#     else:
#         print(f'Range-based reconstruction for particle "{particle_type}" not available.')


# def make_range_based_momentum_fns():
#     f_muon = load_range_reco('muon')
#     f_pion = load_range_reco('pion')
#     f_proton = load_range_reco('proton')
#     return [f_muon, f_pion, f_proton]


# def compute_range_momentum(particle, f, voxel_to_cm=0.3, **kwargs):
#     assert particle.semantic_type == 1
#     length = compute_track_length(particle.points,
#                                   bin_size=kwargs.get('bin_size', 17))
#     E = f(length * voxel_to_cm)
#     return E
6 changes: 3 additions & 3 deletions analysis/algorithms/point_matching.py
@@ -44,7 +44,7 @@ def match_points_to_particles(ppn_points : np.ndarray,
    for particle in particles:
        dist = cdist(ppn_coords, particle.points)
        matches = ppn_points_type[dist.min(axis=1) < ppn_distance_threshold]
-       particle.ppn_candidates = matches
+       particle.ppn_candidates = matches.reshape(-1, 7)
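The reshape(-1, 7) guards the empty case: when no PPN point passes the distance threshold, the candidate array keeps a two-dimensional (0, 7) shape (PPN points carry 7 columns here) instead of collapsing to (0,). A quick illustration:

import numpy as np
matches = np.empty((0,))              # no candidates survived the cut
print(matches.reshape(-1, 7).shape)   # (0, 7) -- downstream code can index columns safely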

# Deprecated
def get_track_endpoints(particle : Particle, verbose=False):
@@ -115,8 +115,8 @@ def get_track_endpoints_max_dist(particle):
"""
coords = particle.points
dist = cdist(coords, coords)
pts = particle.points[np.where(dist == dist.max())[0]]
return pts[0], pts[1]
inds = np.unravel_index(dist.argmax(), dist.shape)
return coords[inds[0]], coords[inds[1]]


# Deprecated
1 change: 1 addition & 0 deletions analysis/algorithms/selections/__init__.py
@@ -7,3 +7,4 @@
from .flash_matching import flash_matching
from .muon_decay import muon_decay
from .benchmark import benchmark
from .particles import run_inference_particles
133 changes: 133 additions & 0 deletions analysis/algorithms/selections/particles.py
@@ -0,0 +1,133 @@
from collections import OrderedDict
import os, copy, sys

# Flash Matching
sys.path.append('/sdf/group/neutrino/ldomine/OpT0Finder/python')


from analysis.decorator import evaluate
from analysis.classes.evaluator import FullChainEvaluator
from analysis.classes.TruthInteraction import TruthInteraction
from analysis.classes.Interaction import Interaction
from analysis.classes.Particle import Particle
from analysis.classes.TruthParticle import TruthParticle
from analysis.algorithms.utils import get_interaction_properties, \
     get_particle_properties, \
     get_mparticles_from_minteractions

from analysis.algorithms.calorimetry import get_csda_range_spline

@evaluate(['particles'], mode='per_batch')
def run_inference_particles(data_blob, res, data_idx, analysis_cfg, cfg):
    """
    Analysis tools inference script for particle-level information.
    """
    # List of ordered dictionaries for output logging
    # Interaction and particle level information
    interactions, particles = [], []

    # Analysis tools configuration
    deghosting = analysis_cfg['analysis']['deghosting']
    primaries = analysis_cfg['analysis']['match_primaries']
    enable_flash_matching = analysis_cfg['analysis'].get('enable_flash_matching', False)
    ADC_to_MeV = analysis_cfg['analysis'].get('ADC_to_MeV', 1./350.)
    compute_vertex = analysis_cfg['analysis']['compute_vertex']
    vertex_mode = analysis_cfg['analysis']['vertex_mode']
    matching_mode = analysis_cfg['analysis']['matching_mode']

    # FullChainEvaluator config
    processor_cfg = analysis_cfg['analysis'].get('processor_cfg', {})

    # Skeleton for csv output
    particle_dict = analysis_cfg['analysis'].get('particle_dict', {})

    use_primaries_for_vertex = analysis_cfg['analysis'].get('use_primaries_for_vertex', True)

    # get_csda_range_spline requires the CSDA table location; the config keys
    # below are assumed to supply it (they are not defined elsewhere in this diff).
    splines = {
        'proton': get_csda_range_spline('proton',
                                        analysis_cfg['analysis']['proton_csda_table']),
        'muon': get_csda_range_spline('muon',
                                      analysis_cfg['analysis']['muon_csda_table'])
    }

    # Load data into evaluator
    if enable_flash_matching:
        predictor = FullChainEvaluator(data_blob, res, cfg, processor_cfg,
                deghosting=deghosting,
                enable_flash_matching=enable_flash_matching,
                flash_matching_cfg="/sdf/group/neutrino/koh0207/logs/nu_selection/flash_matching/config/flashmatch.cfg",
                opflash_keys=['opflash_cryoE', 'opflash_cryoW'])
    else:
        predictor = FullChainEvaluator(data_blob, res, cfg, processor_cfg, deghosting=deghosting)

    image_idxs = data_blob['index']
    spatial_size = predictor.spatial_size

    # Loop over images
    for idx, index in enumerate(image_idxs):
        index_dict = {
            'Index': index,
            # 'run': data_blob['run_info'][idx][0],
            # 'subrun': data_blob['run_info'][idx][1],
            # 'event': data_blob['run_info'][idx][2]
        }

        particle_matches, particle_matches_values = predictor.match_particles(idx,
                only_primaries=primaries,
                mode='true_to_pred',
                volume=None,
                matching_mode=matching_mode,
                return_counts=True)

        # Process particle-level information
        for i, mparticles in enumerate(particle_matches):
            true_p, pred_p = mparticles[0], mparticles[1]

            assert (type(true_p) is TruthParticle) or true_p is None
            assert (type(pred_p) is Particle) or pred_p is None

            part_dict = copy.deepcopy(particle_dict)

            part_dict.update(index_dict)
            part_dict['particle_match_value'] = particle_matches_values[i]

            pred_particle_dict = get_particle_properties(pred_p,
                    prefix='pred', splines=splines)
            true_particle_dict = get_particle_properties(true_p,
                    prefix='true', splines=splines)

            if true_p is not None:
                pred_particle_dict['pred_particle_has_match'] = True
                true_particle_dict['true_particle_interaction_id'] = true_p.interaction_id
                if 'particles_asis' in data_blob:
                    particles_asis = data_blob['particles_asis'][idx]
                    if len(particles_asis) > true_p.id:
                        true_part = particles_asis[true_p.id]
                        true_particle_dict['true_particle_energy_init'] = true_part.energy_init()
                        true_particle_dict['true_particle_energy_deposit'] = true_part.energy_deposit()
                        true_particle_dict['true_particle_creation_process'] = true_part.creation_process()
                        # If no children other than itself: particle is stopping.
                        children = true_part.children_id()
                        children = [x for x in children if x != true_part.id()]
                        true_particle_dict['true_particle_children_count'] = len(children)

            if pred_p is not None:
                true_particle_dict['true_particle_has_match'] = True
                pred_particle_dict['pred_particle_interaction_id'] = pred_p.interaction_id

            for k1, v1 in true_particle_dict.items():
                if k1 in part_dict:
                    part_dict[k1] = v1
                else:
                    raise ValueError("{} not in pre-defined fieldnames.".format(k1))

            for k2, v2 in pred_particle_dict.items():
                if k2 in part_dict:
                    part_dict[k2] = v2
                else:
                    raise ValueError("{} not in pre-defined fieldnames.".format(k2))

            particles.append(part_dict)

    return [particles]
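For reference, a minimal sketch of the analysis block this script reads (values are illustrative; the proton_csda_table/muon_csda_table keys are the assumed way to point at the CSDA tables, as noted in the code above):

analysis_cfg = {
    'analysis': {
        'deghosting': True,
        'match_primaries': False,
        'enable_flash_matching': False,
        'ADC_to_MeV': 1. / 350.,
        'compute_vertex': True,
        'vertex_mode': 'all',
        'matching_mode': 'optimal',
        'processor_cfg': {},
        'particle_dict': {},                  # skeleton of csv fieldnames
        'use_primaries_for_vertex': True,
        'proton_csda_table': 'tables/pE_liquid_argon.txt',   # assumed key
        'muon_csda_table': 'tables/muE_liquid_argon.txt'     # assumed key
    }
}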
