#include <CalibrationClasses.h>
◆ DetectorCalib()
DetectorCalib::DetectorCalib()
◆ ~DetectorCalib()
DetectorCalib::~DetectorCalib()
virtual
◆ applyCalib()
virtual double DetectorCalib::applyCalib(double a_value) const
pure virtual
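Since applyCalib() is pure virtual, each concrete calibration must supply its own raw-to-calibrated mapping. A minimal sketch of such a subclass, assuming a simple linear form; the class name and its gain/offset members are illustrative and are not the library's actual LinearCalib:

#include <CalibrationClasses.h>

// Hypothetical concrete calibration, for illustration only. If DetectorCalib
// declares further pure-virtual members (e.g. clone()), those would also need
// to be overridden before this compiles.
class ExampleLinearCalib : public DetectorCalib
{
public:
    ExampleLinearCalib(double a_offset, double a_gain)
        : m_offset(a_offset), m_gain(a_gain) {}

    // Required: map a raw value into calibrated units.
    double applyCalib(double a_value) const override
    {
        return m_offset + m_gain * a_value;
    }

    // Optional: supply the inverse mapping so cuts can be translated back.
    double invertCalib(double a_value) const override
    {
        return (a_value - m_offset) / m_gain;
    }

private:
    double m_offset;
    double m_gain;
};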
◆ clone()
◆ getCalibType()
Returns which concrete calibration implementation is in use, for runtime polymorphism.
◆ invertCalib()
double DetectorCalib::invertCalib(double a_value) const
virtual
Allows for the inverse mapping of a given calibration, if needed for applying cuts. The implementation isn't enforced, so by default the base-class implementation returns -10000.
Reimplemented in LinearCalib.
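A hedged usage sketch for applying a cut defined in calibrated units: invert the cut once, and fall back to comparing in calibrated units when the default -10000 sentinel signals that no inverse is provided. The 511.0 threshold and the helper function name are illustrative assumptions:

#include <CalibrationClasses.h>

// Illustrative helper: returns whether a raw value passes a cut that was
// defined in calibrated units.
bool passesCut(const DetectorCalib& a_calib, double a_rawValue)
{
    const double calibratedCut = 511.0;                    // assumed cut value
    const double rawCut = a_calib.invertCalib(calibratedCut);
    if (rawCut == -10000)
    {
        // Base-class fallback: no inverse available, so compare in
        // calibrated units instead.
        return a_calib.applyCalib(a_rawValue) > calibratedCut;
    }
    return a_rawValue > rawCut;
}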
◆ print()
std::ostream & DetectorCalib::print(std::ostream & a_stream = std::cout) const
virtual
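Because print() returns the stream it writes to, the output can be chained or redirected; a brief usage sketch, assuming a concrete calibration object and an illustrative log-file name:

#include <fstream>
#include <CalibrationClasses.h>

// Illustrative helper: dump the calibration summary to the console and to a file.
void dumpCalib(const DetectorCalib& a_calib)
{
    a_calib.print();                        // uses the std::cout default

    std::ofstream log("calib_summary.txt"); // file name is an assumption
    a_calib.print(log) << '\n';             // returned stream allows chaining
}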
◆ setTDiv()
void DetectorCalib::setTDiv(int a_timeDiv)
Sets the divisor by which the given time is considered (see the usage sketch after setTime() below).
◆ setTime()
void DetectorCalib::setTime(uint32_t a_t)
Sets the experiment time used during processing, as time since the epoch.
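Together, setTime() and setTDiv() fix the time context a calibration is evaluated in. A short sketch of configuring both before processing; the epoch value and the hour-sized divisor are illustrative assumptions:

#include <cstdint>
#include <CalibrationClasses.h>

// Illustrative helper: set the time context for one run of processing.
void configureTimeContext(DetectorCalib& a_calib, uint32_t a_eventTime)
{
    a_calib.setTime(a_eventTime);  // experiment time, assumed seconds since epoch
    a_calib.setTDiv(3600);         // divisor applied to that time (assumed hour-sized steps)
}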
◆ m_t
uint32_t DetectorCalib::m_t
protected
◆ m_tDiv
int DetectorCalib::m_tDiv
protected
◆ m_type