// file: $isip/class/algo/Reflection/refl_05.cc
// version: $Id: refl_05.cc 10636 2007-01-26 22:18:09Z tm334 $
//

// isip include files
//
#include "Reflection.h"

// method: apply
//
// arguments:
//  Vector& output: (output) output data
//  const Vector< CircularBuffer >& input: (input) input data
//
// return: a bool8 value indicating status
//
// this method loops over every input channel and dispatches each
// channel to the compute method matching its coefficient type:
// covariance data arrives as a matrix, all other types as a vector.
//
// NOTE(review): some template arguments appear to have been stripped
// from this text (e.g. "Vector&" is presumably "Vector<AlgorithmData>&"
// and the buffer presumably "CircularBuffer<AlgorithmData>") -- confirm
// against the class header before relying on these signatures
//
bool8 Reflection::apply(Vector& output_a,
			const Vector< CircularBuffer >& input_a) {

  // determine the number of input channels and force the output to be
  // that number
  //
  int32 len = input_a.length();
  output_a.setLength(len);
  bool8 res = true;

  // start the debugging output
  //
  displayStart(this);

  // loop over the channels and call the compute method
  //
  for (int32 c = 0; c < len; c++) {

    // display the channel number
    //
    displayChannel(c);

    // branch on input coefficient type - covariance takes matrix as input.
    // only the most recent frame (element 0) of each channel's circular
    // buffer is used
    //
    if (input_a(c)(0).getCoefType() == AlgorithmData::COVARIANCE) {

      // call AlgorithmData::makeVectorFloat to force the output vector for
      // this channel to be a VectorFloat.
      // call AlgorithmData::getMatrixFloat on the input for this channel
      // to check that the input is already a MatrixFloat and return
      // that matrix.
      //
      res &= compute(output_a(c).makeVectorFloat(),
		     input_a(c)(0).getMatrixFloat(),
		     input_a(c)(0).getCoefType(), c);
    }

    // branch on input coefficient type - others take vector as input
    //
    else {

      // call AlgorithmData::makeVectorFloat to force the output vector for
      // this channel to be a VectorFloat, call AlgorithmData::getVectorFloat
      // on the input for this channel to check that the input is
      // already a VectorFloat and return that vector.
      //
      res &= compute(output_a(c).makeVectorFloat(),
		     input_a(c)(0).getVectorFloat(),
		     input_a(c)(0).getCoefType(), c);
    }

    // set the coefficient type for the output
    //
    output_a(c).setCoefType(AlgorithmData::REFLECTION);
  }

  // finish the debugging output
  //
  displayFinish(this);

  // exit gracefully
  //
  return res;
}

// method: compute
//
// arguments:
//  VectorFloat& coeff: (output) output reflection coefficients
//  const VectorFloat& input: (input) input data
//  AlgorithmData::COEF_TYPE input_coef_type: (input) type of input
//  int32 index: (input) channel number
//
// return: a bool8 value indicating status
//
// convenience wrapper: forwards to the five-argument compute method
// and discards the error energy output
//
bool8 Reflection::compute(VectorFloat& coeff_a,
			  const VectorFloat& input_a,
			  AlgorithmData::COEF_TYPE input_coef_type_a,
			  int32 index_a) {

  // declare local variable
  //
  Float temp_energy;

  // invoke the appropriate compute method and exit gracefully
  //
  return compute(coeff_a, temp_energy, input_a, input_coef_type_a, index_a);
}

// method: compute
//
// arguments:
//  VectorFloat& coeff: (output) predictor or reflection coefficents
//  const MatrixFloat& input: (input) input data (covariance matrix)
//  AlgorithmData::COEF_TYPE input_coef_type: (input) type of input
//  int32 index: (input) channel number
//
// return: a bool8 value indicating status
//
// convenience wrapper: forwards to the five-argument matrix compute
// method and discards the error energy output
//
bool8 Reflection::compute(VectorFloat& coeff_a,
			  const MatrixFloat& input_a,
			  AlgorithmData::COEF_TYPE input_coef_type_a,
			  int32 index_a) {

  // declare local variable
  //
  Float temp_energy;

  // invoke the appropriate compute method and exit gracefully.
  //
  // NOTE(review): unlike the VectorFloat wrapper above, index_a is NOT
  // forwarded here -- confirm the five-argument matrix compute has a
  // default channel index and that dropping index_a is intended
  //
  return compute(coeff_a, temp_energy, input_a, input_coef_type_a);
}

// method: compute
//
// arguments:
//  VectorFloat& coeff: (output) output reflection coefficients
//  Float& err_energy: (output) error energy
//  const VectorFloat& input: (input) input data
//  AlgorithmData::COEF_TYPE input_coef_type: (input) type of input
//  int32 index: (input) channel number
//
// return: a bool8 value indicating status
//
// this method computes the reflection coefficents as well as the error
// energy, dispatching on the configured algorithm_d / implementation_d
//
bool8 Reflection::compute(VectorFloat& coeff_a,
			  Float& err_energy_a,
			  const VectorFloat& input_a,
			  AlgorithmData::COEF_TYPE input_coef_type_a,
			  int32 index_a) {

  // declare local variable
  //
  bool8 status = false;

  // algorithm: AUTOCORRELATION
  //
  if (algorithm_d == AUTOCORRELATION) {

    // implementation: DURBIN
    //
    if (implementation_d == DURBIN) {
      status = computeAutoDurbin(coeff_a, err_energy_a, input_a);
    }

    // implementation: LEROUX_GUEGUEN
    //
    else if (implementation_d == LEROUX_GUEGUEN) {
      status = computeAutoLeroux(coeff_a, err_energy_a, input_a);
    }

    // error: unknown implementation
    //
    else {
      return Error::handle(name(), L"compute", ERR_UNKIMP,
			   __FILE__, __LINE__);
    }
  }

  // algorithm: LATTICE
  //
  else if (algorithm_d == LATTICE) {

    // implementation: BURG
    //
    if (implementation_d == BURG) {
      status = computeLatticeBurg(coeff_a, err_energy_a, input_a);
    }

    // error: unknown implementation
    //
    else {
      return Error::handle(name(), L"compute", ERR_UNKIMP,
			   __FILE__, __LINE__);
    }
  }

  // algorithm: PREDICTION
  //
  else if (algorithm_d == PREDICTION) {

    // implementation: STEP_UP
    // note: this path does not produce an error energy
    //
    if (implementation_d == STEP_UP) {
      status = computePredictionStepUp(coeff_a, input_a);
    }

    // error: unknown implementation
    //
    else {
      return Error::handle(name(), L"compute", ERR_UNKIMP,
			   __FILE__, __LINE__);
    }
  }

  // algorithm: LOG_AREA_RATIO
  //
  else if (algorithm_d == LOG_AREA_RATIO) {

    // implementation: KELLY_LOCHBAUM
    // note: this path does not produce an error energy
    //
    if (implementation_d == KELLY_LOCHBAUM) {
      status = computeLogAreaKellyLochbaum(coeff_a, input_a);
    }

    // error: unknown implementation
    //
    else {
      return Error::handle(name(), L"compute", ERR_UNKIMP,
			   __FILE__, __LINE__);
    }
  }

  // error: unknown algorithm
  //
  else {
    status = Error::handle(name(), L"compute", ERR_UNKALG,
			   __FILE__, __LINE__);
  }

  // possibly display the data
  //
  display(coeff_a, input_a, name());

  // exit gracefully
  //
  return status;
}

// method: compute
//
// arguments:
//  VectorFloat& coeff: (output) predictor or reflection coefficents
//  Float& err_energy: (output) error energy
//  const MatrixFloat& input: (input) input data (covariance matrix)
//  AlgorithmData::COEF_TYPE input_coef_type: (input) type of input
//  int32 index: (input) channel number
//
// return: a bool8 value indicating status
//
// this method computes the reflection coefficents from an input
// covariance matrix as well as the error energy; only the
// COVARIANCE/CHOLESKY combination is supported for matrix input
//
bool8 Reflection::compute(VectorFloat& coeff_a,
			  Float& err_energy_a,
			  const MatrixFloat& input_a,
			  AlgorithmData::COEF_TYPE input_coef_type_a,
			  int32 index_a) {

  // declare local variable
  //
  bool8 status = false;

  // algorithm: COVARIANCE
  //
  if (algorithm_d == COVARIANCE) {

    // implementation: CHOLESKY
    //
    if (implementation_d == CHOLESKY) {
      status = computeCovarCholesky(coeff_a, err_energy_a, input_a);
    }

    // error: unknown implementation
    //
    else {
      return Error::handle(name(), L"compute", ERR_UNKIMP,
			   __FILE__, __LINE__);
    }
  }

  // error: unknown algorithm
  //
  else {
    status = Error::handle(name(), L"compute", ERR_UNKALG,
			   __FILE__, __LINE__);
  }

  // possibly display the data
  //
  display(coeff_a, input_a, name());

  // exit gracefully
  //
  return status;
}

// method: computeAutoDurbin
//
// arguments:
//  VectorFloat& output: (output) reflection coefficients
//  Float& err_energy: (output) error energy
//  const VectorFloat& input: (input) autocorrelation coefficients
//
// return: a bool8 value indicating status
//
// this method computes the reflection coefficients from the
// autocorrelation coefficents. it also outputs the error energy.
//
// Reference:
//  J.D. Markel and A.H. Gray, "Linear Prediction of Speech,"
//  Springer-Verlag Berlin Heidelberg, New York, USA, pp. 217-219,
//  1980.
// bool8 Reflection::computeAutoDurbin(VectorFloat& output_a, Float& err_energy_a, const VectorFloat& input_a) { // declare local variables // VectorFloat pred_coef; // apply dynamic range threshold // float32 old_auto_0 = input_a(0); if (dyn_range_d < (float32)0) { float64 scale_factor = pow(10.0, (float32)dyn_range_d / Integral::DB_POW_SCALE_FACTOR); const_cast(input_a)(0) *= (1 + scale_factor); } else { return Error::handle(name(), L"computeAutoDurbin", ERR_DYNRANGE, __FILE__, __LINE__); } // declare local variables // int32 i, j; // loop counters int32 lp_order = input_a.length() - 1; // analysis order int32 j_bckwd; // a backwards index int32 middle_index; // inner loop limit float64 sum; // accumulator float64 rc_reg; // register // create space // if (!pred_coef.setLength(lp_order + 1)) { return Error::handle(name(), L"computeAutoDurbin", ERR, __FILE__, __LINE__, Error::WARNING); } if (!output_a.setLength(lp_order)) { return Error::handle(name(), L"computeAutoDurbin", ERR, __FILE__, __LINE__, Error::WARNING); } // initialization // pred_coef(0) = 1.0; err_energy_a = input_a(0); // if the energy of the signal is zero we set all the predictor // coefficients to zero and exit // if (err_energy_a == (float32)0) { for (int i = 0; i < output_a.length(); i++) { output_a(i) = 0; } return true; } // do the first step manually // sum = -(float64)input_a(1) / err_energy_a; output_a(0) = sum; pred_coef(1) = sum; err_energy_a *= (1 - sum * sum); // recursion // for (i = 2; i <= lp_order; i++) { // compute the next reflection coefficient // sum = 0; for (j = 1; j < i; j++) { sum += pred_coef(j) * input_a(i - j); } // check for divide by zero error // if (err_energy_a == (float32)0) { return Error::handle(name(), L"computeAutoDurbin", Error::ARG, __FILE__, __LINE__); } rc_reg = - ((float32)input_a(i) + sum) / err_energy_a; // compute the new prediction coefficients // note: the algorithm used here minimizes the storage. // // Reference: // J.D. Markel and A.H. 
Gray, "Linear Prediction of Speech," // Springer-Verlag Berlin Heidelberg, New York, USA, pp. 219, // 1980. // output_a(i - 1) = rc_reg; pred_coef(i) = rc_reg; j_bckwd = i - 1; middle_index = i / 2; for (j = 1; j <= middle_index; j++) { sum = pred_coef(j_bckwd) + (float32)rc_reg* pred_coef(j); pred_coef(j) = pred_coef(j) + (float32)rc_reg * pred_coef(i - j); pred_coef(j_bckwd) = sum; j_bckwd--; } // compute new error // err_energy_a *= 1.0 - rc_reg * rc_reg; } // resume the input_a(0) // const_cast(input_a)(0) = old_auto_0; // exit gracefully // return true; } // method: computeAutoLeroux // // arguments: // VectorFloat& output: (output) reflection coefficients // Float& err_energy: (output) error energy // const VectorFloat& input: (input) input data // // return: a bool8 value indicating status // // this method computes the reflection coefficients from the // autocorrelation coefficents. it also outputs the error energy. // // Reference: // J.D. Markel and A.H. Gray, "Linear Prediction of Speech," // Springer-Verlag Berlin Heidelberg, New York, USA, pp. 217-219, // 1980. // bool8 Reflection::computeAutoLeroux(VectorFloat& output_a, Float& err_energy_a, const VectorFloat& input_a) { return Error::handle(name(), L"computeAutoLeroux", ERR_UNKIMP, __FILE__, __LINE__); } // method: computeCovarCholesky // // arguments: // VectorFloat& output: (output) reflection coefficients // Float& err_energy: (output) error energy // const MatrixFloat& cor: (input) covariance coefficients // // return: a bool8 value indicating status // // this method computes the reflection coefficients using the // covariance analysis method. it also outputs the error energy. // // Reference: // J.D. Markel and A.H. Gray, "Linear Prediction of Speech," // Springer-Verlag Berlin Heidelberg, New York, USA, pp. 51-55, 1980. 
// bool8 Reflection::computeCovarCholesky(VectorFloat& output_a, Float& err_energy_a, const MatrixFloat& cor_a) { // declare local variable // VectorFloat pred_coef; // apply dynamic range threshold // VectorFloat temp_vector; float32 value; // add the dynamic range threshold // if (dyn_range_d < (float32)0.0) { float32 scale_factor = pow(10.0, ((float32)dyn_range_d / Integral::DB_POW_SCALE_FACTOR)); int32 num_rows = cor_a.getNumRows(); temp_vector.setLength(num_rows); for (int32 i = 0; i < num_rows; i++) { value = cor_a.getValue(i, i); temp_vector(i) = value; value *= 1 + scale_factor; const_cast(cor_a).setValue(i, i, value); } } else { return Error::handle(name(), L"computeCovarCholesky", ERR_DYNRANGE, __FILE__, __LINE__); } // compute the order // int32 refl_order = cor_a.getNumRows() - 1; // declare some scratch variables // MatrixFloat b(refl_order + (int32)1, refl_order + (int32)1); VectorFloat beta(refl_order + 1); // create space // if (!pred_coef.setLength(refl_order + 1)) { return Error::handle(name(), L"computeCovarCholesky", ERR, __FILE__, __LINE__, Error::WARNING); } if (!output_a.setLength(refl_order)) { return Error::handle(name(), L"computeCovarCholesky", ERR, __FILE__, __LINE__, Error::WARNING); } // declare local variables // int32 m = refl_order; int32 mf = refl_order; int32 minc; int32 ip; float32 gam; int32 j; int32 jp; float32 s; // Note: the equation numbers indicated here are from the COVAR // algorithm in following refence: // // J.D. Markel and A.H. Gray, "Linear Prediction of Speech," // Springer-Verlag Berlin Heidelberg, New York, USA, pp. 52, 1980. // initialization // b.setValue(1, 1, 1.0); // Eq. 3.40 err_energy_a = cor_a.getValue(1 - 1, 1 - 1); // Eq. 3.41 beta(1) = cor_a.getValue(2 - 1, 2 - 1); // Eq. 3.41 output_a(0) = -(float32)cor_a.getValue(2 - 1, 1 - 1) / cor_a.getValue(2 - 1, 2 - 1); // Eq. 3.42 pred_coef(0) = 1; // Eq. 3.43 pred_coef(1) = output_a((int32)0); // Eq. 
3.43 err_energy_a = err_energy_a - output_a(0) * output_a(0) * beta(1); // Eq. 3.44 // iteration // for (minc = 2; minc <= mf; minc++) { // do 400 ... m = minc - 1; b.setValue(minc, minc, 1); // Eq. 3.51a for (ip = 1; ip <= m; ip++) { // do 200 ... if ((float32)beta(ip) < 0) { // if 600, 200, 130 return Error::handle(name(), L"computeCovarCholesky", ERR_BETA, __FILE__, __LINE__, Error::WARNING); } // Eq. 3.50 and 3.51b // else if ((float32)beta(ip) > 0) { gam = 0; for (j = 1; j <= ip; j++) { gam += cor_a.getValue(m + 2 - 1, j + 1 - 1) * b.getValue(ip, j); } gam /= (float32)beta(ip); for (jp = 1; jp <= ip; jp++) { value = b.getValue(minc, jp) - gam * b.getValue(ip, jp); b.setValue(minc, jp, value); } } } // Eq. 3.52 // beta(minc) = 0; for (j = 1; j <= minc; j++) { beta(minc) += cor_a.getValue(m + 2 - 1, j + 1 - 1) * b.getValue(minc, j); } m++; if ((float32)beta(m) < 0) { // If 600,360,260 return Error::handle(name(), L"computeCovarCholesky", ERR_BETA, __FILE__, __LINE__, Error::WARNING); } // Eq. 3.55 // else if ((float32)beta(m) > 0) { s = 0; for (ip = 1; ip <= m; ip++) { s += cor_a.getValue(m + 1 - 1, ip - 1) * pred_coef(ip - 1); } // Eq. 3.56 // output_a(m - 1) = -s / beta(m); for (ip = 2; ip <= m; ip++) { pred_coef(ip - 1) += output_a(m - 1) * b.getValue(m, ip - 1); } pred_coef(m) = output_a(m - 1); } // if beta(m) goes to zero, output zero reflection // coefficients. this is not part of Markel and Gray's algorithm. // else if ((float64)beta(m) == 0) { err_energy_a.assign(0.0); output_a.assign(0.0); break; } // Eq. 
3.57 // err_energy_a -= output_a(m - 1) * output_a(m - 1) * beta(m); if (err_energy_a <= (float32)0) { err_energy_a = 0; return Error::handle(name(), L"computeCovarCholesky", ERR_ENERGY, __FILE__, __LINE__, Error::WARNING); } } // resume the values before applying the dynamic threshold // if (dyn_range_d < (float32)0.0) { int32 num_rows = temp_vector.length(); for (int32 i = 0; i < num_rows; i++) { const_cast(cor_a).setValue(i, i, temp_vector(i)); } } // exit gracefully // return true; } // method: computeLatticeBurg // // arguments: // VectorFloat& output: (output) reflection coefficients // Float& err_energy: (output) error energy // const VectorFloat& input: (input) input data // // return: a bool8 value indicating status // // this method computes the reflection coefficients using the burg // analysis method. it also outputs the error energy. // bool8 Reflection::computeLatticeBurg(VectorFloat& output_a, Float& err_energy_a, const VectorFloat& input_a) { // declare local variable // VectorFloat pred_coef; // resize output vectors // int32 pc_order = (int32)order_d + 1; if (!pred_coef.setLength(pc_order)) { return Error::handle(name(), L"computeLatticeBurg", ERR, __FILE__, __LINE__, Error::WARNING); } // declare local variables: // Float Registers/Accumulators // float32 sum, pe, akk, ai, aj; float32 sn, sd; akk = 0; // Indices In Autocorrelation // int32 i, k; int32 n = input_a.length(); // find the signal power // sum = input_a.sumSquare(); // if the signal power is zero, output zero reflection coefficients // if (sum == 0) { output_a.assign(order_d, 0.0); return true; } // check for divide by zero error // if (n == 0) { return Error::handle(name(), L"computeLatticeBurg", Error::ARG, __FILE__, __LINE__); } // compute error energy // err_energy_a = sum / n; // declare some internal scratch space: // note that this data is declared static so that it can be shared across // all objects. 
// float32 *tmp_r; float32 *tmp_rc; float32 *tmp_a; float32 *tmp_pef; float32 *tmp_per; // create new space // tmp_r = new float32[pc_order]; tmp_rc = new float32[pc_order]; tmp_a = new float32[pc_order]; tmp_pef = new float32[input_a.length() + 1]; tmp_per = new float32[input_a.length() + 1]; // initialize // float32 scale_factor = 1.0 + pow(10.0, ((float32)dyn_range_d / Integral::DB_POW_SCALE_FACTOR)); pe = sum * scale_factor; tmp_r[0] = sum * scale_factor; tmp_a[0] = 1.0; // main loop // for (k = 1; k <= order_d; k++) { // compute initial prediction errors // if (k == 1) { for (i = 2; i <= n; i++) { tmp_pef[i] = (float32)input_a(i - 1); tmp_per[i] = (float32)input_a(i - 2); } } // update prediction errors // else { for (i = n; i >= k + 1; i--) { tmp_pef[i] = tmp_pef[i] + akk * tmp_per[i]; tmp_per[i] = tmp_per[i - 1] + akk * tmp_pef[i - 1]; } } // compute new reflection coefficient // sn = 0; sd = 0; for (i = k + 1; i <= n; i++) { sn = sn - tmp_pef[i] * tmp_per[i]; sd = sd + tmp_pef[i]*tmp_pef[i] + tmp_per[i]*tmp_per[i]; } // check for divide by zero errors // if (sd == 0) { return Error::handle(name(), L"computeLatticeBurg", Error::ARG, __FILE__, __LINE__); } akk = 2.0 * sn / sd; tmp_rc[k] = akk; // convert rc[] to a[] using the levinson recursion // tmp_a[k] = akk; for (i = 1; i <= k / 2; i++) { ai = tmp_a[i]; aj = tmp_a[k - i]; tmp_a[i] = ai + akk * aj; tmp_a[k - i] = aj + akk * ai; } // compute new autocorrelation estimate // sum = 0; for (i = 1; i <= k; i++) { sum = sum + tmp_a[i] * tmp_r[k - i]; } tmp_r[k] = sum; // update prediction-error power // pe *= 1 - akk*akk; if (pe < 0) { break; } } // copy predictor coefficients // if (!pred_coef.assign(pc_order, tmp_a)) { return Error::handle(name(), L"computeLatticeBurg", ERR, __FILE__, __LINE__, Error::WARNING); } // copy reflection coefficients // if (!output_a.assign(order_d, &tmp_rc[1])) { return Error::handle(name(), L"computeLatticeBurg", ERR, __FILE__, __LINE__, Error::WARNING); } // compute the filter 
gain // err_energy_a = 1.0; for (i = 1; i <= order_d; i++) { err_energy_a *= 1 - tmp_rc[i] * tmp_rc[i]; } err_energy_a = sqrt(err_energy_a); // clean up old space // delete [] tmp_r; delete [] tmp_rc; delete [] tmp_a; delete [] tmp_pef; delete [] tmp_per; // exit gracefully // if (pe >= (float32)0) { return true; } else { return Error::handle(name(), L"computeLatticeBurg", ERR_PREDERR, __FILE__, __LINE__, Error::WARNING); } } // method: computePredictionStepUp // // arguments: // VectorFloat& refl_coef: (output) reflection coefficients // const VectorFloat& pred_coef: (input) prediction coefficients // // return: a bool8 value indicating status // // this method computes the reflection coefficients from the // prediction coefficents // // Reference: // J.D. Markel and A.H. Gray, "Linear Prediction of Speech," // Springer-Verlag, New York, pp. 95 - 97 1980. // bool8 Reflection::computePredictionStepUp(VectorFloat& refl_coef_a, const VectorFloat& pred_coef_a) { // declare local variables // int mp1, mrp1, mr, mm; float32 alpha, d; VectorFloat b; // set the filter order // int32 m = pred_coef_a.length() - 1; // make a copy of the predictor coefficients // VectorFloat temp_pred(pred_coef_a); // set the lengths of the vectors // b.setLength(m); refl_coef_a.setLength(m); mp1 = m + 1; alpha = 1.0; // compute the inverse of the step-up procedure, that is, given the synthesis // filter 1 / A(z), it is possible to determine the corresponding acoustic // tube reflection coeficients // for (int j = 0; j < m; j++) { mr = m + 1 - (j + 1); mrp1 = mr + 1; d = 1.0 - temp_pred(mrp1 - 1) * temp_pred(mrp1 - 1); // check for divide by zero error // if (d == 0) { return Error::handle(name(), L"computePredictionStepUp", Error::ARG, __FILE__, __LINE__); } alpha = alpha / d; for (int k = 0; k < mr; k++) { mm = mr + 2 - (k + 1); b(k) = temp_pred(mm - 1); } for (int k = 0; k < mr; k++) { // check for divide by zero error // if (d == 0) { return Error::handle(name(), 
L"computePredictionStepUp", Error::ARG, __FILE__, __LINE__); } temp_pred(k) = (temp_pred(k) - temp_pred(mrp1 - 1) * b(k)) / d; } refl_coef_a(mr - 1) = temp_pred(mrp1 - 1); // make sure that the reflection coefficients are not greater that one // if (abs((int32)refl_coef_a(mr - 1)) >= 1) { Error::handle(name(), L"The synthesis filter 1 / A(z) is unstable", \ Error::IO, __FILE__, __LINE__, Error::WARNING); } } // exit gracefully // return true; } // method: computeLogAreaKellyLochbaum // // arguments: // VectorFloat& output: (output) prediction coefficients // const VectorFloat& input: (input) log erea ratio coefficients // // return: a bool8 value indicating status // // this method computes the reflection coefficients from the // log-area-ratio, using LOG_AREA_RATIO algorithm and KELLY_LOCHBAUM // implementation // bool8 Reflection::computeLogAreaKellyLochbaum(VectorFloat& output_a, const VectorFloat& input_a) { // declare local variables // float64 g; int32 order = input_a.length(); output_a.setLength(order); // 1 - log_area_ratio(i) // reflection coefficient(i) = --------------------- // 1 + log_area_ratio(i) // for (int32 i = 0; i < order; i++) { g = exp(input_a(i)); // check for divide by zero error // if ((1 + g) == 0) { return Error::handle(name(), L"computeLogAreaKellyLochbaum", Error::ARG, __FILE__, __LINE__); } output_a(i) = (1 - g) / (1 + g); } // exit gracefully // return true; }