From f85dfa2c402cc42e3ecf1a960d84a9ceeac908c7 Mon Sep 17 00:00:00 2001
From: Christian Lorentzen
Date: Thu, 20 Jan 2022 01:27:06 +0100
Subject: [PATCH] reduce duplicate computation in poisson, gamma, and tweedie
 objectives (#4950)

* ENH save computations of exp in objectives

* CLN missing declaration
---
 src/objective/regression_objective.hpp | 37 ++++++++++++++++----------
 1 file changed, 23 insertions(+), 14 deletions(-)

diff --git a/src/objective/regression_objective.hpp b/src/objective/regression_objective.hpp
index e711da012066..3bdeaa1de00f 100644
--- a/src/objective/regression_objective.hpp
+++ b/src/objective/regression_objective.hpp
@@ -439,17 +439,20 @@ class RegressionPoissonLoss: public RegressionL2loss {
    */
   void GetGradients(const double* score, score_t* gradients,
                     score_t* hessians) const override {
+    double exp_max_delta_step_ = std::exp(max_delta_step_);
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>(std::exp(score[i]) - label_[i]);
-        hessians[i] = static_cast<score_t>(std::exp(score[i] + max_delta_step_));
+        double exp_score = std::exp(score[i]);
+        gradients[i] = static_cast<score_t>(exp_score - label_[i]);
+        hessians[i] = static_cast<score_t>(exp_score * exp_max_delta_step_);
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>((std::exp(score[i]) - label_[i]) * weights_[i]);
-        hessians[i] = static_cast<score_t>(std::exp(score[i] + max_delta_step_) * weights_[i]);
+        double exp_score = std::exp(score[i]);
+        gradients[i] = static_cast<score_t>((exp_score - label_[i]) * weights_[i]);
+        hessians[i] = static_cast<score_t>(exp_score * exp_max_delta_step_ * weights_[i]);
       }
     }
   }
@@ -689,14 +692,16 @@ class RegressionGammaLoss : public RegressionPoissonLoss {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>(1.0 - label_[i] * std::exp(-score[i]));
-        hessians[i] = static_cast<score_t>(label_[i] * std::exp(-score[i]));
+        double exp_score = std::exp(-score[i]);
+        gradients[i] = static_cast<score_t>(1.0 - label_[i] * exp_score);
+        hessians[i] = static_cast<score_t>(label_[i] * exp_score);
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>((1.0 - label_[i] * std::exp(-score[i])) * weights_[i]);
-        hessians[i] = static_cast<score_t>(label_[i] * std::exp(-score[i]) * weights_[i]);
+        double exp_score = std::exp(-score[i]);
+        gradients[i] = static_cast<score_t>((1.0 - label_[i] * exp_score) * weights_[i]);
+        hessians[i] = static_cast<score_t>(label_[i] * exp_score * weights_[i]);
       }
     }
   }
@@ -725,16 +730,20 @@ class RegressionTweedieLoss: public RegressionPoissonLoss {
     if (weights_ == nullptr) {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>(-label_[i] * std::exp((1 - rho_) * score[i]) + std::exp((2 - rho_) * score[i]));
-        hessians[i] = static_cast<score_t>(-label_[i] * (1 - rho_) * std::exp((1 - rho_) * score[i]) +
-                                           (2 - rho_) * std::exp((2 - rho_) * score[i]));
+        double exp_1_score = std::exp((1 - rho_) * score[i]);
+        double exp_2_score = std::exp((2 - rho_) * score[i]);
+        gradients[i] = static_cast<score_t>(-label_[i] * exp_1_score + exp_2_score);
+        hessians[i] = static_cast<score_t>(-label_[i] * (1 - rho_) * exp_1_score +
+                                           (2 - rho_) * exp_2_score);
       }
     } else {
       #pragma omp parallel for schedule(static)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        gradients[i] = static_cast<score_t>((-label_[i] * std::exp((1 - rho_) * score[i]) + std::exp((2 - rho_) * score[i])) * weights_[i]);
-        hessians[i] = static_cast<score_t>((-label_[i] * (1 - rho_) * std::exp((1 - rho_) * score[i]) +
-                                            (2 - rho_) * std::exp((2 - rho_) * score[i])) * weights_[i]);
+        double exp_1_score = std::exp((1 - rho_) * score[i]);
+        double exp_2_score = std::exp((2 - rho_) * score[i]);
+        gradients[i] = static_cast<score_t>((-label_[i] * exp_1_score + exp_2_score) * weights_[i]);
+        hessians[i] = static_cast<score_t>((-label_[i] * (1 - rho_) * exp_1_score +
+                                            (2 - rho_) * exp_2_score) * weights_[i]);
      }
    }
  }
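
Reviewer note (not part of the patch): the Poisson hunk relies on the
identity exp(score + max_delta_step) = exp(score) * exp(max_delta_step),
so the loop-invariant factor exp(max_delta_step_) is computed once up
front and each iteration then pays for a single std::exp call instead of
two. The Gamma and Tweedie hunks apply the same hoisting to their
repeated exp terms. Below is a minimal standalone sketch of the Poisson
case, assuming plain double buffers rather than LightGBM's score_t and
data_size_t types; the function and variable names are illustrative, not
part of the library:

  #include <cmath>
  #include <cstddef>

  // One std::exp per element: the loop-invariant factor is hoisted.
  void PoissonGradHess(const double* score, const double* label,
                       std::size_t n, double max_delta_step,
                       double* gradients, double* hessians) {
    const double exp_max_delta_step = std::exp(max_delta_step);  // hoisted
    for (std::size_t i = 0; i < n; ++i) {
      const double exp_score = std::exp(score[i]);  // reused by both outputs
      gradients[i] = exp_score - label[i];
      // exp(score[i] + max_delta_step) == exp(score[i]) * exp(max_delta_step)
      hessians[i] = exp_score * exp_max_delta_step;
    }
  }

Since std::exp is far more expensive than a multiply, replacing the
second exp call per iteration with one multiplication is a net win in
these hot gradient/hessian loops.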