Make the callout arithmetic more robust by adding checks for overflow.
Without these checks, if the timeout value passed in is large enough, the sum of it and other factors (e.g. the current time as returned by sbinuptime(), or the 'precision' argument) can overflow into a negative number. That negative value is then passed down to eventtimers(4), whose et_start() routine loads et_min_period into the eventtimer, leaving the CPU where the thread runs stuck forever in the timer interrupt handler routine. This is now avoided by rounding the timeout period to INT64_MAX when the addition would overflow.

Reported by:	kib, pho
Discussed with:	kib, mav
Tested by:	pho (stress2 suite, kevent7.sh scenario)
Approved by:	re (kib)
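The check itself is the standard pre-addition overflow test for signed 64-bit arithmetic: for non-negative a and b, the sum a + b exceeds INT64_MAX exactly when INT64_MAX - a < b, and the subtraction on the left-hand side can never itself overflow. A minimal userland sketch of the pattern, assuming only that sbintime_t is a signed 64-bit quantity (on FreeBSD it is an int64_t holding a 32.32 fixed-point value); the helper name sat_add64 and the sample values are illustrative, not part of this commit:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Saturating addition for non-negative int64_t values, the same pattern
 * the commit applies to 'to_sbt += sbt': if the exact sum would exceed
 * INT64_MAX, clamp to INT64_MAX instead of wrapping negative.
 */
static int64_t
sat_add64(int64_t now, int64_t timeout)
{

	if (INT64_MAX - now < timeout)
		return (INT64_MAX);	/* would overflow: saturate */
	return (now + timeout);
}

int
main(void)
{
	int64_t now = INT64_MAX - 100;	/* stand-in for sbinuptime() */
	int64_t timeout = 1000;		/* a "large enough" timeout */

	/* Prints INT64_MAX instead of a wrapped, negative deadline. */
	printf("%jd\n", (intmax_t)sat_add64(now, timeout));
	return (0);
}
```

Note that the test has to run before the addition: once a signed add has overflowed, behavior is already undefined in C, so the wrapped result cannot be checked after the fact.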
commit 5c31dfc658 (parent 9439877e98)
@@ -572,6 +572,8 @@ callout_cc_add(struct callout *c, struct callout_cpu *cc,
 	 * Inform the eventtimers(4) subsystem there's a new callout
 	 * that has been inserted, but only if really required.
 	 */
+	if (INT64_MAX - c->c_time < c->c_precision)
+		c->c_precision = INT64_MAX - c->c_time;
 	sbt = c->c_time + c->c_precision;
 	if (sbt < cc->cc_firstevent) {
 		cc->cc_firstevent = sbt;
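(A note on this first hunk: in callout_cc_add() it is the addend rather than the sum that gets clamped, so the following sbt = c->c_time + c->c_precision can never wrap. Without the clamp, a wrapped-negative sbt would spuriously compare below cc->cc_firstevent and a negative event time would end up programmed into the eventtimer, the failure mode the commit message describes. This reading is inferred from the surrounding code rather than stated in the commit itself.)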
@@ -949,7 +951,10 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
 			to_sbt += tick_sbt;
 		} else
 			to_sbt = sbinuptime();
-		to_sbt += sbt;
+		if (INT64_MAX - to_sbt < sbt)
+			to_sbt = INT64_MAX;
+		else
+			to_sbt += sbt;
 		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
 		    sbt >> C_PRELGET(flags));
 		if (pr > precision)
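(The second hunk, in callout_reset_sbt_on(), saturates the absolute deadline instead: a to_sbt of INT64_MAX stays non-negative, and in sbintime_t's 32.32 fixed-point representation it corresponds to about 2^31 seconds, roughly 68 years of uptime, so a clamped callout is effectively deferred to "never" rather than handed to et_start() as a negative period.)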