context: avoid some scaling overflows

Make a macro to scale without overflows and use this in the context.
This commit is contained in:
Wim Taymans 2025-01-24 16:21:59 +01:00
parent eb462302b7
commit fa15af376f
2 changed files with 12 additions and 5 deletions

View file

@@ -292,6 +292,13 @@ struct spa_fraction {
 #define SPA_ROUND_DOWN_N(num,align)	((num) & ~SPA_ROUND_MASK(num, align))
 #define SPA_ROUND_UP_N(num,align)	((((num)-1) | SPA_ROUND_MASK(num, align))+1)
#define SPA_SCALE32(val,num,denom) \
({ \
uint64_t _val = (val); \
uint64_t _denom = (denom); \
(uint32_t)(((_val) * (num)) / (_denom)); \
})
 #define SPA_SCALE32_UP(val,num,denom)				\
 ({								\
 	uint64_t _val = (val);					\

View file

@@ -1799,15 +1799,15 @@ again:
 	if (node_rate_quantum != 0 && current_rate != node_rate_quantum) {
 		/* the quantum values are scaled with the current rate */
-		node_def_quantum = node_def_quantum * current_rate / node_rate_quantum;
-		node_min_quantum = node_min_quantum * current_rate / node_rate_quantum;
-		node_max_quantum = node_max_quantum * current_rate / node_rate_quantum;
+		node_def_quantum = SPA_SCALE32(node_def_quantum, current_rate, node_rate_quantum);
+		node_min_quantum = SPA_SCALE32(node_min_quantum, current_rate, node_rate_quantum);
+		node_max_quantum = SPA_SCALE32(node_max_quantum, current_rate, node_rate_quantum);
 	}
 	/* calculate desired quantum. Don't limit to the max_latency when we are
 	 * going to force a quantum or rate and reconfigure the nodes. */
 	if (max_latency.denom != 0 && !force_quantum && !force_rate) {
-		uint32_t tmp = ((uint64_t)max_latency.num * current_rate / max_latency.denom);
+		uint32_t tmp = SPA_SCALE32(max_latency.num, current_rate, max_latency.denom);
 		if (tmp < node_max_quantum)
 			node_max_quantum = tmp;
 	}
@@ -1824,7 +1824,7 @@ again:
 	else {
 		target_quantum = node_def_quantum;
 		if (latency.denom != 0)
-			target_quantum = (latency.num * current_rate / latency.denom);
+			target_quantum = SPA_SCALE32(latency.num, current_rate, latency.denom);
 		target_quantum = SPA_CLAMP(target_quantum, node_min_quantum, node_max_quantum);
 		target_quantum = SPA_CLAMP(target_quantum, floor_quantum, ceil_quantum);