FastLED 3.9.7
Loading...
Searching...
No Matches
math8.h
Go to the documentation of this file.
1#pragma once
2
3#include "scale8.h"
5#include "intmap.h"
6#include "fl/namespace.h"
7
9
13
16
26
/// Add one byte to another, saturating the result at 0xFF.
/// @param i first byte to add
/// @param j second byte to add
/// @returns i + j, clamped to 255 on overflow
LIB8STATIC_ALWAYS_INLINE uint8_t qadd8(uint8_t i, uint8_t j) {
#if QADD8_C == 1
    // Portable C: add in a wider type, then clamp.
    unsigned int t = i + j;
    if (t > 255)
        t = 255;
    return t;
#elif QADD8_AVRASM == 1
    asm volatile(
        /* First, add j to i, conditioning the C flag */
        "add %0, %1 \n\t"

        /* Now test the C flag.
           If C is clear, we branch around a load of 0xFF into i.
           If C is set, we go ahead and load 0xFF into i.
        */
        "brcc L_%= \n\t"
        "ldi %0, 0xFF \n\t"
        "L_%=: "
        : "+d"(i) // r16-r31, restricted by ldi
        : "r"(j));
    return i;
#elif QADD8_ARM_DSP_ASM == 1
    // ARM DSP extension: UQADD8 is a saturating unsigned byte add.
    asm volatile("uqadd8 %0, %0, %1" : "+r"(i) : "r"(j));
    return i;
#else
#error "No implementation for qadd8 available."
#endif
}
59
/// Add one signed byte to another, saturating at 0x7F on positive
/// overflow and at -0x80 on negative overflow.
/// @param i first signed byte to add
/// @param j second signed byte to add
/// @returns i + j, clamped to the int8_t range [-128, 127]
LIB8STATIC_ALWAYS_INLINE int8_t qadd7(int8_t i, int8_t j) {
#if QADD7_C == 1
    // Portable C: widen to 16 bits so the true sum is representable,
    // then clamp to the int8_t range.
    int16_t t = i + j;
    if (t > 127)
        t = 127;
    else if (t < -128)
        t = -128;
    return t;
#elif QADD7_AVRASM == 1
    asm volatile(
        /* First, add j to i, conditioning the V and C flags */
        "add %0, %1 \n\t"

        /* Now test the V flag.
           If V is clear, we branch to end.
           If V is set, we go ahead and load 0x7F into i.
        */
        "brvc L_%= \n\t"
        "ldi %0, 0x7F \n\t"

        /* When both numbers are negative, C is set.
           Adding it to make result negative. */
        "adc %0, __zero_reg__\n\t"
        "L_%=: "
        : "+d"(i) // r16-r31, restricted by ldi
        : "r"(j));
    return i;
#elif QADD7_ARM_DSP_ASM == 1
    // ARM DSP extension: QADD8 is a saturating *signed* byte add.
    asm volatile("qadd8 %0, %0, %1" : "+r"(i) : "r"(j));
    return i;
#else
#error "No implementation for qadd7 available."
#endif
}
98
/// Subtract one byte from another, saturating at 0x00.
/// @param i byte to subtract from
/// @param j byte to subtract
/// @returns i - j, clamped to 0 on underflow
LIB8STATIC_ALWAYS_INLINE uint8_t qsub8(uint8_t i, uint8_t j) {
#if QSUB8_C == 1
    // Portable C: subtract in a signed type, then clamp at zero.
    int t = i - j;
    if (t < 0)
        t = 0;
    return t;
#elif QSUB8_AVRASM == 1

    asm volatile(
        /* First, subtract j from i, conditioning the C flag */
        "sub %0, %1 \n\t"

        /* Now test the C flag.
           If C is clear, we branch around a load of 0x00 into i.
           If C is set, we go ahead and load 0x00 into i.
        */
        "brcc L_%= \n\t"
        "ldi %0, 0x00 \n\t"
        "L_%=: "
        : "+d"(i) // r16-r31, restricted by ldi
        : "r"(j));
    return i;
#else
#error "No implementation for qsub8 available."
#endif
}
129
/// Add one byte to another, with an 8-bit (wrapping) result.
/// @param i first byte to add
/// @param j second byte to add
/// @returns (i + j) modulo 256
LIB8STATIC_ALWAYS_INLINE uint8_t add8(uint8_t i, uint8_t j) {
#if ADD8_C == 1
    // Plain modulo-256 addition; overflow wraps via uint8_t truncation.
    return (uint8_t)(i + j);
#elif ADD8_AVRASM == 1
    // Single-instruction add on AVR; the result wraps in the register.
    asm volatile("add %0, %1" : "+r"(i) : "r"(j));
    return i;
#else
#error "No implementation for add8 available."
#endif
}
147
/// Add one byte to two bytes, with a 16-bit (wrapping) result.
/// @param i one-byte addend
/// @param j two-byte addend
/// @returns (i + j) modulo 65536
LIB8STATIC_ALWAYS_INLINE uint16_t add8to16(uint8_t i, uint16_t j) {
#if ADD8_C == 1
    uint16_t t = i + j;
    return t;
#elif ADD8_AVRASM == 1
    // Add i (one byte) into the low byte of j, then propagate the
    // carry into j's high byte.
    asm volatile("add %A[j], %[i] \n\t"
                 "adc %B[j], __zero_reg__ \n\t"
                 : [j] "+r"(j)
                 : [i] "r"(i));
    // BUGFIX: the sum is accumulated in j (the "+r"(j) output operand);
    // the previous `return i;` discarded it and returned the unchanged
    // low-byte input instead.
    return j;
#else
#error "No implementation for add8to16 available."
#endif
}
168
/// Subtract one byte from another, with an 8-bit (wrapping) result.
/// @param i byte to subtract from
/// @param j byte to subtract
/// @returns (i - j) modulo 256
LIB8STATIC_ALWAYS_INLINE uint8_t sub8(uint8_t i, uint8_t j) {
#if SUB8_C == 1
    // Plain modulo-256 subtraction; underflow wraps via uint8_t truncation.
    return (uint8_t)(i - j);
#elif SUB8_AVRASM == 1
    // Single-instruction subtract on AVR; the result wraps in the register.
    asm volatile("sub %0, %1" : "+r"(i) : "r"(j));
    return i;
#else
#error "No implementation for sub8 available."
#endif
}
186
/// Calculate an integer average of two unsigned 8-bit values,
/// rounded down (a fractional half is discarded).
/// @returns (i + j) / 2, truncated
LIB8STATIC_ALWAYS_INLINE uint8_t avg8(uint8_t i, uint8_t j) {
#if AVG8_C == 1
    // i + j promotes to int, so the 9th (carry) bit survives the shift.
    return (i + j) >> 1;
#elif AVG8_AVRASM == 1
    asm volatile(
        /* First, add j to i, 9th bit overflows into C flag */
        "add %0, %1 \n\t"
        /* Divide by two, moving C flag into high 8th bit */
        "ror %0 \n\t"
        : "+r"(i)
        : "r"(j));
    return i;
#else
#error "No implementation for avg8 available."
#endif
}
209
/// Calculate an integer average of two unsigned 16-bit values,
/// rounded down (a fractional half is discarded).
/// @returns (i + j) / 2, truncated
LIB8STATIC_ALWAYS_INLINE uint16_t avg16(uint16_t i, uint16_t j) {
#if AVG16_C == 1
    // Do the add in 32 bits so the 17th (carry) bit survives the shift.
    return (uint32_t)((uint32_t)(i) + (uint32_t)(j)) >> 1;
#elif AVG16_AVRASM == 1
    asm volatile(
        /* First, add jLo (heh) to iLo, 9th bit overflows into C flag */
        "add %A[i], %A[j] \n\t"
        /* Now, add C + jHi to iHi, 17th bit overflows into C flag */
        "adc %B[i], %B[j] \n\t"
        /* Divide iHi by two, moving C flag into high 16th bit,
           old 9th bit now in C */
        "ror %B[i] \n\t"
        /* Divide iLo by two, moving C flag into high 8th bit */
        "ror %A[i] \n\t"
        : [i] "+r"(i)
        : [j] "r"(j));
    return i;
#else
#error "No implementation for avg16 available."
#endif
}
238
/// Calculate an integer average of two unsigned 8-bit values,
/// rounded up (a fractional half rounds to the next integer).
/// @returns (i + j + 1) / 2, truncated
LIB8STATIC_ALWAYS_INLINE uint8_t avg8r(uint8_t i, uint8_t j) {
#if AVG8R_C == 1
    // The +1 implements round-half-up; int promotion keeps the carry bit.
    return (i + j + 1) >> 1;
#elif AVG8R_AVRASM == 1
    asm volatile(
        /* First, add j to i, 9th bit overflows into C flag */
        "add %0, %1 \n\t"
        /* Divide by two, moving C flag into high 8th bit,
           old 1st bit now in C */
        "ror %0 \n\t"
        /* Add C flag */
        "adc %0, __zero_reg__\n\t"
        : "+r"(i)
        : "r"(j));
    return i;
#else
#error "No implementation for avg8r available."
#endif
}
264
/// Calculate an integer average of two unsigned 16-bit values,
/// rounded up (a fractional half rounds to the next integer).
/// @returns (i + j + 1) / 2, truncated
LIB8STATIC_ALWAYS_INLINE uint16_t avg16r(uint16_t i, uint16_t j) {
#if AVG16R_C == 1
    // 32-bit intermediate keeps the carry; the +1 rounds a half up.
    return (uint32_t)((uint32_t)(i) + (uint32_t)(j) + 1) >> 1;
#elif AVG16R_AVRASM == 1
    asm volatile(
        /* First, add jLo (heh) to iLo, 9th bit overflows into C flag */
        "add %A[i], %A[j] \n\t"
        /* Now, add C + jHi to iHi, 17th bit overflows into C flag */
        "adc %B[i], %B[j] \n\t"
        /* Divide iHi by two, moving C flag into high 16th bit,
           old 9th bit now in C */
        "ror %B[i] \n\t"
        /* Divide iLo by two, moving C flag into high 8th bit,
           old 1st bit now in C */
        "ror %A[i] \n\t"
        /* Add C flag */
        "adc %A[i], __zero_reg__\n\t"
        "adc %B[i], __zero_reg__\n\t"
        : [i] "+r"(i)
        : [j] "r"(j));
    return i;
#else
#error "No implementation for avg16r available."
#endif
}
298
/// Calculate an integer average of two signed 7-bit values (int8_t).
/// Each operand is halved before summing, so the intermediate cannot
/// overflow; only i's low bit is added back, so rounding follows i's
/// parity and j's low bit is discarded (e.g. avg7(0,1)==0, avg7(1,0)==1).
LIB8STATIC_ALWAYS_INLINE int8_t avg7(int8_t i, int8_t j) {
#if AVG7_C == 1
    return (i >> 1) + (j >> 1) + (i & 0x1);
#elif AVG7_AVRASM == 1
    // asr halves each operand arithmetically; i's ejected low bit lands
    // in C and is folded back in by the adc.
    asm volatile("asr %1 \n\t"
                 "asr %0 \n\t"
                 "adc %0, %1 \n\t"
                 : "+r"(i)
                 : "r"(j));
    return i;
#else
#error "No implementation for avg7 available."
#endif
}
320
/// Calculate an integer average of two signed 15-bit values (int16_t).
/// Each operand is halved before summing, so the intermediate cannot
/// overflow; only i's low bit is added back, so rounding follows i's
/// parity and j's low bit is discarded (same convention as avg7()).
LIB8STATIC_ALWAYS_INLINE int16_t avg15(int16_t i, int16_t j) {
#if AVG15_C == 1
    return (i >> 1) + (j >> 1) + (i & 0x1);
#elif AVG15_AVRASM == 1
    asm volatile(
        /* first divide j by 2, throwing away lowest bit */
        "asr %B[j] \n\t"
        "ror %A[j] \n\t"
        /* now divide i by 2, with lowest bit going into C */
        "asr %B[i] \n\t"
        "ror %A[i] \n\t"
        /* add j + C to i */
        "adc %A[i], %A[j] \n\t"
        "adc %B[i], %B[j] \n\t"
        : [i] "+r"(i)
        : [j] "r"(j));
    return i;
#else
#error "No implementation for avg15 available."
#endif
}
349
/// Calculate the remainder of one unsigned 8-bit value divided by
/// another, i.e. a % m, by repeated subtraction.
/// NOTE(review): m == 0 never terminates on either path -- callers
/// must pass m > 0.
/// @param a dividend
/// @param m modulus
/// @returns a % m
LIB8STATIC_ALWAYS_INLINE uint8_t mod8(uint8_t a, uint8_t m) {
#if defined(__AVR__)
    // AVR: subtract m until the subtraction borrows (C set), then add
    // one m back to undo the overshoot.
    asm volatile("L_%=: sub %[a],%[m] \n\t"
                 " brcc L_%= \n\t"
                 " add %[a],%[m] \n\t"
                 : [a] "+r"(a)
                 : [m] "r"(m));
#else
    // Portable C: strip off whole multiples of m.
    for (; a >= m; a -= m) {
    }
#endif
    return a;
}
374
/// Add two numbers and take the modulo of the (8-bit wrapped) sum
/// with a third number: (a + b) % m.
/// NOTE(review): m == 0 never terminates on either path -- callers
/// must pass m > 0.
/// @param a first addend / dividend
/// @param b second addend
/// @param m modulus
/// @returns ((a + b) mod 256) % m
LIB8STATIC uint8_t addmod8(uint8_t a, uint8_t b, uint8_t m) {
#if defined(__AVR__)
    asm volatile(" add %[a],%[b] \n\t"
                 "L_%=: sub %[a],%[m] \n\t"
                 " brcc L_%= \n\t"
                 " add %[a],%[m] \n\t"
                 : [a] "+r"(a)
                 : [b] "r"(b), [m] "r"(m));
#else
    // Wrapping 8-bit add, then strip off whole multiples of m.
    a = (uint8_t)(a + b);
    for (; a >= m; a -= m) {
    }
#endif
    return a;
}
407
/// Subtract two numbers and take the modulo of the (8-bit wrapped)
/// difference with a third number: (a - b) % m.
/// NOTE(review): m == 0 never terminates on either path -- callers
/// must pass m > 0.
/// @param a minuend / dividend
/// @param b subtrahend
/// @param m modulus
/// @returns ((a - b) mod 256) % m
LIB8STATIC uint8_t submod8(uint8_t a, uint8_t b, uint8_t m) {
#if defined(__AVR__)
    asm volatile(" sub %[a],%[b] \n\t"
                 "L_%=: sub %[a],%[m] \n\t"
                 " brcc L_%= \n\t"
                 " add %[a],%[m] \n\t"
                 : [a] "+r"(a)
                 : [b] "r"(b), [m] "r"(m));
#else
    // Wrapping 8-bit subtract, then strip off whole multiples of m.
    a = (uint8_t)(a - b);
    for (; a >= m; a -= m) {
    }
#endif
    return a;
}
440
/// 8x8-bit multiplication, keeping only the low 8 bits of the product.
/// @returns (i * j) & 0xFF
LIB8STATIC_ALWAYS_INLINE uint8_t mul8(uint8_t i, uint8_t j) {
#if MUL8_C == 1
    return ((int)i * (int)(j)) & 0xFF;
#elif MUL8_AVRASM == 1
    asm volatile(
        /* Multiply 8-bit i * 8-bit j, giving 16-bit r1,r0 */
        "mul %0, %1 \n\t"
        /* Extract the LOW 8-bits (r0) */
        "mov %0, r0 \n\t"
        /* Restore r1 to "0"; it's expected to always be that */
        "clr __zero_reg__ \n\t"
        : "+r"(i)
        : "r"(j)
        : "r0", "r1");
    return i;
#else
#error "No implementation for mul8 available."
#endif
}
465
/// 8x8-bit multiplication with an 8-bit result, saturating at 0xFF.
/// @returns min(i * j, 255)
LIB8STATIC_ALWAYS_INLINE uint8_t qmul8(uint8_t i, uint8_t j) {
#if QMUL8_C == 1
    // Portable C: full-width product, clamped to one byte.
    unsigned p = (unsigned)i * (unsigned)j;
    if (p > 255)
        p = 255;
    return p;
#elif QMUL8_AVRASM == 1
    asm volatile(
        /* Multiply 8-bit i * 8-bit j, giving 16-bit r1,r0 */
        " mul %0, %1 \n\t"
        /* Extract the LOW 8-bits (r0) */
        " mov %0, r0 \n\t"
        /* If high byte of result is zero, all is well. */
        " tst r1 \n\t"
        " breq Lnospill_%= \n\t"
        /* If high byte of result > 0, saturate to 0xFF */
        " ldi %0, 0xFF \n\t"
        "Lnospill_%=: \n\t"
        /* Restore r1 to "0"; it's expected to always be that */
        " clr __zero_reg__ \n\t"
        : "+d"(i) // r16-r31, restricted by ldi
        : "r"(j)
        : "r0", "r1");
    return i;
#else
#error "No implementation for qmul8 available."
#endif
}
498
/// Take the absolute value of a signed 8-bit value.
/// @note abs8(-128) cannot be represented in int8_t; the result for
///       that one input wraps (typically back to -128).
/// BUGFIX: the function signature line was missing from this block
/// (the extraction dropped it); reconstructed per the declared
/// prototype `LIB8STATIC_ALWAYS_INLINE int8_t abs8(int8_t i)`.
LIB8STATIC_ALWAYS_INLINE int8_t abs8(int8_t i) {
#if ABS8_C == 1
    if (i < 0)
        i = -i;
    return i;
#elif ABS8_AVRASM == 1
    asm volatile(
        /* First, check the high bit, and prepare to skip if it's clear */
        "sbrc %0, 7 \n"

        /* Negate the value */
        "neg %0 \n"

        : "+r"(i)
        : "r"(i));
    return i;
#else
#error "No implementation for abs8 available."
#endif
}
520
/// Square root for 16-bit integers: floor(sqrt(x)), found by a
/// binary search over candidate roots.
/// @param x value to take the square root of
/// @returns the largest uint8_t r such that r*r <= x
LIB8STATIC uint8_t sqrt16(uint16_t x) {
    // 0 and 1 are their own square roots.
    if (x <= 1) {
        return x;
    }

    uint8_t lo = 1; // lower search bound
    // Upper bound: (x >> 5) + 8 overestimates sqrt(x) for small x;
    // past 7904 that estimate would exceed 255, so cap it there.
    uint8_t hi = (x > 7904) ? 255 : (uint8_t)((x >> 5) + 8);

    while (lo <= hi) {
        uint8_t probe = (uint8_t)((lo + hi) >> 1);
        if ((uint16_t)(probe * probe) > x) {
            hi = probe - 1;
        } else {
            if (probe == 255) {
                // Root is maxed out; stop before probe+1 would wrap.
                return 255;
            }
            lo = probe + 1;
        }
    }

    // lo is the first candidate whose square exceeds x.
    return lo - 1;
}
552
/// Square root of an 8-bit value, computed by widening the input with
/// map8_to_16() and delegating to sqrt16().
/// NOTE(review): assumes map8_to_16() (from intmap.h) spreads the 8-bit
/// input across the 16-bit range -- confirm the exact mapping there.
LIB8STATIC_ALWAYS_INLINE uint8_t sqrt8(uint8_t x) {
    return sqrt16(map8_to_16(x));
}
556
562#if (FASTLED_BLEND_FIXED == 1)
/// Blend a variable proportion (0-255) of one byte toward another.
/// @param a the starting byte value
/// @param b the byte value to blend toward
/// @param amountOfB the proportion (0-255) of b in the result;
///        amountOfB == 0 yields a, amountOfB == 255 yields b
/// @returns the blended byte
LIB8STATIC uint8_t blend8(uint8_t a, uint8_t b, uint8_t amountOfB) {

    // The BLEND_FIXED formula is
    //
    //   result = ( A*(amountOfA) + B*(amountOfB) ) / 256
    //
    // ...where amountOfA = 255-amountOfB.
    //
    // This formula will never return 255, which is why the BLEND_FIXED +
    // SCALE8_FIXED version is
    //
    //   result = ( A*(amountOfA) + A + B*(amountOfB) + B ) / 256
    //
    // We can rearrange this formula for some great optimisations.
    //
    //   result = ( A*(amountOfA) + A + B*(amountOfB) + B ) / 256
    //          = ( A*(255-amountOfB) + A + B*(amountOfB) + B ) / 256
    //          = ( A*(256-amountOfB) + B*(amountOfB) + B ) / 256
    //          = ( A*256 + B + B*(amountOfB) - A*(amountOfB) ) / 256
    //            // ^ the version used in the SCALE8_FIXED AVR asm below
    //          = ( A*256 + B + (B-A)*(amountOfB) ) / 256
    //            // ^ the version used in the SCALE8_FIXED C code below

    uint16_t partial;
    uint8_t result;

#if BLEND8_C == 1

#if (FASTLED_SCALE8_FIXED == 1)
    partial = (a << 8) | b; // A*256 + B

    // on many platforms this compiles to a single multiply of (B-A) * amountOfB
    partial += (b * amountOfB);
    partial -= (a * amountOfB);

#else
    uint8_t amountOfA = 255 - amountOfB;

    // on the other hand, this compiles to two multiplies, and gives the "wrong"
    // answer :]
    partial = (a * amountOfA);
    partial += (b * amountOfB);
#endif

    result = partial >> 8;

    return result;

#elif BLEND8_AVRASM == 1

#if (FASTLED_SCALE8_FIXED == 1)

    // 1 or 2 cycles depending on how the compiler optimises
    partial = (a << 8) | b;

    // 7 cycles
    asm volatile(" mul %[a], %[amountOfB] \n\t"
                 " sub %A[partial], r0 \n\t"
                 " sbc %B[partial], r1 \n\t"
                 " mul %[b], %[amountOfB] \n\t"
                 " add %A[partial], r0 \n\t"
                 " adc %B[partial], r1 \n\t"
                 " clr __zero_reg__ \n\t"
                 : [partial] "+r"(partial)
                 : [amountOfB] "r"(amountOfB), [a] "r"(a), [b] "r"(b)
                 : "r0", "r1");

#else

    // non-SCALE8-fixed version

    // 7 cycles
    asm volatile(
        /* partial = b * amountOfB */
        " mul %[b], %[amountOfB] \n\t"
        " movw %A[partial], r0 \n\t"

        /* amountOfB (aka amountOfA) = 255 - amountOfB */
        " com %[amountOfB] \n\t"

        /* partial += a * amountOfB (aka amountOfA) */
        " mul %[a], %[amountOfB] \n\t"

        " add %A[partial], r0 \n\t"
        " adc %B[partial], r1 \n\t"

        " clr __zero_reg__ \n\t"

        : [partial] "=r"(partial), [amountOfB] "+r"(amountOfB)
        : [a] "r"(a), [b] "r"(b)
        : "r0", "r1");

#endif

    result = partial >> 8;

    return result;

#else
#error "No implementation for blend8 available."
#endif
}
665
666#else
/// Blend a variable proportion (0-255) of one byte toward another
/// (legacy non-FIXED variant, selected when FASTLED_BLEND_FIXED != 1).
LIB8STATIC uint8_t blend8(uint8_t a, uint8_t b, uint8_t amountOfB) {
    // This version loses precision in the integer math
    // and can actually return results outside of the range
    // from a to b. Its use is not recommended.
    uint8_t result;
    uint8_t amountOfA = 255 - amountOfB;
    // scale8_LEAVING_R1_DIRTY skips the per-call AVR r1 cleanup between
    // the two scales; cleanup_R1() restores r1 (__zero_reg__) afterward.
    result = scale8_LEAVING_R1_DIRTY(a, amountOfA) +
             scale8_LEAVING_R1_DIRTY(b, amountOfB);
    cleanup_R1();
    return result;
}
678#endif
679
682
LIB8STATIC_ALWAYS_INLINE uint8_t qadd8(uint8_t i, uint8_t j)
Add one byte to another, saturating at 0xFF.
Definition math8.h:31
LIB8STATIC_ALWAYS_INLINE int8_t abs8(int8_t i)
Take the absolute value of a signed 8-bit int8_t.
Definition math8.h:500
LIB8STATIC_ALWAYS_INLINE uint8_t qmul8(uint8_t i, uint8_t j)
8x8 bit multiplication with 8-bit result, saturating at 0xFF.
Definition math8.h:470
LIB8STATIC_ALWAYS_INLINE uint16_t avg16(uint16_t i, uint16_t j)
Calculate an integer average of two unsigned 16-bit integer values (uint16_t), rounded down.
Definition math8.h:216
LIB8STATIC_ALWAYS_INLINE int16_t avg15(int16_t i, int16_t j)
Calculate an integer average of two signed 15-bit integers (int16_t).
Definition math8.h:328
LIB8STATIC uint8_t addmod8(uint8_t a, uint8_t b, uint8_t m)
Add two numbers, and calculate the modulo of the sum and a third number, M.
Definition math8.h:392
LIB8STATIC_ALWAYS_INLINE uint16_t avg16r(uint16_t i, uint16_t j)
Calculate an integer average of two unsigned 16-bit integer values (uint16_t), rounded up.
Definition math8.h:271
LIB8STATIC_ALWAYS_INLINE int8_t qadd7(int8_t i, int8_t j)
Add one byte to another, saturating at 0x7F and -0x80.
Definition math8.h:64
LIB8STATIC_ALWAYS_INLINE uint8_t avg8(uint8_t i, uint8_t j)
Calculate an integer average of two unsigned 8-bit integer values (uint8_t), rounded down.
Definition math8.h:193
LIB8STATIC uint8_t sqrt16(uint16_t x)
Square root for 16-bit integers.
Definition math8.h:524
LIB8STATIC_ALWAYS_INLINE uint8_t add8(uint8_t i, uint8_t j)
Add one byte to another, with 8-bit result.
Definition math8.h:135
LIB8STATIC_ALWAYS_INLINE uint8_t avg8r(uint8_t i, uint8_t j)
Calculate an integer average of two unsigned 8-bit integer values (uint8_t), rounded up.
Definition math8.h:245
LIB8STATIC uint8_t submod8(uint8_t a, uint8_t b, uint8_t m)
Subtract two numbers, and calculate the modulo of the difference and a third number,...
Definition math8.h:425
LIB8STATIC uint8_t blend8(uint8_t a, uint8_t b, uint8_t amountOfB)
Blend a variable proportion (0-255) of one byte to another.
Definition math8.h:667
LIB8STATIC_ALWAYS_INLINE uint8_t mod8(uint8_t a, uint8_t m)
Calculate the remainder of one unsigned 8-bit value divided by another, aka A % M.
Definition math8.h:361
LIB8STATIC_ALWAYS_INLINE uint16_t add8to16(uint8_t i, uint16_t j)
Add one byte to two bytes, with 16-bit result.
Definition math8.h:153
LIB8STATIC_ALWAYS_INLINE uint8_t sub8(uint8_t i, uint8_t j)
Subtract one byte from another, 8-bit result.
Definition math8.h:174
LIB8STATIC_ALWAYS_INLINE int8_t avg7(int8_t i, int8_t j)
Calculate an integer average of two signed 7-bit integers (int8_t).
Definition math8.h:306
LIB8STATIC_ALWAYS_INLINE uint8_t qsub8(uint8_t i, uint8_t j)
Subtract one byte from another, saturating at 0x00.
Definition math8.h:103
LIB8STATIC_ALWAYS_INLINE uint8_t mul8(uint8_t i, uint8_t j)
8x8 bit multiplication, with 8-bit result.
Definition math8.h:446
LIB8STATIC_ALWAYS_INLINE void cleanup_R1()
Clean up the r1 register after a series of *LEAVING_R1_DIRTY calls.
Definition scale8.h:333
LIB8STATIC_ALWAYS_INLINE uint8_t scale8_LEAVING_R1_DIRTY(uint8_t i, fract8 scale)
This version of scale8() does not clean up the R1 register on AVR.
Definition scale8.h:170
#define LIB8STATIC
Define a LIB8TION member function as static inline with an "unused" attribute.
Definition lib8static.h:10
#define LIB8STATIC_ALWAYS_INLINE
Define a LIB8TION member function as always static inline.
Definition lib8static.h:12
Defines integer mapping functions.
Defines static inlining macros for lib8tion functions.
Implements the FastLED namespace macros.
#define FASTLED_NAMESPACE_END
End of the FastLED namespace.
Definition namespace.h:16
#define FASTLED_NAMESPACE_BEGIN
Start of the FastLED namespace.
Definition namespace.h:14
Fast, efficient 8-bit scaling functions specifically designed for high-performance LED programming.