Subject: libavcodec ARM: Use pld only on supported architectures
From: Sascha Hauer

The pld instruction is supported since armv5. Do not use it on older
architectures.

Signed-off-by: Sascha Hauer

---
 libavcodec/armv4l/dsputil_arm_s.S |  100 ++++++++++++++++++++------------------
 1 file changed, 53 insertions(+), 47 deletions(-)

Index: libavcodec/armv4l/dsputil_arm_s.S
===================================================================
--- a/libavcodec/armv4l/dsputil_arm_s.S.orig
+++ b/libavcodec/armv4l/dsputil_arm_s.S
@@ -19,6 +19,12 @@
 @ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 @
 
+#if defined __ARM_ARCH_4__ || defined __ARM_ARCH_3__ || defined __ARM_ARCH_4T__
+#define PLD(code...)
+#else
+#define PLD(code...) code
+#endif
+
 .macro ADJ_ALIGN_QUADWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
         mov \Rd0, \Rn0, lsr #(\shift * 8)
         mov \Rd1, \Rn1, lsr #(\shift * 8)
@@ -76,7 +82,7 @@
 put_pixels16_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r11, lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -87,7 +93,7 @@ put_pixels16_arm:
 1:      ldmia r1, {r4-r7}
         add r1, r1, r2
         stmia r0, {r4-r7}
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         add r0, r0, r2
         bne 1b
@@ -97,7 +103,7 @@ put_pixels16_arm:
 2:      ldmia r1, {r4-r8}
         add r1, r1, r2
         ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r9-r12}
         add r0, r0, r2
@@ -108,7 +114,7 @@ put_pixels16_arm:
 3:      ldmia r1, {r4-r8}
         add r1, r1, r2
         ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r9-r12}
         add r0, r0, r2
@@ -119,7 +125,7 @@ put_pixels16_arm:
 4:      ldmia r1, {r4-r8}
         add r1, r1, r2
         ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r9-r12}
         add r0, r0, r2
@@ -138,7 +144,7 @@ put_pixels16_arm:
 put_pixels8_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r5,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -149,7 +155,7 @@ put_pixels8_arm:
 1:      ldmia r1, {r4-r5}
         add r1, r1, r2
         subs r3, r3, #1
-        pld [r1]
+PLD( pld [r1] )
         stmia r0, {r4-r5}
         add r0, r0, r2
         bne 1b
@@ -159,7 +165,7 @@ put_pixels8_arm:
 2:      ldmia r1, {r4-r5, r12}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r4-r5}
         add r0, r0, r2
@@ -170,7 +176,7 @@ put_pixels8_arm:
 3:      ldmia r1, {r4-r5, r12}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r4-r5}
         add r0, r0, r2
@@ -181,7 +187,7 @@ put_pixels8_arm:
 4:      ldmia r1, {r4-r5, r12}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
-        pld [r1]
+PLD( pld [r1] )
         subs r3, r3, #1
         stmia r0, {r4-r5}
         add r0, r0, r2
@@ -200,7 +206,7 @@ put_pixels8_arm:
 put_pixels8_x2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r10,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -212,7 +218,7 @@ put_pixels8_x2_arm:
 1:      ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -225,7 +231,7 @@ put_pixels8_x2_arm:
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -238,7 +244,7 @@ put_pixels8_x2_arm:
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -250,7 +256,7 @@ put_pixels8_x2_arm:
 4:      ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         RND_AVG32 r8, r9, r6, r7, r5, r10, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -269,7 +275,7 @@ put_pixels8_x2_arm:
 put_no_rnd_pixels8_x2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r10,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -281,7 +287,7 @@ put_no_rnd_pixels8_x2_arm:
 1:      ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -294,7 +300,7 @@ put_no_rnd_pixels8_x2_arm:
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -307,7 +313,7 @@ put_no_rnd_pixels8_x2_arm:
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
         ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
         subs r3, r3, #1
         stmia r0, {r4-r5}
@@ -319,7 +325,7 @@ put_no_rnd_pixels8_x2_arm:
 4:      ldmia r1, {r4-r5, r10}
         add r1, r1, r2
         ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
-        pld [r1]
+PLD( pld [r1] )
         NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -340,7 +346,7 @@ put_no_rnd_pixels8_x2_arm:
 put_pixels8_y2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -354,13 +360,13 @@ put_pixels8_y2_arm:
         add r1, r1, r2
 6:      ldmia r1, {r6-r7}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         ldmia r1, {r4-r5}
         add r1, r1, r2
         stmia r0, {r8-r9}
         add r0, r0, r2
-        pld [r1]
+PLD( pld [r1] )
         RND_AVG32 r8, r9, r6, r7, r4, r5, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -371,18 +377,18 @@ put_pixels8_y2_arm:
 2:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
         RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
         subs r3, r3, #1
         RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -394,18 +400,18 @@ put_pixels8_y2_arm:
 3:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
         RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
         subs r3, r3, #1
         RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -417,18 +423,18 @@ put_pixels8_y2_arm:
 4:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
         RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
         subs r3, r3, #1
         RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -449,7 +455,7 @@ put_pixels8_y2_arm:
 put_no_rnd_pixels8_y2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adr r5, 5f
         ands r4, r1, #3
@@ -463,13 +469,13 @@ put_no_rnd_pixels8_y2_arm:
         add r1, r1, r2
 6:      ldmia r1, {r6-r7}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
         ldmia r1, {r4-r5}
         add r1, r1, r2
         stmia r0, {r8-r9}
         add r0, r0, r2
-        pld [r1]
+PLD( pld [r1] )
         NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12
         subs r3, r3, #1
         stmia r0, {r8-r9}
@@ -480,18 +486,18 @@ put_no_rnd_pixels8_y2_arm:
 2:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
         NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
         subs r3, r3, #1
         NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -503,18 +509,18 @@ put_no_rnd_pixels8_y2_arm:
 3:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
         NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
         subs r3, r3, #1
         NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -526,18 +532,18 @@ put_no_rnd_pixels8_y2_arm:
 4:
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
 6:      ldmia r1, {r7-r9}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
         NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
         stmia r0, {r10-r11}
         add r0, r0, r2
         ldmia r1, {r4-r6}
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
         ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
         subs r3, r3, #1
         NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
@@ -564,7 +570,7 @@ put_no_rnd_pixels8_y2_arm:
         ldmia r1, {r8-r10}
 .endif
         add r1, r1, r2
-        pld [r1]
+PLD( pld [r1] )
 .if \align == 0
         ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r6, r7, r8
 .elseif \align == 1
@@ -622,7 +628,7 @@ put_no_rnd_pixels8_y2_arm:
 put_pixels8_xy2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adrl r12, 5f
         ands r4, r1, #3
@@ -658,7 +664,7 @@
 put_no_rnd_pixels8_xy2_arm:
         @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
         @ block = word aligned, pixles = unaligned
-        pld [r1]
+PLD( pld [r1] )
         stmfd sp!, {r4-r11,lr} @ R14 is also called LR
         adrl r12, 5f
         ands r4, r1, #3
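
Not part of the patch, only an illustration of the pattern for reviewers: a
minimal stand-alone .S file using the same PLD() wrapper. The file must go
through the C preprocessor (capital .S), the function name below is made up
for the example and does not exist in libavcodec, and, as in the patch, it
assumes that any target which is not ARMv3/v4/v4T actually accepts pld.
Building with e.g. gcc -c -march=armv4t drops the prefetch entirely, while
-march=armv5te keeps it.

#if defined __ARM_ARCH_4__ || defined __ARM_ARCH_3__ || defined __ARM_ARCH_4T__
#define PLD(code...)            /* ARMv3/v4/v4T: swallow the argument, emit nothing */
#else
#define PLD(code...) code       /* newer cores: pass the instruction through */
#endif

        .text
        .global copy_rows_example       @ hypothetical helper, not in libavcodec
copy_rows_example:
        @ r0 = dst, r1 = src (word aligned), r2 = line_size, r3 = h
        stmfd   sp!, {r4-r5, lr}
1:      ldmia   r1, {r4-r5}             @ copy 8 bytes per row
        add     r1, r1, r2
PLD(    pld     [r1]    )               @ prefetch next row, only where pld exists
        stmia   r0, {r4-r5}
        add     r0, r0, r2
        subs    r3, r3, #1
        bne     1b
        ldmfd   sp!, {r4-r5, pc}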