author | Timothy Pearson <[email protected]> | 2011-08-10 12:13:27 -0500 |
---|---|---|
committer | Timothy Pearson <[email protected]> | 2011-08-10 12:13:27 -0500 |
commit | d296f1d337dabfeae5191955fdadb874965dbbe9 (patch) | |
tree | 1a4b3b4cca01f2ea77eee2497297219d60e9bbd4 /tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c | |
parent | eaa7ee2e0bbca40ba3173c4304f81957e8964291 (diff) | |
download | experimental-d296f1d337dabfeae5191955fdadb874965dbbe9.tar.gz experimental-d296f1d337dabfeae5191955fdadb874965dbbe9.zip |
rename the following methods:
tqparent → parent
tqmask → mask
Diffstat (limited to 'tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c')
-rw-r--r-- | tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c | 248 |
1 file changed, 124 insertions, 124 deletions
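For orientation before the diff: the `mask` argument of `png_combine_row()` (renamed from `tqmask` below) is documented in the hunks that follow as an 8-bit pattern that repeats every 8 pixels, where a set bit means "take this pixel from the newly read row" and `0xff` means "copy everything". A minimal scalar sketch of that selection logic is shown here, simplified to 8-bit pixels and using an illustrative function name; the real routine also handles 1-, 2-, 4-bit and multi-byte depths plus the MMX fast path.

```c
#include <stddef.h>

typedef unsigned char png_byte;

/* Illustrative sketch (not libpng code): for 8-bit pixels, bit i of the
 * repeating 8-bit mask decides whether pixel x with x % 8 == i is copied
 * from the freshly read row (src) or left as-is in the display row (dst).
 * Passing mask == 0xff copies every pixel. */
static void combine_row_8bpp_sketch(png_byte *dst, const png_byte *src,
                                    size_t width, int mask)
{
    int m = 0x80;                      /* start at the high bit of the pattern */

    for (size_t x = 0; x < width; x++) {
        if (m & mask)                  /* bit set: combine (copy) this pixel */
            dst[x] = src[x];
        m >>= 1;
        if (m == 0)
            m = 0x80;                  /* the pattern repeats every 8 pixels */
    }
}
```

The diff's own scalar fallback loops (the `if (m & mask)` branches below) follow exactly this per-pixel pattern at each pixel depth.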
diff --git a/tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c b/tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c index 1056251..88a1da2 100644 --- a/tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c +++ b/tqtinterface/qt4/src/3rdparty/libpng/pnggccrd.c @@ -109,7 +109,7 @@ * listings... Apparently register spillage has to do with ebx, since * it's used to index the global offset table. Commenting it out of the * input-reg lists in png_combine_row() eliminated compiler barfage, so - * ifdef'd with __PIC__ macro: if defined, use a global for untqmask + * ifdef'd with __PIC__ macro: if defined, use a global for unmask * * 19991107: * - verified CPUID clobberage: 12-char string constant ("GenuineIntel", @@ -146,7 +146,7 @@ * pnggccrd.c:994: more than 10 operands in `asm' * pnggccrd.c:1177: more than 10 operands in `asm' * They are all the same problem and can be worked around by using the - * global _untqmask variable unconditionally, not just in the -fPIC case. + * global _unmask variable unconditionally, not just in the -fPIC case. * Reportedly earlier versions of gcc also have the problem with more than * 10 operands; they just don't report it. Much strangeness ensues, etc. * @@ -257,22 +257,22 @@ static const int FARDATA png_pass_width[7] = {8, 4, 4, 2, 2, 1, 1}; # define _mmx_supported mmx_supported # define _const4 const4 # define _const6 const6 -# define _mask8_0 tqmask8_0 -# define _mask16_1 tqmask16_1 -# define _mask16_0 tqmask16_0 -# define _mask24_2 tqmask24_2 -# define _mask24_1 tqmask24_1 -# define _mask24_0 tqmask24_0 -# define _mask32_3 tqmask32_3 -# define _mask32_2 tqmask32_2 -# define _mask32_1 tqmask32_1 -# define _mask32_0 tqmask32_0 -# define _mask48_5 tqmask48_5 -# define _mask48_4 tqmask48_4 -# define _mask48_3 tqmask48_3 -# define _mask48_2 tqmask48_2 -# define _mask48_1 tqmask48_1 -# define _mask48_0 tqmask48_0 +# define _mask8_0 mask8_0 +# define _mask16_1 mask16_1 +# define _mask16_0 mask16_0 +# define _mask24_2 mask24_2 +# define _mask24_1 mask24_1 +# define _mask24_0 mask24_0 +# define _mask32_3 mask32_3 +# define _mask32_2 mask32_2 +# define _mask32_1 mask32_1 +# define _mask32_0 mask32_0 +# define _mask48_5 mask48_5 +# define _mask48_4 mask48_4 +# define _mask48_3 mask48_3 +# define _mask48_2 mask48_2 +# define _mask48_1 mask48_1 +# define _mask48_0 mask48_0 # define _LBCarryMask LBCarryMask # define _HBClearMask HBClearMask # define _ActiveMask ActiveMask @@ -281,7 +281,7 @@ static const int FARDATA png_pass_width[7] = {8, 4, 4, 2, 2, 1, 1}; # define _ShiftBpp ShiftBpp # define _ShiftRem ShiftRem #ifdef PNG_THREAD_UNSAFE_OK -# define _untqmask untqmask +# define _unmask unmask # define _FullLength FullLength # define _MMXLength MMXLength # define _dif dif @@ -295,14 +295,14 @@ static const int FARDATA png_pass_width[7] = {8, 4, 4, 2, 2, 1, 1}; /* These constants are used in the inlined MMX assembly code. Ignore gcc's "At top level: defined but not used" warnings. */ -/* GRR 20000706: originally _untqmask was needed only when compiling with -fPIC, +/* GRR 20000706: originally _unmask was needed only when compiling with -fPIC, * since that case uses the %ebx register for indexing the Global Offset Table * and there were no other registers available. But gcc 2.95 and later emit - * "more than 10 operands in `asm'" errors when %ebx is used to preload untqmask + * "more than 10 operands in `asm'" errors when %ebx is used to preload unmask * in the non-PIC case, so we'll just use the global unconditionally now. 
*/ #ifdef PNG_THREAD_UNSAFE_OK -static int _untqmask; +static int _unmask; #endif static unsigned long long _mask8_0 = 0x0102040810204080LL; @@ -394,19 +394,19 @@ static int _mmx_supported = 2; /* Combines the row recently read in with the previous row. This routine takes care of alpha and transparency if requested. This routine also handles the two methods of progressive display - of interlaced images, depending on the tqmask value. - The tqmask value describes which pixels are to be combined with + of interlaced images, depending on the mask value. + The mask value describes which pixels are to be combined with the row. The pattern always repeats every 8 pixels, so just 8 bits are needed. A one indicates the pixel is to be combined; a zero indicates the pixel is to be skipped. This is in addition to any alpha or transparency value associated with the pixel. - If you want all pixels to be combined, pass 0xff (255) in tqmask. */ + If you want all pixels to be combined, pass 0xff (255) in mask. */ /* Use this routine for the x86 platform - it uses a faster MMX routine if the machine supports MMX. */ void /* PRIVATE */ -png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) +png_combine_row(png_structp png_ptr, png_bytep row, int mask) { png_debug(1, "in png_combine_row (pnggccrd.c)\n"); @@ -418,13 +418,13 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) } #endif - if (tqmask == 0xff) + if (mask == 0xff) { - png_debug(2,"tqmask == 0xff: doing single png_memcpy()\n"); + png_debug(2,"mask == 0xff: doing single png_memcpy()\n"); png_memcpy(row, png_ptr->row_buf + 1, (png_size_t)((png_ptr->width * png_ptr->row_info.pixel_depth + 7) >> 3)); } - else /* (png_combine_row() is never called with tqmask == 0) */ + else /* (png_combine_row() is never called with mask == 0) */ { switch (png_ptr->row_info.pixel_depth) { @@ -459,7 +459,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) for (i = 0; i < png_ptr->width; i++) { - if (m & tqmask) + if (m & mask) { int value; @@ -517,7 +517,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) for (i = 0; i < png_ptr->width; i++) { - if (m & tqmask) + if (m & mask) { value = (*sp >> shift) & 0x3; *dp &= (png_byte)((0x3f3f >> (6 - shift)) & 0xff); @@ -571,7 +571,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) for (i = 0; i < png_ptr->width; i++) { - if (m & tqmask) + if (m & mask) { value = (*sp >> shift) & 0xf; *dp &= (png_byte)((0xf0f >> (4 - shift)) & 0xff); @@ -614,14 +614,14 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) int dummy_value_c; int dummy_value_S; int dummy_value_D; - _untqmask = ~tqmask; // global variable for -fPIC version + _unmask = ~mask; // global variable for -fPIC version srcptr = png_ptr->row_buf + 1; dstptr = row; len = png_ptr->width &~7; // reduce to multiple of 8 diff = (int) (png_ptr->width & 7); // amount lost __asm__ __volatile__ ( - "movd _untqmask, %%mm7 \n\t" // load bit pattern + "movd _unmask, %%mm7 \n\t" // load bit pattern "psubb %%mm6, %%mm6 \n\t" // zero mm6 "punpcklbw %%mm7, %%mm7 \n\t" "punpcklwd %%mm7, %%mm7 \n\t" @@ -655,7 +655,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) "movl %%eax, %%ecx \n\t" "cmpl $0, %%ecx \n\t" "jz end8 \n\t" -// preload "movl tqmask, %%edx \n\t" +// preload "movl mask, %%edx \n\t" "sall $24, %%edx \n\t" // make low byte, high byte "secondloop8: \n\t" @@ -682,9 +682,9 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) : "3" (srcptr), // esi // input regs 
"4" (dstptr), // edi "0" (diff), // eax -// was (untqmask) "b" RESERVED // ebx // Global Offset Table idx +// was (unmask) "b" RESERVED // ebx // Global Offset Table idx "2" (len), // ecx - "1" (tqmask) // edx + "1" (mask) // edx #if 0 /* MMX regs (%mm0, etc.) not supported by gcc 2.7.2.3 or egcs 1.1 */ : "%mm0", "%mm4", "%mm6", "%mm7" // clobber list @@ -752,14 +752,14 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) int dummy_value_c; int dummy_value_S; int dummy_value_D; - _untqmask = ~tqmask; // global variable for -fPIC version + _unmask = ~mask; // global variable for -fPIC version srcptr = png_ptr->row_buf + 1; dstptr = row; len = png_ptr->width &~7; // reduce to multiple of 8 diff = (int) (png_ptr->width & 7); // amount lost // __asm__ __volatile__ ( - "movd _untqmask, %%mm7 \n\t" // load bit pattern + "movd _unmask, %%mm7 \n\t" // load bit pattern "psubb %%mm6, %%mm6 \n\t" // zero mm6 "punpcklbw %%mm7, %%mm7 \n\t" "punpcklwd %%mm7, %%mm7 \n\t" @@ -808,7 +808,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) "movl %%eax, %%ecx \n\t" "cmpl $0, %%ecx \n\t" "jz end16 \n\t" -// preload "movl tqmask, %%edx \n\t" +// preload "movl mask, %%edx \n\t" "sall $24, %%edx \n\t" // make low byte, high byte "secondloop16: \n\t" @@ -833,9 +833,9 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) "=D" (dummy_value_D) : "0" (diff), // eax // input regs -// was (untqmask) " " RESERVED // ebx // Global Offset Table idx +// was (unmask) " " RESERVED // ebx // Global Offset Table idx "1" (len), // ecx - "2" (tqmask), // edx + "2" (mask), // edx "3" (srcptr), // esi "4" (dstptr) // edi @@ -905,14 +905,14 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) int dummy_value_c; int dummy_value_S; int dummy_value_D; - _untqmask = ~tqmask; // global variable for -fPIC version + _unmask = ~mask; // global variable for -fPIC version srcptr = png_ptr->row_buf + 1; dstptr = row; len = png_ptr->width &~7; // reduce to multiple of 8 diff = (int) (png_ptr->width & 7); // amount lost // __asm__ __volatile__ ( - "movd _untqmask, %%mm7 \n\t" // load bit pattern + "movd _unmask, %%mm7 \n\t" // load bit pattern "psubb %%mm6, %%mm6 \n\t" // zero mm6 "punpcklbw %%mm7, %%mm7 \n\t" "punpcklwd %%mm7, %%mm7 \n\t" @@ -973,7 +973,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) "movl %%eax, %%ecx \n\t" "cmpl $0, %%ecx \n\t" "jz end24 \n\t" -// preload "movl tqmask, %%edx \n\t" +// preload "movl mask, %%edx \n\t" "sall $24, %%edx \n\t" // make low byte, high byte "secondloop24: \n\t" @@ -1003,9 +1003,9 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) : "3" (srcptr), // esi // input regs "4" (dstptr), // edi "0" (diff), // eax -// was (untqmask) "b" RESERVED // ebx // Global Offset Table idx +// was (unmask) "b" RESERVED // ebx // Global Offset Table idx "2" (len), // ecx - "1" (tqmask) // edx + "1" (mask) // edx #if 0 /* MMX regs (%mm0, etc.) 
not supported by gcc 2.7.2.3 or egcs 1.1 */ : "%mm0", "%mm1", "%mm2" // clobber list @@ -1073,14 +1073,14 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) int dummy_value_c; int dummy_value_S; int dummy_value_D; - _untqmask = ~tqmask; // global variable for -fPIC version + _unmask = ~mask; // global variable for -fPIC version srcptr = png_ptr->row_buf + 1; dstptr = row; len = png_ptr->width &~7; // reduce to multiple of 8 diff = (int) (png_ptr->width & 7); // amount lost // __asm__ __volatile__ ( - "movd _untqmask, %%mm7 \n\t" // load bit pattern + "movd _unmask, %%mm7 \n\t" // load bit pattern "psubb %%mm6, %%mm6 \n\t" // zero mm6 "punpcklbw %%mm7, %%mm7 \n\t" "punpcklwd %%mm7, %%mm7 \n\t" @@ -1151,7 +1151,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) "movl %%eax, %%ecx \n\t" "cmpl $0, %%ecx \n\t" "jz end32 \n\t" -// preload "movl tqmask, %%edx \n\t" +// preload "movl mask, %%edx \n\t" "sall $24, %%edx \n\t" // low byte => high byte "secondloop32: \n\t" @@ -1178,9 +1178,9 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) : "3" (srcptr), // esi // input regs "4" (dstptr), // edi "0" (diff), // eax -// was (untqmask) "b" RESERVED // ebx // Global Offset Table idx +// was (unmask) "b" RESERVED // ebx // Global Offset Table idx "2" (len), // ecx - "1" (tqmask) // edx + "1" (mask) // edx #if 0 /* MMX regs (%mm0, etc.) not supported by gcc 2.7.2.3 or egcs 1.1 */ : "%mm0", "%mm1", "%mm2", "%mm3" // clobber list @@ -1248,14 +1248,14 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) int dummy_value_c; int dummy_value_S; int dummy_value_D; - _untqmask = ~tqmask; // global variable for -fPIC version + _unmask = ~mask; // global variable for -fPIC version srcptr = png_ptr->row_buf + 1; dstptr = row; len = png_ptr->width &~7; // reduce to multiple of 8 diff = (int) (png_ptr->width & 7); // amount lost // __asm__ __volatile__ ( - "movd _untqmask, %%mm7 \n\t" // load bit pattern + "movd _unmask, %%mm7 \n\t" // load bit pattern "psubb %%mm6, %%mm6 \n\t" // zero mm6 "punpcklbw %%mm7, %%mm7 \n\t" "punpcklwd %%mm7, %%mm7 \n\t" @@ -1343,7 +1343,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) "movl %%eax, %%ecx \n\t" "cmpl $0, %%ecx \n\t" "jz end48 \n\t" -// preload "movl tqmask, %%edx \n\t" +// preload "movl mask, %%edx \n\t" "sall $24, %%edx \n\t" // make low byte, high byte "secondloop48: \n\t" @@ -1370,9 +1370,9 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) : "3" (srcptr), // esi // input regs "4" (dstptr), // edi "0" (diff), // eax -// was (untqmask) "b" RESERVED // ebx // Global Offset Table idx +// was (unmask) "b" RESERVED // ebx // Global Offset Table idx "2" (len), // ecx - "1" (tqmask) // edx + "1" (mask) // edx #if 0 /* MMX regs (%mm0, etc.) 
not supported by gcc 2.7.2.3 or egcs 1.1 */ : "%mm0", "%mm1", "%mm2", "%mm3" // clobber list @@ -1468,7 +1468,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask) } } /* end switch (png_ptr->row_info.pixel_depth) */ - } /* end if (non-trivial tqmask) */ + } /* end if (non-trivial mask) */ } /* end png_combine_row() */ @@ -2802,7 +2802,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, "movl %%edi, _dif \n\t" // take start of row "addl %%ebx, _dif \n\t" // add bpp "addl $0xf, _dif \n\t" // add 7+8 to incr past tqalignment bdry - "andl $0xfffffff8, _dif \n\t" // tqmask to tqalignment boundary + "andl $0xfffffff8, _dif \n\t" // mask to tqalignment boundary "subl %%edi, _dif \n\t" // subtract from start => value ebx at "jz avg_go \n\t" // tqalignment @@ -2901,7 +2901,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, // Avg for each Active // byte // add 2nd active group (Raw(x-bpp)/2) to average with _LBCarry - "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 tqmask to cover + "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 mask to cover // bytes 3-5 "movq %%mm0, %%mm2 \n\t" // mov updated Raws to mm2 "psllq _ShiftBpp, %%mm2 \n\t" // shift data to pos. correctly @@ -2922,7 +2922,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, // byte // add 3rd active group (Raw(x-bpp)/2) to average with _LBCarry - "psllq _ShiftBpp, %%mm6 \n\t" // shift mm6 tqmask to cover last + "psllq _ShiftBpp, %%mm6 \n\t" // shift mm6 mask to cover last // two // bytes "movq %%mm0, %%mm2 \n\t" // mov updated Raws to mm2 @@ -2991,7 +2991,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, // preload "movl prev_row, %%esi \n\t" // esi: Prior(x) "movq %%mm7, %%mm6 \n\t" "movq _LBCarryMask, %%mm5 \n\t" - "psllq _ShiftBpp, %%mm6 \n\t" // create tqmask for 2nd active + "psllq _ShiftBpp, %%mm6 \n\t" // create mask for 2nd active // group // prime the pump: load the first Raw(x-bpp) data set @@ -3118,7 +3118,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, // for each Active byte // add 2nd active group (Raw(x-bpp)/2) to average with _LBCarry - "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 tqmask to cover + "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 mask to cover // bytes 2 & 3 "movq %%mm0, %%mm2 \n\t" // mov updated Raws to mm2 "psllq _ShiftBpp, %%mm2 \n\t" // shift data to pos. correctly @@ -3139,7 +3139,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, // Avg for each Active byte // add 3rd active group (Raw(x-bpp)/2) to average with _LBCarry - "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 tqmask to cover + "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 mask to cover // bytes 4 & 5 "movq %%mm0, %%mm2 \n\t" // mov updated Raws to mm2 "psllq _ShiftBpp, %%mm2 \n\t" // shift data to pos. correctly @@ -3159,7 +3159,7 @@ png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row, // Avg for each Active byte // add 4th active group (Raw(x-bpp)/2) to average with _LBCarry - "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 tqmask to cover + "psllq _ShiftBpp, %%mm6 \n\t" // shift the mm6 mask to cover // bytes 6 & 7 "movq %%mm0, %%mm2 \n\t" // mov updated Raws to mm2 "psllq _ShiftBpp, %%mm2 \n\t" // shift data to pos. 
correctly @@ -3467,7 +3467,7 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "xorl %%ecx, %%ecx \n\t" "addl $0xf, _dif \n\t" // add 7 + 8 to incr past tqalignment // boundary - "andl $0xfffffff8, _dif \n\t" // tqmask to tqalignment boundary + "andl $0xfffffff8, _dif \n\t" // mask to tqalignment boundary "subl %%edi, _dif \n\t" // subtract from start ==> value ebx // at tqalignment "jz paeth_go \n\t" @@ -3609,16 +3609,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -3627,9 +3627,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? "movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -3670,8 +3670,8 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm5, %%mm0 \n\t" // create tqmask pbv bytes < 0 - "pcmpgtw %%mm4, %%mm7 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm5, %%mm0 \n\t" // create mask pbv bytes < 0 + "pcmpgtw %%mm4, %%mm7 \n\t" // create mask pav bytes < 0 "pand %%mm5, %%mm0 \n\t" // only pbv bytes < 0 in mm0 "pand %%mm4, %%mm7 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm0, %%mm5 \n\t" @@ -3679,7 +3679,7 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm5 \n\t" "psubw %%mm7, %%mm4 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm0, %%mm6 \n\t" // test pa <= pb @@ -3687,9 +3687,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? 
"movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -3733,8 +3733,8 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" @@ -3742,7 +3742,7 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm0, %%mm6 \n\t" // test pa <= pb @@ -3750,9 +3750,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? "movq %%mm7, %%mm0 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" "pandn %%mm1, %%mm0 \n\t" "pandn %%mm4, %%mm7 \n\t" @@ -3833,16 +3833,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -3851,9 +3851,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? 
"movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -3897,16 +3897,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -3915,9 +3915,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? "movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -3986,16 +3986,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -4004,9 +4004,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? 
"movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -4042,16 +4042,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -4060,9 +4060,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? "movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -4131,16 +4131,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -4149,9 +4149,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? 
"movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -4188,16 +4188,16 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, // pa = abs(p-a) = abs(pav) // pb = abs(p-b) = abs(pbv) // pc = abs(p-c) = abs(pcv) - "pcmpgtw %%mm4, %%mm0 \n\t" // create tqmask pav bytes < 0 + "pcmpgtw %%mm4, %%mm0 \n\t" // create mask pav bytes < 0 "paddw %%mm5, %%mm6 \n\t" "pand %%mm4, %%mm0 \n\t" // only pav bytes < 0 in mm7 - "pcmpgtw %%mm5, %%mm7 \n\t" // create tqmask pbv bytes < 0 + "pcmpgtw %%mm5, %%mm7 \n\t" // create mask pbv bytes < 0 "psubw %%mm0, %%mm4 \n\t" "pand %%mm5, %%mm7 \n\t" // only pbv bytes < 0 in mm0 "psubw %%mm0, %%mm4 \n\t" "psubw %%mm7, %%mm5 \n\t" "pxor %%mm0, %%mm0 \n\t" - "pcmpgtw %%mm6, %%mm0 \n\t" // create tqmask pcv bytes < 0 + "pcmpgtw %%mm6, %%mm0 \n\t" // create mask pcv bytes < 0 "pand %%mm6, %%mm0 \n\t" // only pav bytes < 0 in mm7 "psubw %%mm7, %%mm5 \n\t" "psubw %%mm0, %%mm6 \n\t" @@ -4206,9 +4206,9 @@ png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, "psubw %%mm0, %%mm6 \n\t" "pcmpgtw %%mm5, %%mm7 \n\t" // pa > pb? "movq %%mm7, %%mm0 \n\t" - // use mm7 tqmask to merge pa & pb + // use mm7 mask to merge pa & pb "pand %%mm7, %%mm5 \n\t" - // use mm0 tqmask copy to merge a & b + // use mm0 mask copy to merge a & b "pand %%mm0, %%mm2 \n\t" "pandn %%mm4, %%mm7 \n\t" "pandn %%mm1, %%mm0 \n\t" @@ -4504,7 +4504,7 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) "addl $0xf, _dif \n\t" // add 7 + 8 to incr past // tqalignment boundary "xorl %%ecx, %%ecx \n\t" - "andl $0xfffffff8, _dif \n\t" // tqmask to tqalignment boundary + "andl $0xfffffff8, _dif \n\t" // mask to tqalignment boundary "subl %%edi, _dif \n\t" // subtract from start ==> value "jz sub_go \n\t" // ecx at tqalignment @@ -4556,13 +4556,13 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) "addl %%eax, %%edi \n\t" // rp = row + bpp "movq %%mm7, %%mm6 \n\t" "movl _dif, %%edx \n\t" - "psllq _ShiftBpp, %%mm6 \n\t" // move tqmask in mm6 to cover + "psllq _ShiftBpp, %%mm6 \n\t" // move mask in mm6 to cover // 3rd active byte group // prime the pump: load the first Raw(x-bpp) data set "movq -8(%%edi,%%edx,), %%mm1 \n\t" "sub_3lp: \n\t" // shift data for adding first - "psrlq _ShiftRem, %%mm1 \n\t" // bpp bytes (no need for tqmask; + "psrlq _ShiftRem, %%mm1 \n\t" // bpp bytes (no need for mask; // shift clears inactive bytes) // add 1st active group "movq (%%edi,%%edx,), %%mm0 \n\t" @@ -4571,13 +4571,13 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) // add 2nd active group "movq %%mm0, %%mm1 \n\t" // mov updated Raws to mm1 "psllq _ShiftBpp, %%mm1 \n\t" // shift data to pos. correctly - "pand %%mm7, %%mm1 \n\t" // tqmask to use 2nd active group + "pand %%mm7, %%mm1 \n\t" // mask to use 2nd active group "paddb %%mm1, %%mm0 \n\t" // add 3rd active group "movq %%mm0, %%mm1 \n\t" // mov updated Raws to mm1 "psllq _ShiftBpp, %%mm1 \n\t" // shift data to pos. 
correctly - "pand %%mm6, %%mm1 \n\t" // tqmask to use 3rd active group + "pand %%mm6, %%mm1 \n\t" // mask to use 3rd active group "addl $8, %%edx \n\t" "paddb %%mm1, %%mm0 \n\t" @@ -4651,7 +4651,7 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) "movq -8(%%edi,%%edx,), %%mm1 \n\t" "sub_4lp: \n\t" // shift data for adding first - "psrlq _ShiftRem, %%mm1 \n\t" // bpp bytes (no need for tqmask; + "psrlq _ShiftRem, %%mm1 \n\t" // bpp bytes (no need for mask; // shift clears inactive bytes) "movq (%%edi,%%edx,), %%mm0 \n\t" "paddb %%mm1, %%mm0 \n\t" @@ -4693,19 +4693,19 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) "movl _dif, %%edx \n\t" "movq %%mm7, %%mm6 \n\t" // preload "movl row, %%edi \n\t" - "psllq _ShiftBpp, %%mm6 \n\t" // move tqmask in mm6 to cover + "psllq _ShiftBpp, %%mm6 \n\t" // move mask in mm6 to cover // 3rd active byte group "movl %%edi, %%esi \n\t" // lp = row "movq %%mm6, %%mm5 \n\t" // preload "movl bpp, %%eax \n\t" "addl %%eax, %%edi \n\t" // rp = row + bpp - "psllq _ShiftBpp, %%mm5 \n\t" // move tqmask in mm5 to cover + "psllq _ShiftBpp, %%mm5 \n\t" // move mask in mm5 to cover // 4th active byte group // prime the pump: load the first Raw(x-bpp) data set "movq -8(%%edi,%%edx,), %%mm1 \n\t" "sub_2lp: \n\t" // shift data for adding first - "psrlq _ShiftRem, %%mm1 \n\t" // bpp bytes (no need for tqmask; + "psrlq _ShiftRem, %%mm1 \n\t" // bpp bytes (no need for mask; // shift clears inactive bytes) // add 1st active group "movq (%%edi,%%edx,), %%mm0 \n\t" @@ -4714,19 +4714,19 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) // add 2nd active group "movq %%mm0, %%mm1 \n\t" // mov updated Raws to mm1 "psllq _ShiftBpp, %%mm1 \n\t" // shift data to pos. correctly - "pand %%mm7, %%mm1 \n\t" // tqmask to use 2nd active group + "pand %%mm7, %%mm1 \n\t" // mask to use 2nd active group "paddb %%mm1, %%mm0 \n\t" // add 3rd active group "movq %%mm0, %%mm1 \n\t" // mov updated Raws to mm1 "psllq _ShiftBpp, %%mm1 \n\t" // shift data to pos. correctly - "pand %%mm6, %%mm1 \n\t" // tqmask to use 3rd active group + "pand %%mm6, %%mm1 \n\t" // mask to use 3rd active group "paddb %%mm1, %%mm0 \n\t" // add 4th active group "movq %%mm0, %%mm1 \n\t" // mov updated Raws to mm1 "psllq _ShiftBpp, %%mm1 \n\t" // shift data to pos. correctly - "pand %%mm5, %%mm1 \n\t" // tqmask to use 4th active group + "pand %%mm5, %%mm1 \n\t" // mask to use 4th active group "addl $8, %%edx \n\t" "paddb %%mm1, %%mm0 \n\t" "cmpl _MMXLength, %%edx \n\t" @@ -5359,7 +5359,7 @@ png_mmx_support(void) "incl %%eax \n\t" // ...increment eax to 1. This pair is // faster than the instruction "mov eax, 1" "cpuid \n\t" // get the CPU identification info again - "andl $0x800000, %%edx \n\t" // tqmask out all bits but MMX bit (23) + "andl $0x800000, %%edx \n\t" // mask out all bits but MMX bit (23) "cmpl $0, %%edx \n\t" // 0 = MMX not supported "jz 0f \n\t" // non-zero = yes, MMX IS supported |