[Sumover-dev] [svn commit] r3886 - vic/branches/mpeg4/postproc

sumover-dev at cs.ucl.ac.uk sumover-dev at cs.ucl.ac.uk
Wed Oct 4 18:05:53 BST 2006


Author: barz
Date: Wed Oct  4 18:05:29 2006
New Revision: 3886

Added:
   vic/branches/mpeg4/postproc/asmalign.h
   vic/branches/mpeg4/postproc/swscale_altivec_template.c
   vic/branches/mpeg4/postproc/yuv2rgb_altivec.c

Log:
postproc fixed

Added: vic/branches/mpeg4/postproc/asmalign.h
==============================================================================
--- (empty file)
+++ vic/branches/mpeg4/postproc/asmalign.h	Wed Oct  4 18:05:29 2006
@@ -0,0 +1,7 @@
+#ifdef SYS_DARWIN
+#define ASMALIGN8  ".align 3\n\t"
+#define ASMALIGN16 ".align 4\n\t"
+#else
+#define ASMALIGN8  ".balign 8\n\t"
+#define ASMALIGN16 ".balign 16\n\t"
+#endif

Added: vic/branches/mpeg4/postproc/swscale_altivec_template.c
==============================================================================
--- (empty file)
+++ vic/branches/mpeg4/postproc/swscale_altivec_template.c	Wed Oct  4 18:05:29 2006
@@ -0,0 +1,541 @@
+/*
+  AltiVec-enhanced yuv2yuvX
+
+    Copyright (C) 2004 Romain Dolbeau <romain at dolbeau.org>
+    based on the equivalent C code in "postproc/swscale.c"
+
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifdef CONFIG_DARWIN
+#define AVV(x...) (x)
+#else
+#define AVV(x...) {x}
+#endif
+
+#define vzero vec_splat_s32(0)
+
+static inline void
+altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
+  register int i;
+  vector unsigned int altivec_vectorShiftInt19 =
+    vec_add(vec_splat_u32(10),vec_splat_u32(9));
+  if ((unsigned long)dest % 16) {
+    /* badly aligned store, we force store alignment */
+    /* and will handle load misalignment on val w/ vec_perm */
+    for (i = 0 ; (i < dstW) &&
+	   (((unsigned long)dest + i) % 16) ; i++) {
+      int t = val[i] >> 19;
+      dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+    }
+    vector unsigned char perm1 = vec_lvsl(i << 2, val);
+    vector signed int v1 = vec_ld(i << 2, val);
+    for ( ; i < (dstW - 15); i+=16) {
+      int offset = i << 2;
+      vector signed int v2 = vec_ld(offset + 16, val);
+      vector signed int v3 = vec_ld(offset + 32, val);
+      vector signed int v4 = vec_ld(offset + 48, val);
+      vector signed int v5 = vec_ld(offset + 64, val);
+      vector signed int v12 = vec_perm(v1,v2,perm1);
+      vector signed int v23 = vec_perm(v2,v3,perm1);
+      vector signed int v34 = vec_perm(v3,v4,perm1);
+      vector signed int v45 = vec_perm(v4,v5,perm1);
+      
+      vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
+      vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
+      vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
+      vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
+      vector unsigned short vs1 = vec_packsu(vA, vB);
+      vector unsigned short vs2 = vec_packsu(vC, vD);
+      vector unsigned char vf = vec_packsu(vs1, vs2);
+      vec_st(vf, i, dest);
+      v1 = v5;
+    }
+  } else { // dest is properly aligned, great
+    for (i = 0; i < (dstW - 15); i+=16) {
+      int offset = i << 2;
+      vector signed int v1 = vec_ld(offset, val);
+      vector signed int v2 = vec_ld(offset + 16, val);
+      vector signed int v3 = vec_ld(offset + 32, val);
+      vector signed int v4 = vec_ld(offset + 48, val);
+      vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
+      vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
+      vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
+      vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
+      vector unsigned short vs1 = vec_packsu(v5, v6);
+      vector unsigned short vs2 = vec_packsu(v7, v8);
+      vector unsigned char vf = vec_packsu(vs1, vs2);
+      vec_st(vf, i, dest);
+    }
+  }
+  for ( ; i < dstW ; i++) {
+    int t = val[i] >> 19;
+    dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+  }
+}
+
+static inline void
+yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+		      int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+		      uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+{
+  const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
+  register int i, j;
+  {
+    int __attribute__ ((aligned (16))) val[dstW];
+    
+    for (i = 0; i < (dstW -7); i+=4) {
+      vec_st(vini, i << 2, val);
+    }
+    for (; i < dstW; i++) {
+      val[i] = (1 << 18);
+    }
+    
+    for (j = 0; j < lumFilterSize; j++) {
+      vector signed short vLumFilter = vec_ld(j << 1, lumFilter);
+      vector unsigned char perm0 = vec_lvsl(j << 1, lumFilter);
+      vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
+      vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
+      
+      vector unsigned char perm = vec_lvsl(0, lumSrc[j]);
+      vector signed short l1 = vec_ld(0, lumSrc[j]);
+      
+      for (i = 0; i < (dstW - 7); i+=8) {
+	int offset = i << 2;
+	vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
+	
+	vector signed int v1 = vec_ld(offset, val);
+	vector signed int v2 = vec_ld(offset + 16, val);
+	
+	vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
+	
+	vector signed int i1 = vec_mule(vLumFilter, ls);
+	vector signed int i2 = vec_mulo(vLumFilter, ls);
+	
+	vector signed int vf1 = vec_mergeh(i1, i2);
+	vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
+	
+	vector signed int vo1 = vec_add(v1, vf1);
+	vector signed int vo2 = vec_add(v2, vf2);
+	
+	vec_st(vo1, offset, val);
+	vec_st(vo2, offset + 16, val);
+	
+	l1 = l2;
+      }
+      for ( ; i < dstW; i++) {
+	val[i] += lumSrc[j][i] * lumFilter[j];
+      }
+    }
+    altivec_packIntArrayToCharArray(val,dest,dstW);
+  }
+  if (uDest != 0) {
+    int  __attribute__ ((aligned (16))) u[chrDstW];
+    int  __attribute__ ((aligned (16))) v[chrDstW];
+
+    for (i = 0; i < (chrDstW -7); i+=4) {
+      vec_st(vini, i << 2, u);
+      vec_st(vini, i << 2, v);
+    }
+    for (; i < chrDstW; i++) {
+      u[i] = (1 << 18);
+      v[i] = (1 << 18);
+    }
+    
+    for (j = 0; j < chrFilterSize; j++) {
+      vector signed short vChrFilter = vec_ld(j << 1, chrFilter);
+      vector unsigned char perm0 = vec_lvsl(j << 1, chrFilter);
+      vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
+      vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
+      
+      vector unsigned char perm = vec_lvsl(0, chrSrc[j]);
+      vector signed short l1 = vec_ld(0, chrSrc[j]);
+      vector signed short l1_V = vec_ld(2048 << 1, chrSrc[j]);
+      
+      for (i = 0; i < (chrDstW - 7); i+=8) {
+	int offset = i << 2;
+	vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
+	vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);
+	
+	vector signed int v1 = vec_ld(offset, u);
+	vector signed int v2 = vec_ld(offset + 16, u);
+	vector signed int v1_V = vec_ld(offset, v);
+	vector signed int v2_V = vec_ld(offset + 16, v);
+	
+	vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
+	vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]
+	
+	vector signed int i1 = vec_mule(vChrFilter, ls);
+	vector signed int i2 = vec_mulo(vChrFilter, ls);
+	vector signed int i1_V = vec_mule(vChrFilter, ls_V);
+	vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
+	
+	vector signed int vf1 = vec_mergeh(i1, i2);
+	vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+	vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
+	vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+	
+	vector signed int vo1 = vec_add(v1, vf1);
+	vector signed int vo2 = vec_add(v2, vf2);
+	vector signed int vo1_V = vec_add(v1_V, vf1_V);
+	vector signed int vo2_V = vec_add(v2_V, vf2_V);
+	
+	vec_st(vo1, offset, u);
+	vec_st(vo2, offset + 16, u);
+	vec_st(vo1_V, offset, v);
+	vec_st(vo2_V, offset + 16, v);
+	
+	l1 = l2;
+	l1_V = l2_V;
+      }
+      for ( ; i < chrDstW; i++) {
+	u[i] += chrSrc[j][i] * chrFilter[j];
+	v[i] += chrSrc[j][i + 2048] * chrFilter[j];
+      } 
+    }
+    altivec_packIntArrayToCharArray(u,uDest,chrDstW);
+    altivec_packIntArrayToCharArray(v,vDest,chrDstW);
+  }
+}
+
+static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) {
+  register int i;
+  int __attribute__ ((aligned (16))) tempo[4];
+
+  if (filterSize % 4) {
+    for(i=0; i<dstW; i++) {
+      register int j;
+      register int srcPos = filterPos[i];
+      register int val = 0;
+      for(j=0; j<filterSize; j++) {
+	val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+      }
+      dst[i] = MIN(MAX(0, val>>7), (1<<15)-1);
+    }
+  }
+  else
+  switch (filterSize) {
+  case 4:
+    {
+      for(i=0; i<dstW; i++) {
+	register int j;
+	register int srcPos = filterPos[i];
+
+	vector unsigned char src_v0 = vec_ld(srcPos, src);
+	vector unsigned char src_v1;
+	if ((((int)src + srcPos)% 16) > 12) {
+	  src_v1 = vec_ld(srcPos + 16, src);
+	}
+	vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+	vector signed short src_v = // vec_unpackh sign-extends...
+	  (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+	// now put our elements in the even slots
+	src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+	vector signed short filter_v = vec_ld(i << 3, filter);
+        // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)
+
+        // the neat trick : we only care for half the elements,
+        // high or low depending on (i<<3)%16 (it's 0 or 8 here),
+        // and we're going to use vec_mule, so we chose
+        // carefully how to "unpack" the elements into the even slots
+	if ((i << 3) % 16)
+	  filter_v = vec_mergel(filter_v,(vector signed short)vzero);
+	else
+	  filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
+
+	vector signed int val_vEven = vec_mule(src_v, filter_v);
+	vector signed int val_s = vec_sums(val_vEven, vzero);
+	vec_st(val_s, 0, tempo);
+	dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+    }
+    break;
+
+  case 8:
+    {
+      for(i=0; i<dstW; i++) {
+	register int srcPos = filterPos[i];
+
+	vector unsigned char src_v0 = vec_ld(srcPos, src);
+	vector unsigned char src_v1;
+	if ((((int)src + srcPos)% 16) > 8) {
+	  src_v1 = vec_ld(srcPos + 16, src);
+	}
+	vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+	vector signed short src_v = // vec_unpackh sign-extends...
+	  (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+	vector signed short filter_v = vec_ld(i << 4, filter);
+        // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
+
+	vector signed int val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+	vector signed int val_s = vec_sums(val_v, vzero);
+	vec_st(val_s, 0, tempo);
+	dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+    }
+    break;
+
+  case 16:
+    {
+      for(i=0; i<dstW; i++) {
+	register int srcPos = filterPos[i];
+
+	vector unsigned char src_v0 = vec_ld(srcPos, src);
+	vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+	vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+	vector signed short src_vA = // vec_unpackh sign-extends...
+	  (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+	vector signed short src_vB = // vec_unpackh sign-extends...
+	  (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+	vector signed short filter_v0 = vec_ld(i << 5, filter);
+       	vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+        // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+
+	vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+	vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+	vector signed int val_s = vec_sums(val_v, vzero);
+
+	vec_st(val_s, 0, tempo);
+	dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+    }
+    break;
+    
+  default:
+    {
+      for(i=0; i<dstW; i++) {
+	register int j;
+	register int srcPos = filterPos[i];
+
+        vector signed int val_v = (vector signed int)vzero;
+	vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+        vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char permS = vec_lvsl(srcPos, src);
+
+        for (j = 0 ; j < filterSize - 15; j += 16) {
+          vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+          vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+          
+          vector signed short src_vA = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+          vector signed short src_vB = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+          
+          vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+          vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+          vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
+          vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
+          
+          vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
+          val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+          filter_v0R = filter_v2R;
+          src_v0 = src_v1;
+        }
+
+        if (j < (filterSize-7)) {
+          // loading src_v0 is useless, it's already done above
+          //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
+          vector unsigned char src_v1;
+          if ((((int)src + srcPos)% 16) > 8) {
+            src_v1 = vec_ld(srcPos + j + 16, src);
+          }
+          vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+          
+          vector signed short src_v = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+          // loading filter_v0R is useless, it's already done above
+          //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+          vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+          vector signed short filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+          
+          val_v = vec_msums(src_v, filter_v, val_v);
+        }
+
+        vector signed int val_s = vec_sums(val_v, vzero);
+          
+        vec_st(val_s, 0, tempo);
+        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);        
+      }
+      
+    }
+  }
+}
+
+static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+     int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
+  uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+  // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+  uint8_t *ysrc = src[0];
+  uint8_t *usrc = src[1];
+  uint8_t *vsrc = src[2];
+  const int width = c->srcW;
+  const int height = srcSliceH;
+  const int lumStride = srcStride[0];
+  const int chromStride = srcStride[1];
+  const int dstStride = dstStride_a[0];
+  const vector unsigned char yperm = vec_lvsl(0, ysrc);
+  const int vertLumPerChroma = 2;  
+  register unsigned int y;
+
+  if(width&15){
+    yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
+    return srcSliceH;
+  }
+
+  /* this code assumes:
+
+  1) dst is 16-byte aligned
+  2) dstStride is a multiple of 16
+  3) width is a multiple of 16
+  4) lum & chrom strides are multiples of 8
+  */
+  
+  for(y=0; y<height; y++)
+    {
+      int i;
+      for (i = 0; i < width - 31; i+= 32) {
+	const unsigned int j = i >> 1;
+	vector unsigned char v_yA = vec_ld(i, ysrc);
+	vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+	vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+	vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+	vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+	vector unsigned char v_uA = vec_ld(j, usrc);
+	vector unsigned char v_uB = vec_ld(j + 16, usrc);
+	vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+	vector unsigned char v_vA = vec_ld(j, vsrc);
+	vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+	vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+	vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+	vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+	vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+	vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+	vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
+	vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
+	vec_st(v_yuy2_0, (i << 1), dst);
+	vec_st(v_yuy2_1, (i << 1) + 16, dst);
+	vec_st(v_yuy2_2, (i << 1) + 32, dst);
+	vec_st(v_yuy2_3, (i << 1) + 48, dst);
+      }
+      if (i < width) {
+	const unsigned int j = i >> 1;
+	vector unsigned char v_y1 = vec_ld(i, ysrc);
+	vector unsigned char v_u = vec_ld(j, usrc);
+	vector unsigned char v_v = vec_ld(j, vsrc);
+	vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+	vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+	vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+	vec_st(v_yuy2_0, (i << 1), dst);
+	vec_st(v_yuy2_1, (i << 1) + 16, dst);
+      }
+      if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+	{
+	  usrc += chromStride;
+	  vsrc += chromStride;
+	}
+      ysrc += lumStride;
+      dst += dstStride;
+    }
+  
+  return srcSliceH;
+}
+
+static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+     int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
+  uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+  // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+  uint8_t *ysrc = src[0];
+  uint8_t *usrc = src[1];
+  uint8_t *vsrc = src[2];
+  const int width = c->srcW;
+  const int height = srcSliceH;
+  const int lumStride = srcStride[0];
+  const int chromStride = srcStride[1];
+  const int dstStride = dstStride_a[0];
+  const int vertLumPerChroma = 2;
+  const vector unsigned char yperm = vec_lvsl(0, ysrc);
+  register unsigned int y;
+
+  if(width&15){
+    yv12touyvy( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
+    return srcSliceH;
+  }
+
+  /* this code assumes:
+
+  1) dst is 16-byte aligned
+  2) dstStride is a multiple of 16
+  3) width is a multiple of 16
+  4) lum & chrom strides are multiples of 8
+  */
+  
+  for(y=0; y<height; y++)
+    {
+      int i;
+      for (i = 0; i < width - 31; i+= 32) {
+	const unsigned int j = i >> 1;
+	vector unsigned char v_yA = vec_ld(i, ysrc);
+	vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+	vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+	vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+	vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+	vector unsigned char v_uA = vec_ld(j, usrc);
+	vector unsigned char v_uB = vec_ld(j + 16, usrc);
+	vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+	vector unsigned char v_vA = vec_ld(j, vsrc);
+	vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+	vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+	vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+	vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+	vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+	vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+	vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
+	vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
+	vec_st(v_uyvy_0, (i << 1), dst);
+	vec_st(v_uyvy_1, (i << 1) + 16, dst);
+	vec_st(v_uyvy_2, (i << 1) + 32, dst);
+	vec_st(v_uyvy_3, (i << 1) + 48, dst);
+      }
+      if (i < width) {
+	const unsigned int j = i >> 1;
+	vector unsigned char v_y1 = vec_ld(i, ysrc);
+	vector unsigned char v_u = vec_ld(j, usrc);
+	vector unsigned char v_v = vec_ld(j, vsrc);
+	vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+	vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+	vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+	vec_st(v_uyvy_0, (i << 1), dst);
+	vec_st(v_uyvy_1, (i << 1) + 16, dst);
+      }
+      if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+	{
+	  usrc += chromStride;
+	  vsrc += chromStride;
+	}
+      ysrc += lumStride;
+      dst += dstStride;
+    }
+  return srcSliceH;
+}

Added: vic/branches/mpeg4/postproc/yuv2rgb_altivec.c
==============================================================================
--- (empty file)
+++ vic/branches/mpeg4/postproc/yuv2rgb_altivec.c	Wed Oct  4 18:05:29 2006
@@ -0,0 +1,953 @@
+/*
+  marc.hoffman at analog.com    March 8, 2004
+
+  Altivec Acceleration for Color Space Conversion revision 0.2
+
+  convert I420 YV12 to RGB in various formats,
+    it rejects images that are not in 420 formats
+    it rejects images that don't have widths of multiples of 16
+    it rejects images that don't have heights of multiples of 2
+  reject defers to C simulation codes.
+
+  lots of optimizations to be done here
+
+  1. need to fix saturation code, I just couldn't get it to fly with packs and adds.
+     so we currently use max min to clip
+
+  2. the inefficient use of chroma loading needs a bit of brushing up
+
+  3. analysis of pipeline stalls needs to be done, use shark to identify pipeline stalls
+
+
+  MODIFIED to calculate coeffs from currently selected color space.
+  MODIFIED core to be a macro which you spec the output format.
+  ADDED UYVY conversion which is never called due to some thing in SWSCALE.
+  CORRECTED algorithm selection to be strict on input formats.
+  ADDED runtime detection of altivec.
+
+  ADDED altivec_yuv2packedX vertical scl + RGB converter
+
+  March 27,2004
+  PERFORMANCE ANALYSIS
+
+  The C version uses 25% of the processor or ~250Mips for D1 video rawvideo used as test
+  The ALTIVEC version uses 10% of the processor or ~100Mips for D1 video same sequence
+
+  720*480*30  ~10MPS
+
+  so we have roughly 10 clocks per pixel; this is too high, something has to be wrong.
+
+  OPTIMIZED clip codes to utilize vec_max and vec_packs removing the need for vec_min.
+
+  OPTIMIZED DST OUTPUT cache/dma controls. we are pretty much
+  guaranteed to have the input video frame it was just decompressed so
+  it probably resides in L1 caches.  However we are creating the
+  output video stream this needs to use the DSTST instruction to
+  optimize for the cache.  We couple this with the fact that we are
+  not going to be visiting the input buffer again so we mark it Least
+  Recently Used.  This shaves 25% of the processor cycles off.
+
+  Now MEMCPY is the largest mips consumer in the system, probably due
+  to the inefficient X11 stuff.
+
+  GL libraries seem to be very slow on this machine 1.33Ghz PB running
+  Jaguar, this is not the case for my 1Ghz PB.  I thought it might be
+  a versioning issues, however i have libGL.1.2.dylib for both
+  machines. ((We need to figure this out now))
+
+  GL2 libraries work now with patch for RGB32
+
+  NOTE quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor
+
+  Integrated luma prescaling for saturation/contrast/brightness adjustment.
+
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "mangle.h"
+#include "img_format.h" //FIXME try to reduce dependency of such stuff
+
+#undef PROFILE_THE_BEAST
+#undef INC_SCALING
+
+typedef unsigned char ubyte;
+typedef signed char   sbyte;
+
+
+/* RGB interleaver, 16 planar pels 8-bit samples per channel in
+   homogeneous vector registers x0,x1,x2 are interleaved with the
+   following technique:
+
+      o0 = vec_mergeh (x0,x1);
+      o1 = vec_perm (o0, x2, perm_rgb_0);
+      o2 = vec_perm (o0, x2, perm_rgb_1);
+      o3 = vec_mergel (x0,x1);
+      o4 = vec_perm (o3,o2,perm_rgb_2);
+      o5 = vec_perm (o3,o2,perm_rgb_3);
+
+  perm_rgb_0:   o0(RG).h v1(B) --> o1*
+              0   1  2   3   4
+             rgbr|gbrg|brgb|rgbr
+             0010 0100 1001 0010
+             0102 3145 2673 894A
+
+  perm_rgb_1:   o0(RG).h v1(B) --> o2
+              0   1  2   3   4
+             gbrg|brgb|bbbb|bbbb
+             0100 1001 1111 1111
+             B5CD 6EF7 89AB CDEF
+
+  perm_rgb_2:   o3(RG).l o2(rgbB.l) --> o4*
+              0   1  2   3   4
+             gbrg|brgb|rgbr|gbrg
+             1111 1111 0010 0100
+             89AB CDEF 0182 3945
+
+  perm_rgb_2:   o3(RG).l o2(rgbB.l) ---> o5*
+              0   1  2   3   4
+             brgb|rgbr|gbrg|brgb
+             1001 0010 0100 1001
+             a67b 89cA BdCD eEFf
+
+*/
+static
+const vector unsigned char
+  perm_rgb_0 = (const vector unsigned char)AVV(0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
+				      0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a),
+  perm_rgb_1 = (const vector unsigned char)AVV(0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
+				      0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f),
+  perm_rgb_2 = (const vector unsigned char)AVV(0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
+				      0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05),
+  perm_rgb_3 = (const vector unsigned char)AVV(0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
+				      0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f);
+
+#define vec_merge3(x2,x1,x0,y0,y1,y2)    \
+do {					 \
+  typeof(x0) o0,o2,o3;			 \
+      o0 = vec_mergeh (x0,x1);		 \
+      y0 = vec_perm (o0, x2, perm_rgb_0);\
+      o2 = vec_perm (o0, x2, perm_rgb_1);\
+      o3 = vec_mergel (x0,x1);		 \
+      y1 = vec_perm (o3,o2,perm_rgb_2);	 \
+      y2 = vec_perm (o3,o2,perm_rgb_3);	 \
+} while(0)
+
+#define vec_mstbgr24(x0,x1,x2,ptr)        \
+do {					 \
+  typeof(x0) _0,_1,_2;			 \
+  vec_merge3 (x0,x1,x2,_0,_1,_2);	 \
+  vec_st (_0, 0, ptr++);		 \
+  vec_st (_1, 0, ptr++);		 \
+  vec_st (_2, 0, ptr++);		 \
+}  while (0);
+
+#define vec_mstrgb24(x0,x1,x2,ptr)       \
+do {					 \
+  typeof(x0) _0,_1,_2;			 \
+  vec_merge3 (x2,x1,x0,_0,_1,_2);	 \
+  vec_st (_0, 0, ptr++);		 \
+  vec_st (_1, 0, ptr++);		 \
+  vec_st (_2, 0, ptr++);		 \
+}  while (0);
+
+/* pack the pixels in rgb0 format
+   msb R
+   lsb 0
+*/
+#define vec_mstrgb32(T,x0,x1,x2,x3,ptr)						       \
+do {										       \
+  T _0,_1,_2,_3;								       \
+  _0 = vec_mergeh (x0,x1);							       \
+  _1 = vec_mergeh (x2,x3);					    	  	       \
+  _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1);            \
+  _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1);            \
+  vec_st (_2, 0*16, (T *)ptr);						               \
+  vec_st (_3, 1*16, (T *)ptr);						       	       \
+  _0 = vec_mergel (x0,x1);							       \
+  _1 = vec_mergel (x2,x3);						       	       \
+  _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); 	       \
+  _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); 	       \
+  vec_st (_2, 2*16, (T *)ptr);						       	       \
+  vec_st (_3, 3*16, (T *)ptr);						       	       \
+  ptr += 4;									       \
+}  while (0);
+
+/*
+
+  | 1     0       1.4021   | | Y |
+  | 1    -0.3441 -0.7142   |x| Cb|
+  | 1     1.7718  0	   | | Cr|
+
+
+  Y:      [-128 127]
+  Cb/Cr : [-128 127]
+
+  typical yuv conversion work on Y: 0-255 this version has been optimized for jpeg decode.
+
+*/
+
+
+
+
+#define vec_unh(x) \
+  (vector signed short) \
+    vec_perm(x,(typeof(x))AVV(0),\
+             (vector unsigned char)AVV(0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
+                                    0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07))
+#define vec_unl(x) \
+  (vector signed short) \
+    vec_perm(x,(typeof(x))AVV(0),\
+             (vector unsigned char)AVV(0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
+                                    0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F))
+
+#define vec_clip_s16(x) \
+  vec_max (vec_min (x, (vector signed short)AVV(235,235,235,235,235,235,235,235)),\
+                       (vector signed short)AVV(16, 16, 16, 16, 16, 16, 16, 16 ))
+
+#define vec_packclp(x,y) \
+  (vector unsigned char)vec_packs \
+      ((vector unsigned short)vec_max (x,(vector signed short) AVV(0)), \
+       (vector unsigned short)vec_max (y,(vector signed short) AVV(0)))
+
+//#define out_pixels(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,a,a,ptr)
+
+
+static inline void cvtyuvtoRGB (SwsContext *c,
+			   vector signed short Y, vector signed short U, vector signed short V,
+			   vector signed short *R, vector signed short *G, vector signed short *B)
+{
+  vector signed   short vx,ux,uvx;
+
+  Y = vec_mradds (Y, c->CY, c->OY);
+  U  = vec_sub (U,(vector signed short)
+  			vec_splat((vector signed short)AVV(128),0));
+  V  = vec_sub (V,(vector signed short)
+  			vec_splat((vector signed short)AVV(128),0));
+
+  //   ux  = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
+  ux = vec_sl (U, c->CSHIFT);
+  *B = vec_mradds (ux, c->CBU, Y);
+
+  // vx  = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
+  vx = vec_sl (V, c->CSHIFT);
+  *R = vec_mradds (vx, c->CRV, Y);
+
+  // uvx = ((CGU*u) + (CGV*v))>>15;
+  uvx = vec_mradds (U, c->CGU, Y);
+  *G = vec_mradds (V, c->CGV, uvx);
+}
+
+
+/*
+  ------------------------------------------------------------------------------
+  CS converters
+  ------------------------------------------------------------------------------
+*/
+
+
/*
  DEFCSP420_CVT(name, out_pixels)

  Expands to a static converter function "altivec_<name>" that turns one
  slice of planar 4:2:0 YUV into packed RGB, storing pixels through the
  out_pixels(R,G,B,ptr) macro (one of the out_abgr/out_bgra/... macros
  defined below this macro).

  Each outer-loop iteration converts TWO source rows — the even row goes
  to "oute", the odd row to "outo" — 16 luma pixels at a time, with one
  8-sample U/V fetch shared by each pair of rows (4:2:0 chroma).  The
  caller must guarantee w is a multiple of 16 and srcSliceH a multiple
  of 2 (yuv2rgb_init_altivec rejects other sizes).  Returns srcSliceH.

  The vec_dstst calls are AltiVec data-stream-touch prefetch hints for
  the two destination rows.  vec_lvsl + vec_perm realigns possibly
  unaligned source loads.  Subtracting splat(128) re-centers chroma
  around zero before the coefficient multiplies (vec_mradds is a
  multiply-round-and-add with an implicit >>15).  instrides_scl[] holds
  the end-of-row pointer corrections remaining after the inner loop has
  already advanced the source pointers by w (luma) / w/2 (chroma) bytes.

  NOTE: only block comments may be used inside the expansion — a
  line comment would swallow the continuation backslash.
*/
#define DEFCSP420_CVT(name,out_pixels)                                     \
static int altivec_##name (SwsContext *c,                                  \
				unsigned char **in, int *instrides,	   \
				int srcSliceY,	int srcSliceH,		   \
				unsigned char **oplanes, int *outstrides)  \
{									   \
  int w = c->srcW;							   \
  int h = srcSliceH;							   \
  int i,j;								   \
  int instrides_scl[3];							   \
  vector unsigned char y0,y1;						   \
									   \
  vector signed char  u,v;						   \
									   \
  vector signed short Y0,Y1,Y2,Y3;					   \
  vector signed short U,V;						   \
  vector signed short vx,ux,uvx;					   \
  vector signed short vx0,ux0,uvx0;					   \
  vector signed short vx1,ux1,uvx1;					   \
  vector signed short R0,G0,B0;						   \
  vector signed short R1,G1,B1;						   \
  vector unsigned char R,G,B;						   \
									   \
  vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;			   \
  vector unsigned char align_perm;					   \
									   \
  vector signed short 							   \
    lCY  = c->CY,							   \
    lOY  = c->OY,							   \
    lCRV = c->CRV,							   \
    lCBU = c->CBU,							   \
    lCGU = c->CGU,							   \
    lCGV = c->CGV;							   \
									   \
  vector unsigned short lCSHIFT = c->CSHIFT;				   \
									   \
  ubyte *y1i   = in[0];							   \
  ubyte *y2i   = in[0]+instrides[0];					   \
  ubyte *ui    = in[1];							   \
  ubyte *vi    = in[2];							   \
									   \
  vector unsigned char *oute						   \
    = (vector unsigned char *)						   \
        (oplanes[0]+srcSliceY*outstrides[0]);				   \
  vector unsigned char *outo						   \
    = (vector unsigned char *)						   \
        (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);		   \
									   \
									   \
  instrides_scl[0] = instrides[0]*2-w;  /* the loop moves y{1,2}i by w */  \
  instrides_scl[1] = instrides[1]-w/2;  /* the loop moves ui by w/2 */	   \
  instrides_scl[2] = instrides[2]-w/2;  /* the loop moves vi by w/2 */	   \
									   \
									   \
  for (i=0;i<h/2;i++) {							   \
    vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);                 \
    vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);                 \
									   \
    for (j=0;j<w/16;j++) {						   \
									   \
      y1ivP = (vector unsigned char *)y1i;				   \
      y2ivP = (vector unsigned char *)y2i;				   \
      uivP = (vector unsigned char *)ui;				   \
      vivP = (vector unsigned char *)vi;				   \
									   \
      align_perm = vec_lvsl (0, y1i);					   \
      y0 = (vector unsigned char)vec_perm (y1ivP[0], y1ivP[1], align_perm);\
									   \
      align_perm = vec_lvsl (0, y2i);					   \
      y1 = (vector unsigned char)vec_perm (y2ivP[0], y2ivP[1], align_perm);\
									   \
      align_perm = vec_lvsl (0, ui);					   \
      u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);	   \
									   \
      align_perm = vec_lvsl (0, vi);					   \
      v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);	   \
									   \
      u  = (vector signed char)						   \
     		vec_sub (u,(vector signed char)                            \
				vec_splat((vector signed char)AVV(128),0));\
      v  = (vector signed char)						   \
     		vec_sub (v,(vector signed char)				   \
				vec_splat((vector signed char)AVV(128),0));\
									   \
      U  = vec_unpackh (u);						   \
      V  = vec_unpackh (v);						   \
									   \
									   \
	Y0 = vec_unh (y0);						   \
	Y1 = vec_unl (y0);						   \
	Y2 = vec_unh (y1);						   \
	Y3 = vec_unl (y1);						   \
									   \
        Y0 = vec_mradds (Y0, lCY, lOY);					   \
        Y1 = vec_mradds (Y1, lCY, lOY);					   \
        Y2 = vec_mradds (Y2, lCY, lOY);					   \
        Y3 = vec_mradds (Y3, lCY, lOY);					   \
									   \
	/*   ux  = (CBU*(u<<CSHIFT)+0x4000)>>15 */			   \
	ux = vec_sl (U, lCSHIFT);					   \
	ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0));		   \
	ux0  = vec_mergeh (ux,ux);					   \
	ux1  = vec_mergel (ux,ux);					   \
									   \
	/* vx  = (CRV*(v<<CSHIFT)+0x4000)>>15;	*/			   \
	vx = vec_sl (V, lCSHIFT);					   \
	vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0));		   \
	vx0  = vec_mergeh (vx,vx);					   \
	vx1  = vec_mergel (vx,vx);					   \
									   \
	/* uvx = ((CGU*u) + (CGV*v))>>15 */				   \
	uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0));		   \
	uvx = vec_mradds (V, lCGV, uvx);				   \
	uvx0 = vec_mergeh (uvx,uvx);					   \
	uvx1 = vec_mergel (uvx,uvx);					   \
									   \
	R0 = vec_add (Y0,vx0);						   \
	G0 = vec_add (Y0,uvx0);						   \
	B0 = vec_add (Y0,ux0);						   \
	R1 = vec_add (Y1,vx1);						   \
	G1 = vec_add (Y1,uvx1);						   \
	B1 = vec_add (Y1,ux1);						   \
									   \
	R  = vec_packclp (R0,R1);					   \
	G  = vec_packclp (G0,G1);					   \
	B  = vec_packclp (B0,B1);					   \
									   \
	out_pixels(R,G,B,oute);						   \
									   \
	R0 = vec_add (Y2,vx0);						   \
	G0 = vec_add (Y2,uvx0);						   \
	B0 = vec_add (Y2,ux0);						   \
	R1 = vec_add (Y3,vx1);						   \
	G1 = vec_add (Y3,uvx1);						   \
	B1 = vec_add (Y3,ux1);						   \
	R  = vec_packclp (R0,R1);					   \
	G  = vec_packclp (G0,G1);					   \
	B  = vec_packclp (B0,B1);					   \
									   \
									   \
	out_pixels(R,G,B,outo);						   \
									   \
      y1i  += 16;							   \
      y2i  += 16;							   \
      ui   += 8;							   \
      vi   += 8;							   \
									   \
    }									   \
									   \
    outo += (outstrides[0])>>4;					           \
    oute += (outstrides[0])>>4;					           \
									   \
    ui    += instrides_scl[1];						   \
    vi    += instrides_scl[2];						   \
    y1i   += instrides_scl[0];						   \
    y2i   += instrides_scl[0];						   \
  }									   \
  return srcSliceH;							   \
}
+
+
/*
  Pixel store macros: pack three clipped unsigned-char vectors (R,G,B,
  16 pixels each) into packed pixels at *ptr, in the byte order named by
  the macro.  The 32bpp variants pass a zero vector (AVV(0)) for the
  unused alpha/filler byte.  The underlying vec_mstrgb32/vec_mstrgb24/
  vec_mstbgr24 macros are defined earlier in this file; NOTE(review):
  they presumably also advance ptr past the stored pixels — the loops
  below rely on that, verify against their definitions.
*/
#define out_abgr(a,b,c,ptr)  vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),c,b,a,ptr)
#define out_bgra(a,b,c,ptr)  vec_mstrgb32(typeof(a),c,b,a,((typeof (a))AVV(0)),ptr)
#define out_rgba(a,b,c,ptr)  vec_mstrgb32(typeof(a),a,b,c,((typeof (a))AVV(0)),ptr)
#define out_argb(a,b,c,ptr)  vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,b,c,ptr)
#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr)
#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)
+
/* Instantiate one 4:2:0 planar -> packed RGB converter per output
   byte order.  Each expansion yields a static function named
   altivec_yuv2_<fmt> with the SwsFunc-style signature used by
   yuv2rgb_init_altivec below. */
DEFCSP420_CVT (yuv2_abgr, out_abgr)
#if 1
DEFCSP420_CVT (yuv2_bgra, out_bgra)
#else
/* Dead code: hand-expanded reference version of the BGRA converter,
   kept under "#else" presumably for debugging; the macro expansion
   above is what actually compiles.
   NOTE(review): despite its name it stores through out_argb (not
   out_bgra), it assumes a packed luma plane (y2i = in[0]+w, i.e.
   instrides[0] == w), and it loads luma with vec_ldl without the
   alignment fixup used by the macro — confirm all three before ever
   enabling this branch. */
static int altivec_yuv2_bgra32 (SwsContext *c,
				unsigned char **in, int *instrides,
				int srcSliceY,	int srcSliceH,
				unsigned char **oplanes, int *outstrides)
{
  int w = c->srcW;
  int h = srcSliceH;
  int i,j;
  int instrides_scl[3];
  vector unsigned char y0,y1;

  vector signed char  u,v;

  vector signed short Y0,Y1,Y2,Y3;
  vector signed short U,V;
  vector signed short vx,ux,uvx;
  vector signed short vx0,ux0,uvx0;
  vector signed short vx1,ux1,uvx1;
  vector signed short R0,G0,B0;
  vector signed short R1,G1,B1;
  vector unsigned char R,G,B;

  vector unsigned char *uivP, *vivP;
  vector unsigned char align_perm;

  /* local copies of the conversion coefficients set up by
     yuv2rgb_altivec_init_tables() */
  vector signed short
    lCY  = c->CY,
    lOY  = c->OY,
    lCRV = c->CRV,
    lCBU = c->CBU,
    lCGU = c->CGU,
    lCGV = c->CGV;

  vector unsigned short lCSHIFT = c->CSHIFT;

  ubyte *y1i   = in[0];
  ubyte *y2i   = in[0]+w;
  ubyte *ui    = in[1];
  ubyte *vi    = in[2];

  vector unsigned char *oute
    = (vector unsigned char *)
        (oplanes[0]+srcSliceY*outstrides[0]);
  vector unsigned char *outo
    = (vector unsigned char *)
        (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);


  instrides_scl[0] = instrides[0];
  instrides_scl[1] = instrides[1]-w/2;  /* the loop moves ui by w/2 */
  instrides_scl[2] = instrides[2]-w/2;  /* the loop moves vi by w/2 */


  for (i=0;i<h/2;i++) {
    /* data-stream-touch prefetch hints for the two output rows */
    vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
    vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);

    for (j=0;j<w/16;j++) {

      y0 = vec_ldl (0,y1i);
      y1 = vec_ldl (0,y2i);
      uivP = (vector unsigned char *)ui;
      vivP = (vector unsigned char *)vi;

      align_perm = vec_lvsl (0, ui);
      u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);

      align_perm = vec_lvsl (0, vi);
      v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
      /* re-center chroma around zero */
      u  = (vector signed char)
     		vec_sub (u,(vector signed char)
				vec_splat((vector signed char)AVV(128),0));
      
      v  = (vector signed char)
      		vec_sub (v, (vector signed char)
				vec_splat((vector signed char)AVV(128),0));
      
      U  = vec_unpackh (u);
      V  = vec_unpackh (v);


	Y0 = vec_unh (y0);
	Y1 = vec_unl (y0);
	Y2 = vec_unh (y1);
	Y3 = vec_unl (y1);

        Y0 = vec_mradds (Y0, lCY, lOY);
        Y1 = vec_mradds (Y1, lCY, lOY);
        Y2 = vec_mradds (Y2, lCY, lOY);
        Y3 = vec_mradds (Y3, lCY, lOY);

	/*   ux  = (CBU*(u<<CSHIFT)+0x4000)>>15 */
	ux = vec_sl (U, lCSHIFT);
	ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0));
	ux0  = vec_mergeh (ux,ux);
	ux1  = vec_mergel (ux,ux);

	/* vx  = (CRV*(v<<CSHIFT)+0x4000)>>15;	*/
	vx = vec_sl (V, lCSHIFT);
	vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0));
	vx0  = vec_mergeh (vx,vx);
	vx1  = vec_mergel (vx,vx);
	/* uvx = ((CGU*u) + (CGV*v))>>15 */
	uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0));
	uvx = vec_mradds (V, lCGV, uvx);
	uvx0 = vec_mergeh (uvx,uvx);
	uvx1 = vec_mergel (uvx,uvx);
	R0 = vec_add (Y0,vx0);
	G0 = vec_add (Y0,uvx0);
	B0 = vec_add (Y0,ux0);
	R1 = vec_add (Y1,vx1);
	G1 = vec_add (Y1,uvx1);
	B1 = vec_add (Y1,ux1);
	R  = vec_packclp (R0,R1);
	G  = vec_packclp (G0,G1);
	B  = vec_packclp (B0,B1);
	
	out_argb(R,G,B,oute);
	R0 = vec_add (Y2,vx0);
	G0 = vec_add (Y2,uvx0);
	B0 = vec_add (Y2,ux0);
	R1 = vec_add (Y3,vx1);
	G1 = vec_add (Y3,uvx1);
	B1 = vec_add (Y3,ux1);
	R  = vec_packclp (R0,R1);
	G  = vec_packclp (G0,G1);
	B  = vec_packclp (B0,B1);
	
	out_argb(R,G,B,outo);
	y1i  += 16;
	y2i  += 16;
	ui   += 8;
	vi   += 8;

    }

    outo += (outstrides[0])>>4;
    oute += (outstrides[0])>>4;

    ui    += instrides_scl[1];
    vi    += instrides_scl[2];
    y1i   += instrides_scl[0];
    y2i   += instrides_scl[0];
  }
  return srcSliceH;
}

#endif


DEFCSP420_CVT (yuv2_rgba, out_rgba)
DEFCSP420_CVT (yuv2_argb, out_argb)
DEFCSP420_CVT (yuv2_rgb24,  out_rgb24)
DEFCSP420_CVT (yuv2_bgr24,  out_bgr24)
+
+
// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
/*
  vec_perm control vectors for demultiplexing a 16-byte UYVY group
  (8 pixels) into three vectors of 8 big-endian signed shorts.  In each
  byte pair (0x10, k), 0x10 selects byte 0 of vec_perm's second operand
  — always a zero vector, AVV(0) — giving a zero high byte, and k
  selects the sample byte, i.e. each source byte is zero-extended to a
  16-bit lane.  demux_u/demux_v repeat each chroma byte (offsets
  0,4,8,12 resp. 2,6,0xA,0xE) twice, so one U/V sample covers its two
  pixels; demux_y takes the 8 luma bytes at the odd offsets.
*/
static
const vector unsigned char
  demux_u = (const vector unsigned char)AVV(0x10,0x00,0x10,0x00,
				   0x10,0x04,0x10,0x04,
				   0x10,0x08,0x10,0x08,
				   0x10,0x0c,0x10,0x0c),
  demux_v = (const vector unsigned char)AVV(0x10,0x02,0x10,0x02,
				   0x10,0x06,0x10,0x06,
				   0x10,0x0A,0x10,0x0A,
				   0x10,0x0E,0x10,0x0E),
  demux_y = (const vector unsigned char)AVV(0x10,0x01,0x10,0x03,
				   0x10,0x05,0x10,0x07,
				   0x10,0x09,0x10,0x0B,
				   0x10,0x0D,0x10,0x0F);
+
+/*
+  this is so I can play live CCIR raw video
+*/
/*
  Packed UYVY 4:2:2 -> RGB32 (RGBA byte order) slice converter.
  Processes 16 pixels (32 source bytes, two aligned vec_ld loads) per
  inner iteration; demux_u/v/y split each load into Y/U/V shorts and
  cvtyuvtoRGB applies the coefficients from c.  Returns srcSliceH.

  NOTE(review): instrides is never read — img just advances 32 bytes
  per 16 pixels, so this assumes a packed source (stride == 2*w).
  NOTE(review): `out` is only advanced inside out_rgba/vec_mstrgb32 —
  confirm that macro increments its pointer argument.
  (vx, ux, uvx are declared but unused here.)
*/
static int altivec_uyvy_rgb32 (SwsContext *c,
			       unsigned char **in, int *instrides,
			       int srcSliceY,	int srcSliceH,
			       unsigned char **oplanes, int *outstrides)
{
  int w = c->srcW;
  int h = srcSliceH;
  int i,j;
  vector unsigned char uyvy;
  vector signed   short Y,U,V;
  vector signed   short vx,ux,uvx;
  vector signed   short R0,G0,B0,R1,G1,B1;
  vector unsigned char  R,G,B;
  vector unsigned char *out;
  ubyte *img;

  img = in[0];
  out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);

  for (i=0;i<h;i++) {
    for (j=0;j<w/16;j++) {
      /* first 8 pixels */
      uyvy = vec_ld (0, img);
      U = (vector signed short)
	vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);

      V = (vector signed short)
	vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);

      Y = (vector signed short)
	vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);

      cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);

      /* second 8 pixels */
      uyvy = vec_ld (16, img);
      U = (vector signed short)
	vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);

      V = (vector signed short)
	vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);

      Y = (vector signed short)
	vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);

      cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);

      /* saturate the 16-bit sums down to unsigned bytes */
      R  = vec_packclp (R0,R1);
      G  = vec_packclp (G0,G1);
      B  = vec_packclp (B0,B1);

      //      vec_mstbgr24 (R,G,B, out);
      out_rgba (R,G,B,out);

      img += 32;
    }
  }
  return srcSliceH;
}
+
+
+
+/* Ok currently the acceleration routine only supports
+   inputs of widths a multiple of 16
+   and heights a multiple 2
+
+   So we just fall back to the C codes for this.
+*/
+SwsFunc yuv2rgb_init_altivec (SwsContext *c)
+{
+  if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))    
+    return NULL;
+
+  /*
+    and this seems not to matter too much I tried a bunch of 
+    videos with abnormal widths and mplayer crashes else where.
+    mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv 
+    boom with X11 bad match.
+    
+  */
+  if ((c->srcW & 0xf) != 0)    return NULL;
+
+  switch (c->srcFormat) {
+  case IMGFMT_YVU9:
+  case IMGFMT_IF09:
+  case IMGFMT_YV12:
+  case IMGFMT_I420:
+  case IMGFMT_IYUV:
+  case IMGFMT_CLPL:
+  case IMGFMT_Y800:
+  case IMGFMT_Y8:
+  case IMGFMT_NV12:
+  case IMGFMT_NV21:
+    if ((c->srcH & 0x1) != 0)
+      return NULL;
+
+    switch(c->dstFormat){
+    case IMGFMT_RGB24:
+      MSG_WARN("ALTIVEC: Color Space RGB24\n");
+      return altivec_yuv2_rgb24;
+    case IMGFMT_BGR24:
+      MSG_WARN("ALTIVEC: Color Space BGR24\n");
+      return altivec_yuv2_bgr24;
+    case IMGFMT_ARGB:
+      MSG_WARN("ALTIVEC: Color Space ARGB\n");
+      return altivec_yuv2_argb;
+    case IMGFMT_ABGR:
+      MSG_WARN("ALTIVEC: Color Space ABGR\n");
+      return altivec_yuv2_abgr;
+    case IMGFMT_RGBA:
+      MSG_WARN("ALTIVEC: Color Space RGBA\n");
+      return altivec_yuv2_rgba;
+    case IMGFMT_BGRA:
+      MSG_WARN("ALTIVEC: Color Space BGRA\n");
+      return altivec_yuv2_bgra;
+    default: return NULL;
+    }
+    break;
+
+  case IMGFMT_UYVY:
+    switch(c->dstFormat){
+    case IMGFMT_RGB32:
+      MSG_WARN("ALTIVEC: Color Space UYVY -> RGB32\n");
+      return altivec_uyvy_rgb32;
+    default: return NULL;
+    }
+    break;
+
+  }
+  return NULL;
+}
+
/*
  Scale a 16.16 fixed-point value down to an integer with
  round-to-nearest, saturate it into the signed 16-bit range, and hand
  back the resulting bit pattern as an unsigned short (0x8000 encodes
  the most negative value).
*/
static uint16_t roundToInt16(int64_t f){
	int scaled = (f + (1<<15))>>16;
	if (scaled < -0x7FFF)
		return 0x8000;
	if (scaled > 0x7FFF)
		return 0x7FFF;
	return (uint16_t)scaled;
}
+
/*
  Precompute the six YUV->RGB coefficient vectors (CY, OY, CRV, CBU,
  CGU, CGV) plus the chroma pre-shift CSHIFT, and store them in the
  context for the converters above.

  Only the integer parts of contrast and saturation are used
  ((x)>>16) — presumably they arrive as 16.16 fixed point; TODO confirm
  against the callers.  inv_table holds the 4 colorspace-dependent
  inverse-matrix entries (crv, cbu, cgu, cgv order assumed from use —
  verify against the table provider).
*/
void yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation)
{
  /* union gives a 16-byte-aligned scalar view of one vector register,
     so the shorts can be filled individually and splatted below */
  union {
  	signed short tmp[8] __attribute__ ((aligned(16)));
	vector signed short vec;
	} buf;

  buf.tmp[0] =  ( (0xffffLL) * contrast>>8 )>>9;			//cy
  buf.tmp[1] =  -256*brightness;					//oy
  buf.tmp[2] =  (inv_table[0]>>3) *(contrast>>16)*(saturation>>16);	//crv
  buf.tmp[3] =  (inv_table[1]>>3) *(contrast>>16)*(saturation>>16);	//cbu
  buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16));	//cgu
  buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16));	//cgv


  /* chroma is shifted left by 2 before the mradds multiplies */
  c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
  /* broadcast each scalar coefficient across all 8 lanes */
  c->CY  = vec_splat ((vector signed short)buf.vec, 0);
  c->OY  = vec_splat ((vector signed short)buf.vec, 1);
  c->CRV  = vec_splat ((vector signed short)buf.vec, 2);
  c->CBU  = vec_splat ((vector signed short)buf.vec, 3);
  c->CGU  = vec_splat ((vector signed short)buf.vec, 4);
  c->CGV  = vec_splat ((vector signed short)buf.vec, 5);
#if 0
{
int i;
char *v[6]={"cy","oy","crv","cbu","cgu","cgv"};
for (i=0; i<6;i++)
  printf("%s %d ", v[i],buf.tmp[i] );
  printf("\n");
}
#endif
 return;
}
+
+
/*
  Vertically-filtered planar YUV -> packed RGB output stage (AltiVec
  version of yuv2packedX).  For each group of 16 output pixels it
  accumulates lumFilterSize luma taps and chrFilterSize chroma taps
  using the pre-vectorized coefficient banks c->vYCoeffsBank /
  c->vCCoeffsBank (indexed by dstY; the scalar lumFilter/chrFilter
  parameters are unused here), scales/clips the sums, converts via
  cvtyuvtoRGB, and stores through the out_* macro matching dstFormat.

  NOTE(review): chrSrc[j][i/2+2048] assumes the V samples live 2048
  shorts after the U samples in each chroma buffer — confirm against
  the scaler's buffer layout.
  NOTE(review): the leftover-pixel block guarded by "if (i < dstW)"
  looks unreachable: the for-loop above only exits once i >= dstW, so
  non-multiple-of-16 widths are actually handled (or overrun!) by the
  main loop — verify.
  NOTE(review): the final memcpy copies (dstW-i)/4 BYTES, but (dstW-i)
  leftover 32-bit pixels occupy (dstW-i)*4 bytes — looks like a bug,
  and it ignores the 24bpp formats' pixel size entirely.
*/
void
altivec_yuv2packedX (SwsContext *c,
		       int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
		       int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
		       uint8_t *dest, int dstW, int dstY)
{
  int i,j;
  short *f;
  vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
  vector signed short R0,G0,B0,R1,G1,B1;

  vector unsigned char R,G,B,pels[3];
  vector unsigned char *out,*nout;

  /* rounding bias and post-filter down-shift for the accumulators */
  vector signed short   RND = vec_splat_s16(1<<3);
  vector unsigned short SCL = vec_splat_u16(4);
  unsigned long scratch[16] __attribute__ ((aligned (16)));

  vector signed short *YCoeffs, *CCoeffs;

  YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
  CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;

  out = (vector unsigned char *)dest;

  for(i=0; i<dstW; i+=16){
    Y0 = RND;
    Y1 = RND;
    /* extract 16 coeffs from lumSrc */
    for(j=0; j<lumFilterSize; j++) {
      X0 = vec_ld (0,  &lumSrc[j][i]);
      X1 = vec_ld (16, &lumSrc[j][i]);
      Y0 = vec_mradds (X0, YCoeffs[j], Y0);
      Y1 = vec_mradds (X1, YCoeffs[j], Y1);
    }

    U = RND;
    V = RND;
    /* extract 8 coeffs from U,V */
    for(j=0; j<chrFilterSize; j++) {
      X  = vec_ld (0, &chrSrc[j][i/2]);
      U  = vec_mradds (X, CCoeffs[j], U);
      X  = vec_ld (0, &chrSrc[j][i/2+2048]);
      V  = vec_mradds (X, CCoeffs[j], V);
    }

    /* scale and clip signals */
    Y0 = vec_sra (Y0, SCL);
    Y1 = vec_sra (Y1, SCL);
    U  = vec_sra (U,  SCL);
    V  = vec_sra (V,  SCL);

    Y0 = vec_clip_s16 (Y0);
    Y1 = vec_clip_s16 (Y1);
    U  = vec_clip_s16 (U);
    V  = vec_clip_s16 (V);

    /* now we have
      Y0= y0 y1 y2 y3 y4 y5 y6 y7     Y1= y8 y9 y10 y11 y12 y13 y14 y15
      U= u0 u1 u2 u3 u4 u5 u6 u7      V= v0 v1 v2 v3 v4 v5 v6 v7

      Y0= y0 y1 y2 y3 y4 y5 y6 y7    Y1= y8 y9 y10 y11 y12 y13 y14 y15
      U0= u0 u0 u1 u1 u2 u2 u3 u3    U1= u4 u4 u5 u5 u6 u6 u7 u7
      V0= v0 v0 v1 v1 v2 v2 v3 v3    V1= v4 v4 v5 v5 v6 v6 v7 v7
    */

    U0 = vec_mergeh (U,U);
    V0 = vec_mergeh (V,V);

    U1 = vec_mergel (U,U);
    V1 = vec_mergel (V,V);

    cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
    cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);

    R  = vec_packclp (R0,R1);
    G  = vec_packclp (G0,G1);
    B  = vec_packclp (B0,B1);

    /* NOTE(review): out is advanced only inside the out_* store macros */
    switch(c->dstFormat) {
      case IMGFMT_ABGR: out_abgr (R,G,B,out); break;
      case IMGFMT_BGRA: out_bgra (R,G,B,out); break;
      case IMGFMT_RGBA: out_rgba (R,G,B,out); break;
      case IMGFMT_ARGB: out_argb (R,G,B,out); break;
      case IMGFMT_RGB24: out_rgb24 (R,G,B,out); break;
      case IMGFMT_BGR24: out_bgr24 (R,G,B,out); break;
      default:
        {
          /* If this is reached, the caller should have called yuv2packedXinC
             instead. */
          static int printed_error_message;
          if(!printed_error_message) {
            MSG_ERR("altivec_yuv2packedX doesn't support %s output\n",
                    vo_format_name(c->dstFormat));
            printed_error_message=1;
          }
          return;
        }
    }
  }

  /* leftover pixels: render the last full 16-pixel group into an
     aligned scratch buffer, then copy out only the tail bytes.
     (see NOTE(review) in the header: this block appears unreachable
     and its memcpy length looks wrong) */
  if (i < dstW) {
    i -= 16;

    Y0 = RND;
    Y1 = RND;
    /* extract 16 coeffs from lumSrc */
    for(j=0; j<lumFilterSize; j++) {
      X0 = vec_ld (0,  &lumSrc[j][i]);
      X1 = vec_ld (16, &lumSrc[j][i]);
      Y0 = vec_mradds (X0, YCoeffs[j], Y0);
      Y1 = vec_mradds (X1, YCoeffs[j], Y1);
    }

    U = RND;
    V = RND;
    /* extract 8 coeffs from U,V */
    for(j=0; j<chrFilterSize; j++) {
      X  = vec_ld (0, &chrSrc[j][i/2]);
      U  = vec_mradds (X, CCoeffs[j], U);
      X  = vec_ld (0, &chrSrc[j][i/2+2048]);
      V  = vec_mradds (X, CCoeffs[j], V);
    }

    /* scale and clip signals */
    Y0 = vec_sra (Y0, SCL);
    Y1 = vec_sra (Y1, SCL);
    U  = vec_sra (U,  SCL);
    V  = vec_sra (V,  SCL);

    Y0 = vec_clip_s16 (Y0);
    Y1 = vec_clip_s16 (Y1);
    U  = vec_clip_s16 (U);
    V  = vec_clip_s16 (V);

    /* now we have
       Y0= y0 y1 y2 y3 y4 y5 y6 y7     Y1= y8 y9 y10 y11 y12 y13 y14 y15
       U= u0 u1 u2 u3 u4 u5 u6 u7      V= v0 v1 v2 v3 v4 v5 v6 v7

       Y0= y0 y1 y2 y3 y4 y5 y6 y7    Y1= y8 y9 y10 y11 y12 y13 y14 y15
       U0= u0 u0 u1 u1 u2 u2 u3 u3    U1= u4 u4 u5 u5 u6 u6 u7 u7
       V0= v0 v0 v1 v1 v2 v2 v3 v3    V1= v4 v4 v5 v5 v6 v6 v7 v7
    */

    U0 = vec_mergeh (U,U);
    V0 = vec_mergeh (V,V);

    U1 = vec_mergel (U,U);
    V1 = vec_mergel (V,V);

    cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
    cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);

    R  = vec_packclp (R0,R1);
    G  = vec_packclp (G0,G1);
    B  = vec_packclp (B0,B1);

    nout = (vector unsigned char *)scratch;
    switch(c->dstFormat) {
      case IMGFMT_ABGR: out_abgr (R,G,B,nout); break;
      case IMGFMT_BGRA: out_bgra (R,G,B,nout); break;
      case IMGFMT_RGBA: out_rgba (R,G,B,nout); break;
      case IMGFMT_ARGB: out_argb (R,G,B,nout); break;
      case IMGFMT_RGB24: out_rgb24 (R,G,B,nout); break;
      case IMGFMT_BGR24: out_bgr24 (R,G,B,nout); break;
      default:
        /* Unreachable, I think. */
        MSG_ERR("altivec_yuv2packedX doesn't support %s output\n",
                vo_format_name(c->dstFormat));
        return;
    }

    memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
  }

}



More information about the Sumover-dev mailing list