From 96dbb4fc58fe2dcf4390e073dbb42cc77ef2f0b5 Mon Sep 17 00:00:00 2001
From: Martyn Capewell <martyn.capewell@arm.com>
Date: Mon, 7 Dec 2009 13:59:59 +0000
Subject: [PATCH] Add UXTB16 support to Pixelflinger

 * Add support for UXTB16 to the disassembler
 * Add encoding of the UXTB16 instruction to the Pixelflinger JIT

Introducing the UXTB16 instruction allows some masking code to be removed,
and is beneficial from a pipeline point of view: the generated code contains
many UXTB16-followed-by-MUL sequences.

Further rescheduling and the use of SMULWB bring additional performance
improvements.

 * Use UXTB16 in bilinear filtered texturing

UXTB16 is used to extract channels for SIMD operations, rather than building
masks and ANDing with them. This saves a register and is faster on the
Cortex-A8, as a UXTB16 result can feed into the first stage of a multiply,
unlike the result of an AND.
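
At the JIT level, the change looks roughly like this (a sketch using the
GGLAssembler call style; the register names are illustrative, not the
ones used in the patch):

    // Before: build the 0x00FF00FF mask, then AND to split channel pairs.
    MOV(AL, 0, mask, imm(0xff));
    ORR(AL, 0, mask, mask, imm(0xff0000));         // mask = 0x00FF00FF
    AND(AL, 0, rb, mask, pixel);                   // R and B half-words
    AND(AL, 0, ag, mask, reg_imm(pixel, LSR, 8));  // A and G half-words

    // After: one UXTB16 per channel pair, no mask register needed.
    UXTB16(AL, rb, pixel, 0);                      // R and B half-words
    UXTB16(AL, ag, pixel, 8);                      // A and G half-words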

Also, SMULWB is used rather than SMULBB, which allows removal of the MOVs
previously needed to rescale the results.
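
In C terms, the two multiplies behave as below (an illustrative model,
with names of our choosing): SMULBB multiplies the bottom half-words of
both operands, while SMULWB multiplies the full 32-bit first operand by
the bottom half-word of the second and keeps only the top 32 bits of the
48-bit product, i.e. it includes a free shift right by 16. Pre-shifting U
left by (16 - adjust) therefore folds the ">> adjust" rescale into the
multiply itself:

    /* C models of the ARM multiplies (illustration only). */
    #include <stdint.h>

    static int32_t smulbb(int32_t rm, int32_t rs)
    {   /* product of the two bottom half-words */
        return (int32_t)(int16_t)rm * (int16_t)rs;
    }

    static int32_t smulwb(int32_t rm, int32_t rs)
    {   /* 32 x 16 multiply; low 16 bits of the product discarded */
        return (int32_t)(((int64_t)rm * (int16_t)rs) >> 16);
    }

    /* smulwb(U << (16 - adjust), V) == smulbb(U, V) >> adjust, so the
     * separate rescaling MOV(..., LSR, adjust) is no longer needed. */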

The code has been scheduled for the Cortex-A8 pipeline, specifically aiming
to let the multiplies issue in pipeline 0 for efficient dual-issue operation.

Testing with SpriteMethodTest (http://code.google.com/p/apps-for-android/)
shows an 8% improvement (from 12.7 to 13.7 fps).

The SMULBB-to-SMULWB trick could also be used in the pre-ARMv6 code path,
but this hasn't been implemented.
---
 libpixelflinger/codeflinger/ARMAssembler.cpp  |  10 ++
 libpixelflinger/codeflinger/ARMAssembler.h    |   1 +
 .../codeflinger/ARMAssemblerInterface.h       |   3 +
 .../codeflinger/ARMAssemblerProxy.cpp         |   3 +
 .../codeflinger/ARMAssemblerProxy.h           |   2 +
 libpixelflinger/codeflinger/disassem.c        |   6 +
 libpixelflinger/codeflinger/texturing.cpp     | 104 +++++++++++++++++-
 7 files changed, 128 insertions(+), 1 deletion(-)

diff --git a/libpixelflinger/codeflinger/ARMAssembler.cpp b/libpixelflinger/codeflinger/ARMAssembler.cpp
index ff7b0b3e0d..d3720c3ee1 100644
--- a/libpixelflinger/codeflinger/ARMAssembler.cpp
+++ b/libpixelflinger/codeflinger/ARMAssembler.cpp
@@ -424,5 +424,15 @@ void ARMAssembler::SMLAW(int cc, int y,
     *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
 }
 
+#if 0
+#pragma mark -
+#pragma mark Byte/half word extract and extend (ARMv6+ only)...
+#endif
+
+void ARMAssembler::UXTB16(int cc, int Rd, int Rm, int rotate)
+{
+    *mPC++ = (cc<<28) | 0x6CF0070 | (Rd<<12) | ((rotate >> 3) << 10) | Rm;
+}
+
 }; // namespace android
 
diff --git a/libpixelflinger/codeflinger/ARMAssembler.h b/libpixelflinger/codeflinger/ARMAssembler.h
index ef3b66af7b..a667cb5112 100644
--- a/libpixelflinger/codeflinger/ARMAssembler.h
+++ b/libpixelflinger/codeflinger/ARMAssembler.h
@@ -123,6 +123,7 @@ public:
                 int RdHi, int RdLo, int Rs, int Rm);
     virtual void SMLAW(int cc, int y,
                 int Rd, int Rm, int Rs, int Rn);
+    virtual void UXTB16(int cc, int Rd, int Rm, int rotate);
 
 private:
                 ARMAssembler(const ARMAssembler& rhs);
diff --git a/libpixelflinger/codeflinger/ARMAssemblerInterface.h b/libpixelflinger/codeflinger/ARMAssemblerInterface.h
index 465b3bd9d9..ff6af2a22b 100644
--- a/libpixelflinger/codeflinger/ARMAssemblerInterface.h
+++ b/libpixelflinger/codeflinger/ARMAssemblerInterface.h
@@ -203,6 +203,9 @@ public:
     virtual void SMLAW(int cc, int y,
                 int Rd, int Rm, int Rs, int Rn) = 0;
 
+    // byte/half word extract...
+    virtual void UXTB16(int cc, int Rd, int Rm, int rotate) = 0;
+
     // -----------------------------------------------------------------------
     // convenience...
     // -----------------------------------------------------------------------
diff --git a/libpixelflinger/codeflinger/ARMAssemblerProxy.cpp b/libpixelflinger/codeflinger/ARMAssemblerProxy.cpp
index 18c4618649..7c422dbadb 100644
--- a/libpixelflinger/codeflinger/ARMAssemblerProxy.cpp
+++ b/libpixelflinger/codeflinger/ARMAssemblerProxy.cpp
@@ -195,6 +195,9 @@ void ARMAssemblerProxy::SMLAW(int cc, int y, int Rd, int Rm, int Rs, int Rn) {
     mTarget->SMLAW(cc, y, Rd, Rm, Rs, Rn);
 }
 
+void ARMAssemblerProxy::UXTB16(int cc, int Rd, int Rm, int rotate) {
+    mTarget->UXTB16(cc, Rd, Rm, rotate);
+}
 
 }; // namespace android
 
diff --git a/libpixelflinger/codeflinger/ARMAssemblerProxy.h b/libpixelflinger/codeflinger/ARMAssemblerProxy.h
index 4bdca9cf5c..9134cce6f1 100644
--- a/libpixelflinger/codeflinger/ARMAssemblerProxy.h
+++ b/libpixelflinger/codeflinger/ARMAssemblerProxy.h
@@ -114,6 +114,8 @@ public:
     virtual void SMLAW(int cc, int y,
                 int Rd, int Rm, int Rs, int Rn);
 
+    virtual void UXTB16(int cc, int Rd, int Rm, int rotate);
+
 private:
     ARMAssemblerInterface*  mTarget;
 };
diff --git a/libpixelflinger/codeflinger/disassem.c b/libpixelflinger/codeflinger/disassem.c
index 4676da0d82..ee5e63a2b2 100644
--- a/libpixelflinger/codeflinger/disassem.c
+++ b/libpixelflinger/codeflinger/disassem.c
@@ -80,6 +80,7 @@
  * f - 1st fp operand (register) (bits 12-14)
  * g - 2nd fp operand (register) (bits 16-18)
  * h - 3rd fp operand (register/immediate) (bits 0-4)
+ * j - xtb rotate literal (bits 10-11)
  * b - branch address
  * t - thumb branch address (bits 24, 0-23)
  * k - breakpoint comment (bits 0-3, 8-19)
@@ -122,6 +123,7 @@ static const struct arm32_insn arm32_i[] = {
     { 0x0fe000f0, 0x00c00090, "smull",	"Sdnms" },
     { 0x0fe000f0, 0x00a00090, "umlal",	"Sdnms" },
     { 0x0fe000f0, 0x00e00090, "smlal",	"Sdnms" },
+    { 0x0fff03f0, 0x06cf0070, "uxtb16", "dmj" },
     { 0x0d700000, 0x04200000, "strt",	"daW" },
     { 0x0d700000, 0x04300000, "ldrt",	"daW" },
     { 0x0d700000, 0x04600000, "strbt",	"daW" },
@@ -406,6 +408,10 @@ disasm(const disasm_interface_t *di, u_int loc, int altfmt)
 			else
 				di->di_printf("f%d", insn & 7);
 			break;
+		/* j - xtb rotate literal (bits 10-11) */
+		case 'j':
+			di->di_printf("ror #%d", ((insn >> 10) & 3) << 3);
+			break;
 		/* b - branch address */
 		case 'b':
 			branch = ((insn << 2) & 0x03ffffff);
diff --git a/libpixelflinger/codeflinger/texturing.cpp b/libpixelflinger/codeflinger/texturing.cpp
index 90e658407b..ba13fb3035 100644
--- a/libpixelflinger/codeflinger/texturing.cpp
+++ b/libpixelflinger/codeflinger/texturing.cpp
@@ -25,6 +25,7 @@
 
 #include "codeflinger/GGLAssembler.h"
 
+#include <machine/cpu-features.h>
 
 namespace android {
 
@@ -567,7 +568,7 @@ void GGLAssembler::build_textures(  fragment_parts_t& parts,
                     RSB(GE, 0, height, height, imm(0));
                     MUL(AL, 0, height, stride, height);
                 } else {
-                    // u has not been CLAMPed yet
+                    // v has not been CLAMPed yet
                     CMP(AL, height, reg_imm(v, ASR, FRAC_BITS));
                     MOV(LE, 0, v, reg_imm(height, LSL, FRAC_BITS));
                     MOV(LE, 0, height, imm(0));
@@ -868,6 +869,106 @@ void GGLAssembler::filter24(
     load(txPtr, texel, 0);
 }
 
+#if __ARM_ARCH__ >= 6
+// ARMv6 version, using UXTB16, and scheduled for Cortex-A8 pipeline
+void GGLAssembler::filter32(
+        const fragment_parts_t& parts,
+        pixel_t& texel, const texture_unit_t& tmu,
+        int U, int V, pointer_t& txPtr,
+        int FRAC_BITS)
+{
+    const int adjust = FRAC_BITS*2 - 8;
+    const int round  = 0;
+    const int prescale = 16 - adjust;
+
+    Scratch scratches(registerFile());
+
+    int pixel = scratches.obtain();
+    int dh   = scratches.obtain();
+    int u    = scratches.obtain();
+    int k    = scratches.obtain();
+
+    int temp = scratches.obtain();
+    int dl   = scratches.obtain();
+
+    int offsetrt = scratches.obtain();
+    int offsetlb = scratches.obtain();
+
+    int pixellb = offsetlb;
+
+    // RB -> U * V
+    CONTEXT_LOAD(offsetrt, generated_vars.rt);
+    CONTEXT_LOAD(offsetlb, generated_vars.lb);
+    if (!round) {
+        MOV(AL, 0, U, reg_imm(U, LSL, prescale));
+    }
+    ADD(AL, 0, u, offsetrt, offsetlb);
+
+    LDR(AL, pixel, txPtr.reg, reg_scale_pre(u));
+    if (round) {
+        SMULBB(AL, u, U, V);
+        RSB(AL, 0, U, U, imm(1<<FRAC_BITS));
+    } else {
+        SMULWB(AL, u, U, V);
+        RSB(AL, 0, U, U, imm(1<<(FRAC_BITS+prescale)));
+    }
+    UXTB16(AL, temp, pixel, 0);
+    if (round) {
+        ADD(AL, 0, u, u, imm(1<<(adjust-1)));
+        MOV(AL, 0, u, reg_imm(u, LSR, adjust));
+    }
+    LDR(AL, pixellb, txPtr.reg, reg_scale_pre(offsetlb));
+    MUL(AL, 0, dh, temp, u);
+    UXTB16(AL, temp, pixel, 8);
+    MUL(AL, 0, dl, temp, u);
+    RSB(AL, 0, k, u, imm(0x100));
+
+    // LB -> (1-U) * V
+    if (round) {
+        SMULBB(AL, u, U, V);
+    } else {
+        SMULWB(AL, u, U, V);
+    }
+    UXTB16(AL, temp, pixellb, 0);
+    if (round) {
+        ADD(AL, 0, u, u, imm(1<<(adjust-1)));
+        MOV(AL, 0, u, reg_imm(u, LSR, adjust));
+    }
+    MLA(AL, 0, dh, temp, u, dh);
+    UXTB16(AL, temp, pixellb, 8);
+    MLA(AL, 0, dl, temp, u, dl);
+    SUB(AL, 0, k, k, u);
+
+    // LT -> (1-U)*(1-V)
+    RSB(AL, 0, V, V, imm(1<<FRAC_BITS));
+    LDR(AL, pixel, txPtr.reg);
+    if (round) {
+        SMULBB(AL, u, U, V);
+    } else {
+        SMULWB(AL, u, U, V);
+    }
+    UXTB16(AL, temp, pixel, 0);
+    if (round) {
+        ADD(AL, 0, u, u, imm(1<<(adjust-1)));
+        MOV(AL, 0, u, reg_imm(u, LSR, adjust));
+    }
+    MLA(AL, 0, dh, temp, u, dh);
+    UXTB16(AL, temp, pixel, 8);
+    MLA(AL, 0, dl, temp, u, dl);
+
+    // RT -> U*(1-V)
+    LDR(AL, pixel, txPtr.reg, reg_scale_pre(offsetrt));
+    SUB(AL, 0, u, k, u);
+    UXTB16(AL, temp, pixel, 0);
+    MLA(AL, 0, dh, temp, u, dh);
+    UXTB16(AL, temp, pixel, 8);
+    MLA(AL, 0, dl, temp, u, dl);
+
+    UXTB16(AL, dh, dh, 8);
+    UXTB16(AL, dl, dl, 8);
+    ORR(AL, 0, texel.reg, dh, reg_imm(dl, LSL, 8));
+}
+#else
 void GGLAssembler::filter32(
         const fragment_parts_t& parts,
         pixel_t& texel, const texture_unit_t& tmu,
@@ -955,6 +1056,7 @@ void GGLAssembler::filter32(
     AND(AL, 0, dl, dl, reg_imm(mask, LSL, 8));
     ORR(AL, 0, texel.reg, dh, dl);
 }
+#endif
 
 void GGLAssembler::build_texture_environment(
         component_t& fragment,
-- 
GitLab