From 877e06ed890a9d17146079c09ef34beb2f4ebd7d Mon Sep 17 00:00:00 2001 From: David Given Date: Tue, 21 May 2013 18:16:30 +0100 Subject: [PATCH] Lots more opcodes including float support. Define float and double to be the same thing (as the VC4 seems not to have double-precision float support). --HG-- branch : dtrg-videocore --- mach/vc4/ncg/table | 439 ++++++++++++++++++++++++++------------------- plat/rpi/build.mk | 1 + plat/rpi/descr | 2 +- 3 files changed, 259 insertions(+), 183 deletions(-) diff --git a/mach/vc4/ncg/table b/mach/vc4/ncg/table index 2168afb9b..368c6177e 100644 --- a/mach/vc4/ncg/table +++ b/mach/vc4/ncg/table @@ -9,12 +9,9 @@ EM_WSIZE = 4 EM_PSIZE = 4 EM_BSIZE = 8 /* two words saved in call frame */ -INT8 = 1 /* Size of values */ -INT16 = 2 -INT32 = 4 -INT64 = 8 -FLOAT32 = 4 -FLOAT64 = 8 +BYTE = 1 /* Size of values */ +WORD = 2 +QUAD = 4 FP_OFFSET = 0 /* Offset of saved FP relative to our FP */ PC_OFFSET = 4 /* Offset of saved PC relative to our FP */ @@ -22,7 +19,7 @@ PC_OFFSET = 4 /* Offset of saved PC relative to our FP */ #define COMMENT(n) /* noop */ -#define nicesize(x) ((x)==INT8 || (x)==INT16 || (x)==INT32 || (x)==INT64) +#define nicesize(x) ((x)==BYTE || (x)==WORD || (x)==QUAD) @@ -103,6 +100,7 @@ INSTRUCTIONS add GPRI:wo, GPRI:ro, GPRI+CONST:ro. add GPRI:rw, GPRI+CONST:ro. and GPRI:rw, GPRI+CONST:ro. + asr GPRI:rw, GPRI+CONST:ro. beq "b.eq" LABEL:ro. bne "b.ne" LABEL:ro. bgt "b.gt" LABEL:ro. @@ -116,6 +114,11 @@ INSTRUCTIONS eor GPRI:rw, GPRI+CONST:ro. exts GPRI:wo, GPRI:ro, GPRI+CONST:ro. exts GPRI:rw, GPRI+CONST:ro. + fadd GPRI:wo, GPRI:ro, GPRI:ro. + fcmp GPRI:wo, GPRI:ro, GPRI:ro. + fdiv GPRI:wo, GPRI:ro, GPRI:ro. + fmul GPRI:wo, GPRI:ro, GPRI:ro. + fsub GPRI:wo, GPRI:ro, GPRI:ro. ld GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. ldb GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. ldh GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. @@ -123,6 +126,7 @@ INSTRUCTIONS lea GPRI:wo, LABEL:ro. lsl GPRI:rw, GPRI+CONST:ro. lsl GPRI:wo, GPRI:ro, GPRI+CONST:ro. 
+ lsr GPRI:rw, GPRI+CONST:ro. mov GPRI:wo, GPRI+CONST:ro. mul GPRI:rw, GPRI+CONST:ro. neg GPRI:rw, GPRI+CONST:ro. @@ -189,9 +193,9 @@ STACKINGRULES from GPR to STACK uses STACKABLE - gen - move %1, %a - push %a + gen + move %1, %a + push %a from GPR to STACK gen @@ -200,9 +204,9 @@ STACKINGRULES from GPRE to STACK uses STACKABLE - gen - move %1, %a - push %a + gen + move %1, %a + push %a from GPRE to STACK gen @@ -222,6 +226,7 @@ STACKINGRULES sub SP, SP, {CONST, 4} st SCRATCH, {GPROFFSET, SP, 0} + COERCIONS @@ -259,22 +264,22 @@ PATTERNS pat loc /* Load constant */ yields {CONST, $1} - pat dup $1<=INT32 /* Duplicate word on top of stack */ - with GPR + pat dup $1<=QUAD /* Duplicate word on top of stack */ + with GPRI yields %1 %1 - pat dup $1==INT64 /* Duplicate double-word on top of stack */ - with GPR GPR - yields %2 %1 %2 %1 - - pat exg $1==INT32 /* Exchange top two words on stack */ - with GPR GPR + pat dup $1<=(2*QUAD) /* Duplicate word pair on top of stack */ + with GPRI GPRI + yields %1 %2 %1 %2 + + pat exg $1==QUAD /* Exchange top two words on stack */ + with GPRI GPRI yields %1 %2 #if 0 pat stl lol $1==$2 /* Store then load local */ leaving - dup INT32 + dup QUAD stl $1 #endif @@ -298,7 +303,7 @@ PATTERNS loc $2 cii - pat loc loc cii loc loc cii $2==INT32 && $5==INT32 && $4<$2 /* madness, generated by the C compiler */ + pat loc loc cii loc loc cii $2==QUAD && $5==QUAD && $4<$2 /* madness, generated by the C compiler */ leaving loc $4 loc $5 @@ -319,20 +324,20 @@ PATTERNS pat loc loc cui $1==$2 /* unsigned X -> signed X */ /* nop */ - pat loc loc cui $1==INT8 && $2==INT32 /* unsigned char -> signed int */ + pat loc loc cui $1==BYTE && $2==QUAD /* unsigned char -> signed int */ /* nop */ - pat loc loc cui $1==INT16 && $2==INT32 /* unsigned short -> signed int */ + pat loc loc cui $1==WORD && $2==QUAD /* unsigned short -> signed int */ /* nop */ - pat loc loc cii $1==INT8 && $2>INT8 /* signed char -> anything */ + pat loc loc cii $1==BYTE && $2>BYTE /* 
signed char -> anything */ with REG uses reusing %1, REG=%1 gen exts %a, {CONST, 8} yields %a - pat loc loc cii $1==INT16 && $2>INT16 /* signed short -> anything */ + pat loc loc cii $1==WORD && $2>WORD /* signed short -> anything */ with REG uses reusing %1, REG=%1 gen @@ -362,7 +367,7 @@ PATTERNS pat ldl /* Load double-word from local */ leaving lal $1 - loi INT32*2 + loi QUAD*2 pat stl inreg($1)>0 /* Store to local */ with CONST+GPRI @@ -378,7 +383,7 @@ PATTERNS pat sdl /* Store double-word to local */ leaving lal $1 - sti INT32*2 + sti QUAD*2 pat lil inreg($1)>0 /* Load from indirected local */ uses REG @@ -389,12 +394,12 @@ PATTERNS pat lil /* Load from indirected local */ leaving lol $1 - loi INT32 + loi QUAD pat sil /* Save to indirected local */ leaving lol $1 - sti INT32 + sti QUAD pat stl lol $1==$2 /* Save then load (generated by C compiler) */ leaving @@ -433,22 +438,12 @@ PATTERNS pat loe /* Load word external */ leaving lae $1 - loi INT32 + loi QUAD pat ste /* Store word external */ leaving lae $1 - sti INT32 - - pat lde /* Load double-word external */ - leaving - lae $1 - loi INT64 - - pat sde /* Store double-word external */ - leaving - lae $1 - sti INT64 + sti QUAD pat zre /* Zero external */ leaving @@ -478,28 +473,19 @@ PATTERNS pat lof /* Load word offsetted */ leaving adp $1 - loi INT32 + loi QUAD - pat ldf /* Load double-word offsetted */ - leaving - adp $1 - loi INT64 pat stf /* Store word offsetted */ leaving adp $1 - sti INT32 + sti QUAD - pat sdf /* Store double-word offsetted */ - leaving - adp $1 - sti INT64 - /* Loads and stores */ - pat loi $1==INT8 /* Load byte indirect */ + pat loi $1==BYTE /* Load byte indirect */ with GPR uses reusing %1, REG gen @@ -512,7 +498,7 @@ PATTERNS yields %a #if 0 - pat loi loc loc cii $1==INT16 && $2==INT16 && $3==INT32 /* Load half-word indirect and sign extend */ + pat loi loc loc cii $1==WORD && $2==WORD && $3==QUAD /* Load half-word indirect and sign extend */ with GPR uses REG gen @@ -529,7 +515,7 
@@ PATTERNS move {IND_RC_H_S, %1.reg, %1.off}, %a yields %a - pat loi $1==INT16 /* Load half-word indirect */ + pat loi $1==WORD /* Load half-word indirect */ with GPR uses REG gen @@ -547,7 +533,7 @@ PATTERNS yields %a #endif - pat loi $1==INT32 /* Load quad indirect */ + pat loi $1==QUAD /* Load quad indirect */ with GPR uses reusing %1, REG gen @@ -555,19 +541,10 @@ PATTERNS ld %a, {GPROFFSET, %a, 0} yields %a - pat loi $1==INT64 /* Load double-quad indirect */ - with GPRI - uses reusing %1, REG, REG - gen - add %a, %1, GP - ld %b, {GPROFFSET, %a, 4} - ld %a, {GPROFFSET, %a, 0} - yields %a %b - pat loi /* Load arbitrary size */ leaving loc $1 - los INT32 + los QUAD pat los /* Load arbitrary size */ with STACK @@ -575,7 +552,7 @@ PATTERNS gen bl {LABEL, ".los"} - pat sti $1==INT8 /* Store byte indirect */ + pat sti $1==BYTE /* Store byte indirect */ with GPR GPRI gen stb %2, {GPRGPR, %1, GP} @@ -583,32 +560,24 @@ PATTERNS gen stb %2, {GPRGPR, %1.reg, GP} - pat sti $1==INT16 /* Store half-word indirect */ + pat sti $1==WORD /* Store half-word indirect */ with GPR GPR uses REG gen add %a, %1, GP sth %2, {GPROFFSET, %a, 0} - pat sti $1==INT32 /* Store quad indirect */ + pat sti $1==QUAD /* Store quad indirect */ with GPR GPR uses REG gen add %a, %1, GP st %2, {GPROFFSET, %a, 0} - pat sti $1==INT64 /* Store double-quad indirect */ - with GPR GPR - uses REG - gen - add %a, %1, GP - st %1, {GPROFFSET, %a, 0} - st %2, {GPROFFSET, %a, 4} - pat sti /* Store arbitrary size */ leaving loc $1 - sts INT32 + sts QUAD pat sts /* Load arbitrary size */ with STACK @@ -671,7 +640,7 @@ PATTERNS /* Word arithmetic */ - pat adi $1==INT32 /* Add word (second + top) */ + pat adi $1==QUAD /* Add word (second + top) */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen @@ -683,21 +652,21 @@ PATTERNS add %a, %2 yields %a - pat sbi $1==INT32 /* Subtract word (second - top) */ + pat sbi $1==QUAD /* Subtract word (second - top) */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen sub %a, %1 
yields %a - pat ngi $1==INT32 /* Negate word */ + pat ngi $1==QUAD /* Negate word */ with GPRI uses reusing %1, REG=%1 gen neg %a, %a yields %a - pat and $1==INT32 /* AND word */ + pat and $1==QUAD /* AND word */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen @@ -709,7 +678,7 @@ PATTERNS and %a, %2 yields %a - pat ior $1==INT32 /* OR word */ + pat ior $1==QUAD /* OR word */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen @@ -721,7 +690,7 @@ PATTERNS or %a, %2 yields %a - pat xor $1==INT32 /* XOR word */ + pat xor $1==QUAD /* XOR word */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen @@ -733,21 +702,21 @@ PATTERNS eor %a, %2 yields %a - pat dvi $1==INT32 /* Divide word (second / top) */ + pat dvi $1==QUAD /* Divide word (second / top) */ with GPRI GPRI uses reusing %2, REG gen divs %a, %2, %1 yields %a - pat dvu $1==INT32 /* Divide unsigned word (second / top) */ + pat dvu $1==QUAD /* Divide unsigned word (second / top) */ with GPRI GPRI uses reusing %2, REG gen divu %a, %2, %1 yields %a - pat rmu $1==INT32 /* Remainder unsigned word (second % top) */ + pat rmu $1==QUAD /* Remainder unsigned word (second % top) */ with GPRI GPRI uses REG gen @@ -796,7 +765,7 @@ PATTERNS gen bl {LABEL, ".xor"} - pat com $1==INT32 /* NOT word */ + pat com $1==QUAD /* NOT word */ with AND_RR uses REG gen @@ -828,60 +797,50 @@ PATTERNS lsl %a, %1 yields %a -#if 0 pat sri $1==4 /* Shift right signed (second >> top) */ - with CONST GPR - uses reusing %2, REG - gen - srawi %a, %2, {CONST, %1.val & 0x1F} - yields %a - with GPR GPR - uses reusing %2, REG + with CONST+GPRI GPRI + uses reusing %2, REG=%2 gen - sraw %a, %2, %1 + asr %2, %1 yields %a pat sru $1==4 /* Shift right unsigned (second >> top) */ - with CONST GPR - uses reusing %2, REG - gen - rlwinm %a, %2, {CONST, 32-(%1.val & 0x1F)}, {CONST, (%1.val & 0x1F)}, {CONST, 31} - yields %a - with GPR GPR - uses reusing %2, REG + with CONST+GPRI GPRI + uses reusing %2, REG=%2 gen - srw %a, %2, %1 + lsr %2, %1 yields %a +#if 0 /* Arrays 
*/ - pat aar $1==INT32 /* Index array */ + pat aar $1==QUAD /* Index array */ with GPR3 GPR4 GPR5 gen bl {LABEL, ".aar4"} yields R3 - pat lae lar $2==INT32 && nicesize(rom($1, 3)) /* Load array */ + pat lae lar $2==QUAD && nicesize(rom($1, 3)) /* Load array */ leaving lae $1 - aar INT32 + aar QUAD loi rom($1, 3) - pat lar $1==INT32 /* Load array */ + pat lar $1==QUAD /* Load array */ with GPR3 GPR4 GPR5 STACK kills ALL gen bl {LABEL, ".lar4"} - pat lae sar $2==INT32 && nicesize(rom($1, 3)) /* Store array */ + pat lae sar $2==QUAD && nicesize(rom($1, 3)) /* Store array */ leaving lae $1 - aar INT32 + aar QUAD sti rom($1, 3) - pat sar $1==INT32 /* Store array */ + pat sar $1==QUAD /* Store array */ with GPR3 GPR4 GPR5 STACK kills ALL gen @@ -895,8 +854,8 @@ PATTERNS pat set defined($1) /* Create word with set bit */ leaving loc 1 - exg INT32 - sli INT32 + exg QUAD + sli QUAD pat set !defined($1) /* Create structure with set bit (variable) */ with GPR3 GPR4 STACK @@ -905,8 +864,8 @@ PATTERNS pat inn defined($1) /* Test for set bit */ leaving - set INT32 - and INT32 + set QUAD + and QUAD pat inn !defined($1) /* Test for set bit (variable) */ with GPR3 STACK @@ -918,21 +877,59 @@ PATTERNS /* Boolean resolutions */ - proc anyt example teq + proc cm_t example teq + with GPRI GPRI + uses reusing %1, REG + gen + cmp %1, %2 + mov %a, {CONST, 0} + add[1] %a, {CONST, 1} + yields %a + + pat cmu teq call cm_t("add.eq") /* top = (second == top) */ + pat cmu tne call cm_t("add.ne") /* top = (second != top) */ + pat cmu tlt call cm_t("add.lo") /* top = unsigned (second < top) */ + pat cmu tle call cm_t("add.ls") /* top = unsigned (second <= top) */ + pat cmu tgt call cm_t("add.hi") /* top = unsigned (second > top) */ + pat cmu tge call cm_t("add.hs") /* top = unsigned (second >= top) */ + pat cmi teq call cm_t("add.eq") /* top = (second == top) */ + pat cmi tne call cm_t("add.ne") /* top = (second != top) */ + pat cmi tlt call cm_t("add.lt") /* top = signed (second < top) */ + pat 
cmi tle call cm_t("add.le") /* top = signed (second <= top) */ + pat cmi tgt call cm_t("add.gt") /* top = signed (second > top) */ + pat cmi tge call cm_t("add.ge") /* top = signed (second >= top) */ + + proc cmf_t example teq + with GPRI GPRI + uses reusing %1, REG + gen + fcmp %a, %1, %2 + mov %a, {CONST, 0} + add[1] %a, {CONST, 1} + yields %a + + pat cmf teq call cmf_t("add.eq") /* top = float (second == top) */ + pat cmf tne call cmf_t("add.ne") /* top = float (second != top) */ + pat cmf tlt call cmf_t("add.lo") /* top = float (second < top) */ + pat cmf tle call cmf_t("add.ls") /* top = float (second <= top) */ + pat cmf tgt call cmf_t("add.hi") /* top = float (second > top) */ + pat cmf tge call cmf_t("add.hs") /* top = float (second >= top) */ + + proc fallback_t example teq with GPRI - uses reusing %1, REG=%1 + uses reusing %1, REG gen cmp %1, {CONST, 0} - mov %a, {CONST, 0} - add[1] %a, {CONST, 1} + mov %a, {CONST, 0} + add[1] %a, {CONST, 1} yields %a - pat cmu teq call anyt("add.eq") /* top = (top == 0) */ - pat cmu tne call anyt("add.ne") /* top = (top != 0) */ - pat cmu tlt call anyt("add.lo") /* top = unsigned (top < 0) */ - pat cmu tle call anyt("add.ls") /* top = unsigned (top <= 0) */ - pat cmu tgt call anyt("add.hi") /* top = unsigned (top > 0) */ - pat cmu tge call anyt("add.hs") /* top = unsigned (top >= 0) */ + pat teq call fallback_t("add.eq") /* top = (top == 0) */ + pat tne call fallback_t("add.ne") /* top = (top != 0) */ + pat tlt call fallback_t("add.lo") /* top = (top < 0) */ + pat tle call fallback_t("add.ls") /* top = (top <= 0) */ + pat tgt call fallback_t("add.hi") /* top = (top > 0) */ + pat tge call fallback_t("add.hs") /* top = (top >= 0) */ @@ -964,16 +961,29 @@ PATTERNS pat blt call anyz("b.lt") /* Branch if signed second < top */ pat ble call anyz("b.le") /* Branch if signed second <= top */ - proc anycmpb example cmu zeq + proc cmu_z example cmu zeq with GPR+CONST GPRI STACK gen cmp %2, %1 
beq[1] {LABEL, $2} - pat cmu zgt call anycmpb("b.hi") /* Branch if unsigned second > top */ - pat cmu zlt call anycmpb("b.lo") /* Branch if unsigned second < top */ - pat cmu zge call anycmpb("b.hs") /* Branch if unsigned second >= top */ - pat cmu zle call anycmpb("b.ls") /* Branch if unsigned second <= top */ + pat cmu zgt call cmu_z("b.hi") /* Branch if unsigned second > top */ + pat cmu zlt call cmu_z("b.lo") /* Branch if unsigned second < top */ + pat cmu zge call cmu_z("b.hs") /* Branch if unsigned second >= top */ + pat cmu zle call cmu_z("b.ls") /* Branch if unsigned second <= top */ + + proc cmf_z example cmu zeq + with GPRI GPRI STACK + gen + fcmp %2, %2, %1 + beq[1] {LABEL, $2} + + pat cmf zeq call cmf_z("b.eq") /* Branch if float second == top */ + pat cmf zne call cmf_z("b.ne") /* Branch if float second != top */ + pat cmf zgt call cmf_z("b.gt") /* Branch if float second > top */ + pat cmf zlt call cmf_z("b.lt") /* Branch if float second < top */ + pat cmf zge call cmf_z("b.ge") /* Branch if float second >= top */ + pat cmf zle call cmf_z("b.le") /* Branch if float second <= top */ #if 0 @@ -993,54 +1003,58 @@ PATTERNS pat cmp /* Compare pointers */ leaving - cmu INT32 + cmu QUAD - pat cms $1==INT32 /* Compare blocks (word sized) */ + pat cms $1==QUAD /* Compare blocks (word sized) */ leaving - cmi INT32 + cmi QUAD - proc anycmf64 example teq - with STACK - uses REG - gen - bl {LABEL, ".cmf8"} - mov %a, {CONST, 0} - add[1] %a, {CONST, 1} - yields %a - pat cmf tlt $1==FLOAT64 call anyt("add.lo") /* top = unsigned (top < 0) */ - pat cmf tle $1==FLOAT64 call anyt("add.ls") /* top = unsigned (top <= 0) */ - pat cmf tgt $1==FLOAT64 call anyt("add.hi") /* top = unsigned (top > 0) */ - pat cmf tge $1==FLOAT64 call anyt("add.hs") /* top = unsigned (top >= 0) */ - -#if 0 /* Other branching and labelling */ - pat lab topeltsize($1)==4 && !fallthrough($1) +#if 0 + pat lab topeltsize($1)<=4 && !fallthrough($1) gen labeldef $1 - yields R3 + yields R0 - pat lab 
topeltsize($1)==4 && fallthrough($1) - with GPR3 - gen - labeldef $1 - yields %1 + pat lab topeltsize($1)<=4 && fallthrough($1) + with GPR0 + gen + labeldef $1 + yields %1 - pat lab topeltsize($1)!=4 + pat lab topeltsize($1)>4 with STACK - kills ALL - gen - labeldef $1 + kills ALL + gen + labeldef $1 + + pat bra topeltsize($1)<=4 /* Unconditional jump with TOS register */ + with GPR0 STACK + gen + b {LABEL, $1} + + pat bra topeltsize($1)>4 /* Unconditional jump without TOS register */ + with STACK + gen + b {LABEL, $1} #endif - pat bra /* Unconditional jump */ + pat lab with STACK - gen - b {LABEL, $1} + kills ALL + gen + labeldef $1 + + pat bra + with STACK + gen + b {LABEL, $1} + @@ -1058,31 +1072,27 @@ PATTERNS gen bl %1 - pat lfr $1==INT32 /* Load function result, word */ + pat lfr $1==QUAD /* Load function result, word */ yields R0 - pat lfr $1==INT64 /* Load function result, double-word */ - yields R0 R1 - pat ret $1==0 /* Return from procedure */ gen return - mov SP, FP - pop FP, PC + mov SP, FP + pop FP, PC - pat ret $1==INT32 /* Return from procedure, word */ + pat ret $1<=QUAD /* Return from procedure, word */ with GPR0 - gen - return - mov SP, FP - pop FP, PC - - pat ret $1==INT64 /* Return from procedure, double-word */ - with GPR0 GPR1 - gen - return - mov SP, FP - pop FP, PC + gen + return + mov SP, FP + pop FP, PC + with STACK + gen + pop R0 + return + mov SP, FP + pop FP, PC pat blm /* Block move constant length */ with GPRI GPRI STACK @@ -1253,3 +1263,68 @@ PATTERNS loc $1 ass + + +/* Floating point */ + + pat ngf /* Negate float */ + leaving + loc 0 + exg QUAD + sbf QUAD + + proc simple_f example adf + with GPRI GPRI + uses reusing %1, REG + gen + fadd[1] %a, %2, %1 + yields %a + + pat adf call simple_f("fadd") /* Float add (second + top) */ + pat sbf call simple_f("fsub") /* Float subtract (second - top) */ + pat mlf call simple_f("fmul") /* Float multiply (second * top) */ + pat dvf call simple_f("fdiv") /* Float divide (second / top) */ + 
+ pat loc loc cff $1==$2 && $1==QUAD /* Convert float to float */ + leaving + nop + + pat loc loc cfi $1==$2 && $1==QUAD /* Convert float -> integer */ + with GPR0 + gen + bl {LABEL, ".cfi"} + yields R0 + + pat loc loc cfu $1==$2 && $1==QUAD /* Convert float -> unsigned */ + with GPR0 + gen + bl {LABEL, ".cfu"} + yields R0 + + pat loc loc cif $1==$2 && $1==QUAD /* Convert integer -> float */ + with GPR0 + gen + bl {LABEL, ".cif"} + yields R0 + + pat loc loc cuf $1==$2 && $1==QUAD /* Convert unsigned -> float */ + with GPR0 + gen + bl {LABEL, ".cuf"} + yields R0 + + pat fef /* Split float */ + with GPR0 + kills GPR1 + gen + bl {LABEL, ".fef"} + yields R0 R1 + + pat fif /* Multiply float and split (?) */ + with GPRI GPRI + kills GPR0, GPR1 + gen + fmul R0, %2, %1 + bl {LABEL, ".fef"} + yields R0 R1 + diff --git a/plat/rpi/build.mk b/plat/rpi/build.mk index 679be61ea..9716eb058 100644 --- a/plat/rpi/build.mk +++ b/plat/rpi/build.mk @@ -12,6 +12,7 @@ OPTIMISATION := -O D := plat/rpi/ platform-headers := \ + unistd.h \ ack/config.h platform-libsys := \ diff --git a/plat/rpi/descr b/plat/rpi/descr index fd9cc4dca..23862840d 100644 --- a/plat/rpi/descr +++ b/plat/rpi/descr @@ -7,7 +7,7 @@ var p=4 var s=2 var l=4 var f=4 -var d=8 +var d=4 var ARCH=vc4 var PLATFORM=rpi var PLATFORMDIR={EM}/share/ack/{PLATFORM} -- 2.34.1