diff --git a/README.md b/README.md
index a39c801..b0a3c73 100644
--- a/README.md
+++ b/README.md
@@ -27,10 +27,6 @@ The loader will fallback to the default PowerPC processor if the Gekko/Broadway
## Usage
- Choose the `Gekko/Broadway/Espresso` language if asked
-# Eclipse
-
-To be able open this module in eclipse, you need to create a new Ghidra Module and copy the `.classpath`, `.project` and `.settings` to the root of this repository.
-
# Credits
- Based on https://github.com/Relys/rpl2elf
diff --git a/build.gradle b/build.gradle
index e20e01e..93ce401 100644
--- a/build.gradle
+++ b/build.gradle
@@ -99,7 +99,12 @@ buildExtension {
exclude 'certification.manifest'
exclude 'dist/**'
exclude 'bin/**'
+ exclude 'lib/**'
+ exclude 'gradle/**'
exclude 'src/**'
+ exclude '.git/**'
+ exclude '.idea/**'
+ exclude '.github/**'
exclude '.gradle/**'
exclude 'gradle/**'
exclude 'gradlew'
@@ -119,6 +124,8 @@ buildExtension {
}
}
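+// Gradle 7+ fails Copy-type tasks that encounter duplicate entries unless an
+// explicit DuplicatesStrategy is set, so exclude duplicates across all of them.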
+tasks.withType(Copy).all { duplicatesStrategy 'exclude' }
+
jar {
duplicatesStrategy(DuplicatesStrategy.EXCLUDE)
}
diff --git a/data/LICENSE b/data/LICENSE
new file mode 100644
index 0000000..989e2c5
--- /dev/null
+++ b/data/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/data/buildLanguage.xml b/data/buildLanguage.xml
new file mode 100644
index 0000000..1e2a627
--- /dev/null
+++ b/data/buildLanguage.xml
@@ -0,0 +1,50 @@
+<!-- XML markup not preserved in this capture -->
diff --git a/data/languages/RPX.opinion b/data/languages/RPX.opinion
index 5ec29b7..aa33b5d 100644
--- a/data/languages/RPX.opinion
+++ b/data/languages/RPX.opinion
@@ -1,6 +1,6 @@
<!-- one-line XML change not preserved in this capture -->
diff --git a/data/languages/lmwInstructions.sinc b/data/languages/lmwInstructions.sinc
new file mode 100644
index 0000000..d213a8e
--- /dev/null
+++ b/data/languages/lmwInstructions.sinc
@@ -0,0 +1,101 @@
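+# lmw loads registers rD through r31 from consecutive words in memory. The
+# LDMRn constructors below unroll that variable-length load at disassembly
+# time: the lsmul context field (set from the instruction's rD field) cuts the
+# chain just below the starting register, and each remaining link builds the
+# previous one and then loads its own register via the loadReg macro.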
+LDMR0: is lsmul=1 {}
+LDMR0: is epsilon { loadReg(r0); }
+
+LDMR1: is lsmul=2 {}
+LDMR1: is LDMR0 { build LDMR0; loadReg(r1); }
+
+LDMR2: is lsmul=3 {}
+LDMR2: is LDMR1 { build LDMR1; loadReg(r2); }
+
+LDMR3: is lsmul=4 {}
+LDMR3: is LDMR2 { build LDMR2; loadReg(r3); }
+
+LDMR4: is lsmul=5 {}
+LDMR4: is LDMR3 { build LDMR3; loadReg(r4); }
+
+LDMR5: is lsmul=6 {}
+LDMR5: is LDMR4 { build LDMR4; loadReg(r5); }
+
+LDMR6: is lsmul=7 {}
+LDMR6: is LDMR5 { build LDMR5; loadReg(r6); }
+
+LDMR7: is lsmul=8 {}
+LDMR7: is LDMR6 { build LDMR6; loadReg(r7); }
+
+LDMR8: is lsmul=9 {}
+LDMR8: is LDMR7 { build LDMR7; loadReg(r8); }
+
+LDMR9: is lsmul=10 {}
+LDMR9: is LDMR8 { build LDMR8; loadReg(r9); }
+
+LDMR10: is lsmul=11 {}
+LDMR10: is LDMR9 { build LDMR9; loadReg(r10); }
+
+LDMR11: is lsmul=12 {}
+LDMR11: is LDMR10 { build LDMR10; loadReg(r11); }
+
+LDMR12: is lsmul=13 {}
+LDMR12: is LDMR11 { build LDMR11; loadReg(r12); }
+
+LDMR13: is lsmul=14 {}
+LDMR13: is LDMR12 { build LDMR12; loadReg(r13); }
+
+LDMR14: is lsmul=15 {}
+LDMR14: is LDMR13 { build LDMR13; loadReg(r14); }
+
+LDMR15: is lsmul=16 {}
+LDMR15: is LDMR14 { build LDMR14; loadReg(r15); }
+
+LDMR16: is lsmul=17 {}
+LDMR16: is LDMR15 { build LDMR15; loadReg(r16); }
+
+LDMR17: is lsmul=18 {}
+LDMR17: is LDMR16 { build LDMR16; loadReg(r17); }
+
+LDMR18: is lsmul=19 {}
+LDMR18: is LDMR17 { build LDMR17; loadReg(r18); }
+
+LDMR19: is lsmul=20 {}
+LDMR19: is LDMR18 { build LDMR18; loadReg(r19); }
+
+LDMR20: is lsmul=21 {}
+LDMR20: is LDMR19 { build LDMR19; loadReg(r20); }
+
+LDMR21: is lsmul=22 {}
+LDMR21: is LDMR20 { build LDMR20; loadReg(r21); }
+
+LDMR22: is lsmul=23 {}
+LDMR22: is LDMR21 { build LDMR21; loadReg(r22); }
+
+LDMR23: is lsmul=24 {}
+LDMR23: is LDMR22 { build LDMR22; loadReg(r23); }
+
+LDMR24: is lsmul=25 {}
+LDMR24: is LDMR23 { build LDMR23; loadReg(r24); }
+
+LDMR25: is lsmul=26 {}
+LDMR25: is LDMR24 { build LDMR24; loadReg(r25); }
+
+LDMR26: is lsmul=27 {}
+LDMR26: is LDMR25 { build LDMR25; loadReg(r26); }
+
+LDMR27: is lsmul=28 {}
+LDMR27: is LDMR26 { build LDMR26; loadReg(r27); }
+
+LDMR28: is lsmul=29 {}
+LDMR28: is LDMR27 { build LDMR27; loadReg(r28); }
+
+LDMR29: is lsmul=30 {}
+LDMR29: is LDMR28 { build LDMR28; loadReg(r29); }
+
+LDMR30: is lsmul=31 {}
+LDMR30: is LDMR29 { build LDMR29; loadReg(r30); }
+
+LDMR31: is LDMR30 { build LDMR30; loadReg(r31); }
+
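+# The disassembly action seeds lsmul from the rD field (BITS_21_25), so the
+# LDMR31 chain expands to loads for rD..r31 only, starting at tea.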
+:lmw D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=46 & D & BITS_21_25 & dPlusRaOrZeroAddress & LDMR31 [ lsmul = BITS_21_25; ]
+{
+ tea = dPlusRaOrZeroAddress;
+ build LDMR31;
+}
+
diff --git a/data/languages/lswInstructions.sinc b/data/languages/lswInstructions.sinc
new file mode 100644
index 0000000..4f3c081
--- /dev/null
+++ b/data/languages/lswInstructions.sinc
@@ -0,0 +1,185 @@
+#lswi r0,0,7 0x7c 00 3c aa
+#lswi r0,r2,7 0x7c 02 3c aa
+
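+# DYN_Dn exports the register n places after rD, wrapping past r31 back to r0
+# via the (BITS_21_25 + n) & 0x1f arithmetic, since lswi fills successive
+# registers with each word it loads.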
+DYN_D1: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 1)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+DYN_D2: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 2)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+DYN_D3: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 3)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+DYN_D4: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 4)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+DYN_D5: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 5)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+DYN_D6: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 6)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+DYN_D7: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 7)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
+
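+# The 5-bit NB byte count (bits 11-15) is split for matching: BITS_13_15 is
+# the whole-word count (NB >> 2) and BH the leftover bytes (NB & 3), so each
+# constructor below covers one word count, with or without a trailing partial
+# word. NB=0 encodes 32 bytes, handled by the first (8 full registers) case.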
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=0 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 & DYN_D7
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+ loadRegister(DYN_D5,ea);
+ loadRegister(DYN_D6,ea);
+ loadRegister(DYN_D7,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=0 & BH & XOP_1_10=597 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ sa:1 = BH;
+ loadRegisterPartial(D,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=1 & BH=0 & XOP_1_10=597 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=1 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D1,ea,sa);
+}
+
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=2 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=2 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D2,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=3 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=3 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D3,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=4 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=4 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D4,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=5 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=5 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D5,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=6 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+ loadRegister(DYN_D5,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=6 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+ loadRegister(DYN_D5,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D6,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=7 & BH=0 & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+ loadRegister(DYN_D5,ea);
+ loadRegister(DYN_D6,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=7 & BH & XOP_1_10=597 & BIT_0=0
+ & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 & DYN_D7
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+ loadRegister(D,ea);
+ loadRegister(DYN_D1,ea);
+ loadRegister(DYN_D2,ea);
+ loadRegister(DYN_D3,ea);
+ loadRegister(DYN_D4,ea);
+ loadRegister(DYN_D5,ea);
+ loadRegister(DYN_D6,ea);
+ sa:1 = BH;
+ loadRegisterPartial(DYN_D7,ea,sa);
+}
diff --git a/data/languages/ppc_embedded.sinc b/data/languages/ppc_embedded.sinc
new file mode 100644
index 0000000..be43315
--- /dev/null
+++ b/data/languages/ppc_embedded.sinc
@@ -0,0 +1,219 @@
+# These instructions are identified as part of the PowerPC Embedded Architecture.
+
+#dcba 0,r0 0x7c 00 05 ec
+:dcba RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=758 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockAllocate(ea);
+}
+
+#dcbf 0,r0 0x7c 00 00 ac
+:dcbf RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=86 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockFlush(ea);
+}
+
+#dcbi 0,r0 0x7c 00 03 ac
+:dcbi RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=470 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockInvalidate(ea);
+}
+
+#dcbst 0,r0 0x7c 00 00 6c
+:dcbst RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=54 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockStore(ea);
+}
+
+#dcbt 0,r0 0x7c 00 02 2c
+:dcbt RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=278 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockTouch(ea);
+}
+
+#dcbtst 0,r0 0x7c 00 01 ec
+:dcbtst RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=246 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockTouchForStore(ea);
+}
+
+#dcbz 0,r0 0x7c 00 07 ec
+:dcbz RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=1014 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheBlockClearToZero(ea);
+}
+
+@ifndef IS_ISA
+# this is equivalent to "mbar 0"
+#eieio 0x7c 00 06 ac
+:eieio is $(NOTVLE) & OP=31 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=854 & BIT_0=0
+{
+ enforceInOrderExecutionIO();
+}
+@endif
+
+#icbi r0,r0 0x7c 00 07 ac
+:icbi RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=982 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ instructionCacheBlockInvalidate(ea);
+}
+
+#icbt 0,r0 0x7c 00 02 0c
+:icbt BITS_21_24,RA_OR_ZERO,B is OP=31 & BIT_25=0 & BITS_21_24 & RA_OR_ZERO & B & XOP_1_10=22 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ instructionCacheBlockTouch(ea);
+}
+
+#isync 0x4c 00 01 2c
+:isync is $(NOTVLE) & OP=19 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=150 & BIT_0=0
+{
+ instructionSynchronize();
+}
+
+#mfdcr r0,DCRN 0x7c 00 02 86
+:mfdcr D, DCRN is OP=31 & D & DCRN & XOP_1_10=323 & BIT_0=0
+{
+ D = DCRN;
+}
+
+#mfmsr r0 0x7c 00 00 a6
+:mfmsr D is OP=31 & D & BITS_11_20=0 & XOP_1_10=83 & BIT_0=0
+{
+ D = MSR;
+}
+
+#mfspr r0 0x7c 00 02 a6
+:mfspr D,SPRVAL is OP=31 & D & SPRVAL & XOP_1_10=339 & BIT_0=0
+{
+ D = SPRVAL;
+}
+
+#mftb r0,TBLr 0x7c 0c 42 e6
+:mftb D,TBLr is $(NOTVLE) & OP=31 & D & TBR=392 & TBLr & XOP_1_10=371 & BIT_0=0
+{
+ D = TBLr;
+}
+#mftb r0,TBUr 0x7c 0d 42 e6
+:mftb D,TBUr is $(NOTVLE) & OP=31 & D & TBR=424 & TBUr & XOP_1_10=371 & BIT_0=0
+{
+ D = TBUr;
+}
+
+#mtdcr DCRN,r0 0x7c 00 03 86
+:mtdcr DCRN, D is OP=31 & D & DCRN & XOP_1_10=451 & BIT_0=0
+{
+ DCRN = D;
+}
+
+#mtmsr r0,0 0x7c 00 01 24
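+# With L=0, mtmsr leaves the masked-out MSR bits untouched; IR (bit 58, shift 5
+# here) and DR (bit 59, shift 4) are each ORed with bit 49, following the Power
+# ISA pseudocode for mtmsr.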
+:mtmsr S,0 is OP=31 & S & BITS_17_20=0 & MSR_L=0 & BITS_11_15=0 & XOP_1_10=146 & BIT_0=0
+{
+ bit58:$(REGISTER_SIZE) = (S >> 5) & 1; #bit 58
+ bit49:$(REGISTER_SIZE) = (S >> 14)& 1; #bit 49
+ bit59:$(REGISTER_SIZE) = (S >> 4) & 1; #bit 59
+@ifdef BIT_64
+ tmp:8 = S & 0x00000000ffff6fcf; #0b00000000000000000000000000000000 1111 1111 1111 1111 0110 1111 1100 1111
+	tmp = tmp | ((bit58 | bit49) << 5);
+	tmp = tmp | ((bit59 | bit49) << 4);
+ MSR = MSR & 0xffffffff00009030 | tmp;
+@else
+ tmp:4 = S & 0xffff6fcf;
+	tmp = tmp | ((bit58 | bit49) << 5);
+	tmp = tmp | ((bit59 | bit49) << 4);
+ MSR = MSR & 0x00009000 | tmp;
+@endif
+}
+
+#mtmsr r0,1 0x7c 01 01 24
+:mtmsr S,1 is OP=31 & S & BITS_17_20=0 & MSR_L=1 & BITS_11_15=0 & XOP_1_10=146 & BIT_0=0
+{
+@ifdef BIT_64
+ mask:8 = 0x000000000000fffe;
+@else
+ mask:4 = 0x0000fffe;
+@endif
+ MSR = (MSR & ~mask) | (S & mask);
+}
+
+#mtspr spr000,r0 0x7c 00 02 a6
+:mtspr SPRVAL,S is OP=31 & SPRVAL & S & XOP_1_10=467 & BIT_0=0
+{
+ SPRVAL = S;
+}
+
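+# SPRVAL field 0x100 encodes SPR 8 (LR): a move into LR sets the linkreg
+# context bit on the following instruction via globalset, letting a following
+# branch be treated as a call (see the linkreg context definition); the third
+# constructor clears the bit again when it was already set.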
+:mtspr SPRVAL,S is OP=31 & BITS_11_20=0x100 & BITS_21_25=0 & SPRVAL & S & XOP_1_10=467 & BIT_0=0
+ [ linkreg=1; globalset(inst_next,linkreg); ]
+{
+ SPRVAL = S;
+}
+
+:mtspr SPRVAL,S is linkreg=1 & OP=31 & BITS_11_20=0x100 & BITS_21_25=0 & SPRVAL & S & XOP_1_10=467 & BIT_0=0
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ SPRVAL = S;
+}
+
+:rfci is $(NOTVLE) & OP=19 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=51 & BIT_0=0
+{
+ MSR = returnFromCriticalInterrupt(MSR, CSRR1);
+ local ra = CSRR0;
+ return[ra];
+
+}
+
+#rfi 0x4c 00 00 64
+:rfi is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=50 & BIT_0=0
+{
+ MSR = returnFromInterrupt(MSR, SRR1);
+ local ra = SRR0;
+ return[ra];
+}
+
+
+#tlbre 0x7c 00 07 64
+:tlbre is OP=31 & XOP_1_10=946
+{
+ TLBRead();
+}
+
+#tlbsx r0,r0,r0 0x7c 00 07 24
+:tlbsx D,RA_OR_ZERO,B is OP=31 & D & B & XOP_1_10=914 & RA_OR_ZERO & Rc=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ D = TLBSearchIndexed(D,ea);
+}
+
+#tlbsx. r0,r0,r0 0x7c 00 07 25
+:tlbsx. D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & B & XOP_1_10=914 & RA_OR_ZERO & Rc=1
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ D = TLBSearchIndexed(D,ea);
+ cr0flags(D);
+}
+
+#tlbwe 0x7c 00 07 a4
+:tlbwe D,A,B_BITS is OP=31 & D & A & B_BITS & XOP_1_10=978
+{
+ D = TLBWrite(D,A,B_BITS:1);
+}
+
+
+#wrtee r0 0x7c 00 01 06
+:wrtee S is OP=31 & S & XOP_1_10=131
+{
+ WriteExternalEnable(S);
+}
+
+#wrteei 0 0x7c 00 01 46
+:wrteei BIT_15 is OP=31 & BIT_15 & XOP_1_10=163
+{
+ WriteExternalEnableImmediate(BIT_15:1);
+}
diff --git a/data/languages/ppc_gekko_broadway.cspec b/data/languages/ppc_gekko_broadway.cspec
new file mode 100644
index 0000000..be9f188
--- /dev/null
+++ b/data/languages/ppc_gekko_broadway.cspec
@@ -0,0 +1,117 @@
+<!-- XML markup not preserved in this capture -->
diff --git a/data/languages/ppc_gekko_broadway.ldefs b/data/languages/ppc_gekko_broadway.ldefs
new file mode 100644
index 0000000..4e1b276
--- /dev/null
+++ b/data/languages/ppc_gekko_broadway.ldefs
@@ -0,0 +1,19 @@
+<!-- XML markup not preserved in this capture; description element: -->
+        PowerPC 32-bit big endian (Gekko/Broadway/Espresso variant)
diff --git a/data/languages/ppc_gekko_broadway.pspec b/data/languages/ppc_gekko_broadway.pspec
new file mode 100644
index 0000000..c89ae13
--- /dev/null
+++ b/data/languages/ppc_gekko_broadway.pspec
@@ -0,0 +1,1156 @@
+<!-- XML markup not preserved in this capture -->
diff --git a/data/languages/ppc_gekko_broadway.slaspec b/data/languages/ppc_gekko_broadway.slaspec
new file mode 100644
index 0000000..6324b8d
--- /dev/null
+++ b/data/languages/ppc_gekko_broadway.slaspec
@@ -0,0 +1,1824 @@
+# PowerPC assembly SLA spec for the Gekko and Broadway variant
+
+@define ENDIAN "big"
+
+define endian=$(ENDIAN);
+define alignment=2;
+
+@define REGISTER_SIZE "4"
+
+@define CTR_OFFSET "32"
+
+# -size: How many bytes make up an address
+define space ram type=ram_space size=$(REGISTER_SIZE) default;
+
+# -size: How many bytes do we need for register addressing
+define space register type=register_space size=4;
+
+# General registers (some pcode that follows depends on these registers being at
+# offset 0)
+
+define register offset=0 size=$(REGISTER_SIZE) [
+ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15
+ r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 r30 r31 ];
+
+# XER flags
+define register offset=0x400 size=1 [ xer_so xer_ov xer_ov32 xer_ca xer_ca32 xer_count ];
+
+define register offset=0x500 size=1 [ fp_fx fp_fex fp_vx fp_ox
+ fp_ux fp_zx fp_xx fp_vxsnan
+ fp_vxisi fp_vxidi fp_vxzdz fp_vximz
+ fp_vxvc fp_fr fp_fi fp_c
+ fp_cc0 fp_cc1 fp_cc2 fp_cc3
+ fp_reserve1 fp_vxsoft fp_vxsqrt fp_vxcvi
+ fp_ve fp_oe fp_ue fp_ze
+ fp_xe fp_ni fp_rn0 fp_rn1 ];
+
+define register offset = 0x700 size =$(REGISTER_SIZE) [MSR];
+define register offset = 0x720 size=$(REGISTER_SIZE) [RESERVE_ADDRESS];
+define register offset = 0x728 size=1 [RESERVE];
+define register offset = 0x730 size=1 [RESERVE_LENGTH];
+
+# Program Counter register: this register is not actually visible in the
+# PowerPC API, but it is needed to create a consistent model for the debugger
+define register offset=0x780 size=$(REGISTER_SIZE) pc;
+
+@define SEG_REGISTER_BASE "0x800"
+# Segment Registers
+define register offset=$(SEG_REGISTER_BASE) size=4 [ sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 sr8 sr9 sr10 sr11 sr12 sr13 sr14 sr15 ];
+
+# Condition register flags
+define register offset=0x900 size=1 [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 ];
+define register offset=0x900 size=8 [ crall ];
+
+define register offset=0x980 size=$(REGISTER_SIZE) [ tea ];
+
+# Fake storage used to help preserve r2 across function calls within the decompiler (see appropriate cspec)
+define register offset=0x988 size=$(REGISTER_SIZE) [ r2Save ];
+
+# Special Purpose Registers are defined with generic names with the exception of XER, LR, CTR, SRR0, SRR1, TBL(r/w), TBU(r/w)
+# These names may be replaced within register_data section within a PPC variant's pspec file
+define register offset=0x1000 size=$(REGISTER_SIZE)
+ [ spr000 XER spr002 spr003 spr004 spr005 spr006 spr007 LR CTR spr00a spr00b spr00c spr00d spr00e spr00f
+ spr010 spr011 spr012 spr013 spr014 spr015 spr016 spr017 spr018 spr019 SRR0 SRR1 spr01c spr01d spr01e spr01f
+ spr020 spr021 spr022 spr023 spr024 spr025 spr026 spr027 spr028 spr029 spr02a spr02b spr02c spr02d spr02e spr02f
+ spr030 spr031 spr032 spr033 spr034 spr035 spr036 spr037 spr038 spr039 CSRR0 CSRR1 spr03c spr03d spr03e spr03f
+ spr040 spr041 spr042 spr043 spr044 spr045 spr046 spr047 spr048 spr049 spr04a spr04b spr04c spr04d spr04e spr04f
+ spr050 spr051 spr052 spr053 spr054 spr055 spr056 spr057 spr058 spr059 spr05a spr05b spr05c spr05d spr05e spr05f
+ spr060 spr061 spr062 spr063 spr064 spr065 spr066 spr067 spr068 spr069 spr06a spr06b spr06c spr06d spr06e spr06f
+ spr070 spr071 spr072 spr073 spr074 spr075 spr076 spr077 spr078 spr079 spr07a spr07b spr07c spr07d spr07e spr07f
+ spr080 spr081 spr082 spr083 spr084 spr085 spr086 spr087 spr088 spr089 spr08a spr08b spr08c spr08d spr08e spr08f
+ spr090 spr091 spr092 spr093 spr094 spr095 spr096 spr097 spr098 spr099 spr09a spr09b spr09c spr09d spr09e spr09f
+ spr0a0 spr0a1 spr0a2 spr0a3 spr0a4 spr0a5 spr0a6 spr0a7 spr0a8 spr0a9 spr0aa spr0ab spr0ac spr0ad spr0ae spr0af
+ spr0b0 spr0b1 spr0b2 spr0b3 spr0b4 spr0b5 spr0b6 spr0b7 spr0b8 spr0b9 spr0ba spr0bb spr0bc spr0bd spr0be spr0bf
+ spr0c0 spr0c1 spr0c2 spr0c3 spr0c4 spr0c5 spr0c6 spr0c7 spr0c8 spr0c9 spr0ca spr0cb spr0cc spr0cd spr0ce spr0cf
+ spr0d0 spr0d1 spr0d2 spr0d3 spr0d4 spr0d5 spr0d6 spr0d7 spr0d8 spr0d9 spr0da spr0db spr0dc spr0dd spr0de spr0df
+ spr0e0 spr0e1 spr0e2 spr0e3 spr0e4 spr0e5 spr0e6 spr0e7 spr0e8 spr0e9 spr0ea spr0eb spr0ec spr0ed spr0ee spr0ef
+ spr0f0 spr0f1 spr0f2 spr0f3 spr0f4 spr0f5 spr0f6 spr0f7 spr0f8 spr0f9 spr0fa spr0fb spr0fc spr0fd spr0fe spr0ff
+ spr100 spr101 spr102 spr103 spr104 spr105 spr106 spr107 spr108 spr109 spr10a spr10b TBLr TBUr spr10e spr10f
+ spr110 spr111 spr112 spr113 spr114 spr115 spr116 spr117 spr118 spr119 spr11a spr11b TBLw TBUw spr11e spr11f
+ spr120 spr121 spr122 spr123 spr124 spr125 spr126 spr127 spr128 spr129 spr12a spr12b spr12c spr12d spr12e spr12f
+ spr130 spr131 spr132 spr133 spr134 spr135 spr136 spr137 spr138 spr139 spr13a spr13b spr13c spr13d spr13e spr13f
+ spr140 spr141 spr142 spr143 spr144 spr145 spr146 spr147 spr148 spr149 spr14a spr14b spr14c spr14d spr14e spr14f
+ spr150 spr151 spr152 spr153 spr154 spr155 spr156 spr157 spr158 spr159 spr15a spr15b spr15c spr15d spr15e spr15f
+ spr160 spr161 spr162 spr163 spr164 spr165 spr166 spr167 spr168 spr169 spr16a spr16b spr16c spr16d spr16e spr16f
+ spr170 spr171 spr172 spr173 spr174 spr175 spr176 spr177 spr178 spr179 spr17a spr17b spr17c spr17d spr17e spr17f
+ spr180 spr181 spr182 spr183 spr184 spr185 spr186 spr187 spr188 spr189 spr18a spr18b spr18c spr18d spr18e spr18f
+ spr190 spr191 spr192 spr193 spr194 spr195 spr196 spr197 spr198 spr199 spr19a spr19b spr19c spr19d spr19e spr19f
+ spr1a0 spr1a1 spr1a2 spr1a3 spr1a4 spr1a5 spr1a6 spr1a7 spr1a8 spr1a9 spr1aa spr1ab spr1ac spr1ad spr1ae spr1af
+ spr1b0 spr1b1 spr1b2 spr1b3 spr1b4 spr1b5 spr1b6 spr1b7 spr1b8 spr1b9 spr1ba spr1bb spr1bc spr1bd spr1be spr1bf
+ spr1c0 spr1c1 spr1c2 spr1c3 spr1c4 spr1c5 spr1c6 spr1c7 spr1c8 spr1c9 spr1ca spr1cb spr1cc spr1cd spr1ce spr1cf
+ spr1d0 spr1d1 spr1d2 spr1d3 spr1d4 spr1d5 spr1d6 spr1d7 spr1d8 spr1d9 spr1da spr1db spr1dc spr1dd spr1de spr1df
+ spr1e0 spr1e1 spr1e2 spr1e3 spr1e4 spr1e5 spr1e6 spr1e7 spr1e8 spr1e9 spr1ea spr1eb spr1ec spr1ed spr1ee spr1ef
+ spr1f0 spr1f1 spr1f2 spr1f3 spr1f4 spr1f5 spr1f6 spr1f7 spr1f8 spr1f9 spr1fa spr1fb spr1fc spr1fd spr1fe spr1ff
+ spr200 spr201 spr202 spr203 spr204 spr205 spr206 spr207 spr208 spr209 spr20a spr20b spr20c spr20d spr20e spr20f
+ spr210 spr211 spr212 spr213 spr214 spr215 spr216 spr217 spr218 spr219 spr21a spr21b spr21c spr21d spr21e spr21f
+ spr220 spr221 spr222 spr223 spr224 spr225 spr226 spr227 spr228 spr229 spr22a spr22b spr22c spr22d spr22e spr22f
+ spr230 spr231 spr232 spr233 spr234 spr235 spr236 spr237 spr238 spr239 spr23a spr23b spr23c spr23d spr23e spr23f
+ spr240 spr241 spr242 spr243 spr244 spr245 spr246 spr247 spr248 spr249 spr24a spr24b spr24c spr24d spr24e spr24f
+ spr250 spr251 spr252 spr253 spr254 spr255 spr256 spr257 spr258 spr259 spr25a spr25b spr25c spr25d spr25e spr25f
+ spr260 spr261 spr262 spr263 spr264 spr265 spr266 spr267 spr268 spr269 spr26a spr26b spr26c spr26d spr26e spr26f
+ spr270 spr271 spr272 spr273 spr274 spr275 spr276 spr277 spr278 spr279 spr27a spr27b spr27c spr27d spr27e spr27f
+ spr280 spr281 spr282 spr283 spr284 spr285 spr286 spr287 spr288 spr289 spr28a spr28b spr28c spr28d spr28e spr28f
+ spr290 spr291 spr292 spr293 spr294 spr295 spr296 spr297 spr298 spr299 spr29a spr29b spr29c spr29d spr29e spr29f
+ spr2a0 spr2a1 spr2a2 spr2a3 spr2a4 spr2a5 spr2a6 spr2a7 spr2a8 spr2a9 spr2aa spr2ab spr2ac spr2ad spr2ae spr2af
+ spr2b0 spr2b1 spr2b2 spr2b3 spr2b4 spr2b5 spr2b6 spr2b7 spr2b8 spr2b9 spr2ba spr2bb spr2bc spr2bd spr2be spr2bf
+ spr2c0 spr2c1 spr2c2 spr2c3 spr2c4 spr2c5 spr2c6 spr2c7 spr2c8 spr2c9 spr2ca spr2cb spr2cc spr2cd spr2ce spr2cf
+ spr2d0 spr2d1 spr2d2 spr2d3 spr2d4 spr2d5 spr2d6 spr2d7 spr2d8 spr2d9 spr2da spr2db spr2dc spr2dd spr2de spr2df
+ spr2e0 spr2e1 spr2e2 spr2e3 spr2e4 spr2e5 spr2e6 spr2e7 spr2e8 spr2e9 spr2ea spr2eb spr2ec spr2ed spr2ee spr2ef
+ spr2f0 spr2f1 spr2f2 spr2f3 spr2f4 spr2f5 spr2f6 spr2f7 spr2f8 spr2f9 spr2fa spr2fb spr2fc spr2fd spr2fe spr2ff
+ spr300 spr301 spr302 spr303 spr304 spr305 spr306 spr307 spr308 spr309 spr30a spr30b spr30c spr30d spr30e spr30f
+ spr310 spr311 spr312 spr313 spr314 spr315 spr316 spr317 spr318 spr319 spr31a spr31b spr31c spr31d spr31e spr31f
+ spr320 spr321 spr322 spr323 spr324 spr325 spr326 spr327 spr328 spr329 spr32a spr32b spr32c spr32d spr32e TAR
+ spr330 spr331 spr332 spr333 spr334 spr335 spr336 spr337 spr338 spr339 spr33a spr33b spr33c spr33d spr33e spr33f
+ spr340 spr341 spr342 spr343 spr344 spr345 spr346 spr347 spr348 spr349 spr34a spr34b spr34c spr34d spr34e spr34f
+ spr350 spr351 spr352 spr353 spr354 spr355 spr356 spr357 spr358 spr359 spr35a spr35b spr35c spr35d spr35e spr35f
+ spr360 spr361 spr362 spr363 spr364 spr365 spr366 spr367 spr368 spr369 spr36a spr36b spr36c spr36d spr36e spr36f
+ spr370 spr371 spr372 spr373 spr374 spr375 spr376 spr377 spr378 spr379 spr37a spr37b spr37c spr37d spr37e spr37f
+ spr380 spr381 spr382 spr383 spr384 spr385 spr386 spr387 spr388 spr389 spr38a spr38b spr38c spr38d spr38e spr38f
+ GQR0 GQR1 GQR2 GQR3 GQR4 GQR5 GQR6 GQR7 spr398 spr399 spr39a spr39b spr39c spr39d spr39e spr39f
+ spr3a0 spr3a1 spr3a2 spr3a3 spr3a4 spr3a5 spr3a6 spr3a7 spr3a8 spr3a9 spr3aa spr3ab spr3ac spr3ad spr3ae spr3af
+ spr3b0 spr3b1 spr3b2 spr3b3 spr3b4 spr3b5 spr3b6 spr3b7 spr3b8 spr3b9 spr3ba spr3bb spr3bc spr3bd spr3be spr3bf
+ spr3c0 spr3c1 spr3c2 spr3c3 spr3c4 spr3c5 spr3c6 spr3c7 spr3c8 spr3c9 spr3ca spr3cb spr3cc spr3cd spr3ce spr3cf
+ spr3d0 spr3d1 spr3d2 spr3d3 spr3d4 spr3d5 spr3d6 spr3d7 spr3d8 spr3d9 spr3da spr3db spr3dc spr3dd spr3de spr3df
+ spr3e0 spr3e1 spr3e2 spr3e3 spr3e4 spr3e5 spr3e6 spr3e7 spr3e8 spr3e9 spr3ea spr3eb spr3ec spr3ed spr3ee spr3ef
+ spr3f0 spr3f1 spr3f2 spr3f3 spr3f4 spr3f5 spr3f6 spr3f7 spr3f8 spr3f9 spr3fa spr3fb spr3fc spr3fd spr3fe spr3ff
+ ];
+
+# Floating point registers
+define register offset=0x4000 size=8 [
+ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
+ f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ];
+
+# ps1 registers
+# Note: on Gekko and Broadway these are actually 32-bit, but they use the same size as ps0 for definition convenience and to avoid noisy casts in decompilation.
+define register offset=0x5000 size=8 [
+ ps0_1 ps1_1 ps2_1 ps3_1 ps4_1 ps5_1 ps6_1 ps7_1
+ ps8_1 ps9_1 ps10_1 ps11_1 ps12_1 ps13_1 ps14_1 ps15_1
+ ps16_1 ps17_1 ps18_1 ps19_1 ps20_1 ps21_1 ps22_1 ps23_1
+ ps24_1 ps25_1 ps26_1 ps27_1 ps28_1 ps29_1 ps30_1 ps31_1];
+
+# Define context bits
+define register offset=0x6000 size=4 contextreg;
+define context contextreg
+ linkreg=(0,1) # 0 - no LR set, 1 - LR set (used to flag branch instructions to be treated as calls)
+	vle=(2,2)		# Controls inclusion/disassembly of VLE instructions; '1' means use VLE (see the NOTVLE/ISVLE @defines below)
+				# FIXME! Allowing vle context to flow is incorrect, but the PowerPC disassembly action will not work at all
+				# without it, and it could easily flow the incorrect context when traversing between VLE and non-VLE sections.
+
+ # transient context
+ lsmul=(3,7) noflow # Used for Load/store multiple parsing
+ regp=(8,12) noflow # Used in powerISA quad word instructions
+ regpset=(8,12) noflow # Used in powerISA quad word instructions
+;
+
+@define NOTVLE "vle=0"
+@define ISVLE "vle=1"
+
+# Define Device Control Registers (specific to IBM PowerPC Embedded Controller, see instructions mfdcr/mtdcr)
+# Device Control Registers are defined with generic names
+# These names may be replaced within register_data section within a PPC variant's pspec file
+define register offset=0x7000 size=$(REGISTER_SIZE)
+ [ dcr000 dcr001 dcr002 dcr003 dcr004 dcr005 dcr006 dcr007 dcr008 dcr009 dcr00a dcr00b dcr00c dcr00d dcr00e dcr00f
+ dcr010 dcr011 dcr012 dcr013 dcr014 dcr015 dcr016 dcr017 dcr018 dcr019 dcr01a dcr01b dcr01c dcr01d dcr01e dcr01f
+ dcr020 dcr021 dcr022 dcr023 dcr024 dcr025 dcr026 dcr027 dcr028 dcr029 dcr02a dcr02b dcr02c dcr02d dcr02e dcr02f
+ dcr030 dcr031 dcr032 dcr033 dcr034 dcr035 dcr036 dcr037 dcr038 dcr039 dcr03a dcr03b dcr03c dcr03d dcr03e dcr03f
+ dcr040 dcr041 dcr042 dcr043 dcr044 dcr045 dcr046 dcr047 dcr048 dcr049 dcr04a dcr04b dcr04c dcr04d dcr04e dcr04f
+ dcr050 dcr051 dcr052 dcr053 dcr054 dcr055 dcr056 dcr057 dcr058 dcr059 dcr05a dcr05b dcr05c dcr05d dcr05e dcr05f
+ dcr060 dcr061 dcr062 dcr063 dcr064 dcr065 dcr066 dcr067 dcr068 dcr069 dcr06a dcr06b dcr06c dcr06d dcr06e dcr06f
+ dcr070 dcr071 dcr072 dcr073 dcr074 dcr075 dcr076 dcr077 dcr078 dcr079 dcr07a dcr07b dcr07c dcr07d dcr07e dcr07f
+ dcr080 dcr081 dcr082 dcr083 dcr084 dcr085 dcr086 dcr087 dcr088 dcr089 dcr08a dcr08b dcr08c dcr08d dcr08e dcr08f
+ dcr090 dcr091 dcr092 dcr093 dcr094 dcr095 dcr096 dcr097 dcr098 dcr099 dcr09a dcr09b dcr09c dcr09d dcr09e dcr09f
+ dcr0a0 dcr0a1 dcr0a2 dcr0a3 dcr0a4 dcr0a5 dcr0a6 dcr0a7 dcr0a8 dcr0a9 dcr0aa dcr0ab dcr0ac dcr0ad dcr0ae dcr0af
+ dcr0b0 dcr0b1 dcr0b2 dcr0b3 dcr0b4 dcr0b5 dcr0b6 dcr0b7 dcr0b8 dcr0b9 dcr0ba dcr0bb dcr0bc dcr0bd dcr0be dcr0bf
+ dcr0c0 dcr0c1 dcr0c2 dcr0c3 dcr0c4 dcr0c5 dcr0c6 dcr0c7 dcr0c8 dcr0c9 dcr0ca dcr0cb dcr0cc dcr0cd dcr0ce dcr0cf
+ dcr0d0 dcr0d1 dcr0d2 dcr0d3 dcr0d4 dcr0d5 dcr0d6 dcr0d7 dcr0d8 dcr0d9 dcr0da dcr0db dcr0dc dcr0dd dcr0de dcr0df
+ dcr0e0 dcr0e1 dcr0e2 dcr0e3 dcr0e4 dcr0e5 dcr0e6 dcr0e7 dcr0e8 dcr0e9 dcr0ea dcr0eb dcr0ec dcr0ed dcr0ee dcr0ef
+ dcr0f0 dcr0f1 dcr0f2 dcr0f3 dcr0f4 dcr0f5 dcr0f6 dcr0f7 dcr0f8 dcr0f9 dcr0fa dcr0fb dcr0fc dcr0fd dcr0fe dcr0ff
+ dcr100 dcr101 dcr102 dcr103 dcr104 dcr105 dcr106 dcr107 dcr108 dcr109 dcr10a dcr10b dcr10c dcr10d dcr10e dcr10f
+ dcr110 dcr111 dcr112 dcr113 dcr114 dcr115 dcr116 dcr117 dcr118 dcr119 dcr11a dcr11b dcr11c dcr11d dcr11e dcr11f
+ dcr120 dcr121 dcr122 dcr123 dcr124 dcr125 dcr126 dcr127 dcr128 dcr129 dcr12a dcr12b dcr12c dcr12d dcr12e dcr12f
+ dcr130 dcr131 dcr132 dcr133 dcr134 dcr135 dcr136 dcr137 dcr138 dcr139 dcr13a dcr13b dcr13c dcr13d dcr13e dcr13f
+ dcr140 dcr141 dcr142 dcr143 dcr144 dcr145 dcr146 dcr147 dcr148 dcr149 dcr14a dcr14b dcr14c dcr14d dcr14e dcr14f
+ dcr150 dcr151 dcr152 dcr153 dcr154 dcr155 dcr156 dcr157 dcr158 dcr159 dcr15a dcr15b dcr15c dcr15d dcr15e dcr15f
+ dcr160 dcr161 dcr162 dcr163 dcr164 dcr165 dcr166 dcr167 dcr168 dcr169 dcr16a dcr16b dcr16c dcr16d dcr16e dcr16f
+ dcr170 dcr171 dcr172 dcr173 dcr174 dcr175 dcr176 dcr177 dcr178 dcr179 dcr17a dcr17b dcr17c dcr17d dcr17e dcr17f
+ dcr180 dcr181 dcr182 dcr183 dcr184 dcr185 dcr186 dcr187 dcr188 dcr189 dcr18a dcr18b dcr18c dcr18d dcr18e dcr18f
+ dcr190 dcr191 dcr192 dcr193 dcr194 dcr195 dcr196 dcr197 dcr198 dcr199 dcr19a dcr19b dcr19c dcr19d dcr19e dcr19f
+ dcr1a0 dcr1a1 dcr1a2 dcr1a3 dcr1a4 dcr1a5 dcr1a6 dcr1a7 dcr1a8 dcr1a9 dcr1aa dcr1ab dcr1ac dcr1ad dcr1ae dcr1af
+ dcr1b0 dcr1b1 dcr1b2 dcr1b3 dcr1b4 dcr1b5 dcr1b6 dcr1b7 dcr1b8 dcr1b9 dcr1ba dcr1bb dcr1bc dcr1bd dcr1be dcr1bf
+ dcr1c0 dcr1c1 dcr1c2 dcr1c3 dcr1c4 dcr1c5 dcr1c6 dcr1c7 dcr1c8 dcr1c9 dcr1ca dcr1cb dcr1cc dcr1cd dcr1ce dcr1cf
+ dcr1d0 dcr1d1 dcr1d2 dcr1d3 dcr1d4 dcr1d5 dcr1d6 dcr1d7 dcr1d8 dcr1d9 dcr1da dcr1db dcr1dc dcr1dd dcr1de dcr1df
+ dcr1e0 dcr1e1 dcr1e2 dcr1e3 dcr1e4 dcr1e5 dcr1e6 dcr1e7 dcr1e8 dcr1e9 dcr1ea dcr1eb dcr1ec dcr1ed dcr1ee dcr1ef
+ dcr1f0 dcr1f1 dcr1f2 dcr1f3 dcr1f4 dcr1f5 dcr1f6 dcr1f7 dcr1f8 dcr1f9 dcr1fa dcr1fb dcr1fc dcr1fd dcr1fe dcr1ff
+ dcr200 dcr201 dcr202 dcr203 dcr204 dcr205 dcr206 dcr207 dcr208 dcr209 dcr20a dcr20b dcr20c dcr20d dcr20e dcr20f
+ dcr210 dcr211 dcr212 dcr213 dcr214 dcr215 dcr216 dcr217 dcr218 dcr219 dcr21a dcr21b dcr21c dcr21d dcr21e dcr21f
+ dcr220 dcr221 dcr222 dcr223 dcr224 dcr225 dcr226 dcr227 dcr228 dcr229 dcr22a dcr22b dcr22c dcr22d dcr22e dcr22f
+ dcr230 dcr231 dcr232 dcr233 dcr234 dcr235 dcr236 dcr237 dcr238 dcr239 dcr23a dcr23b dcr23c dcr23d dcr23e dcr23f
+ dcr240 dcr241 dcr242 dcr243 dcr244 dcr245 dcr246 dcr247 dcr248 dcr249 dcr24a dcr24b dcr24c dcr24d dcr24e dcr24f
+ dcr250 dcr251 dcr252 dcr253 dcr254 dcr255 dcr256 dcr257 dcr258 dcr259 dcr25a dcr25b dcr25c dcr25d dcr25e dcr25f
+ dcr260 dcr261 dcr262 dcr263 dcr264 dcr265 dcr266 dcr267 dcr268 dcr269 dcr26a dcr26b dcr26c dcr26d dcr26e dcr26f
+ dcr270 dcr271 dcr272 dcr273 dcr274 dcr275 dcr276 dcr277 dcr278 dcr279 dcr27a dcr27b dcr27c dcr27d dcr27e dcr27f
+ dcr280 dcr281 dcr282 dcr283 dcr284 dcr285 dcr286 dcr287 dcr288 dcr289 dcr28a dcr28b dcr28c dcr28d dcr28e dcr28f
+ dcr290 dcr291 dcr292 dcr293 dcr294 dcr295 dcr296 dcr297 dcr298 dcr299 dcr29a dcr29b dcr29c dcr29d dcr29e dcr29f
+ dcr2a0 dcr2a1 dcr2a2 dcr2a3 dcr2a4 dcr2a5 dcr2a6 dcr2a7 dcr2a8 dcr2a9 dcr2aa dcr2ab dcr2ac dcr2ad dcr2ae dcr2af
+ dcr2b0 dcr2b1 dcr2b2 dcr2b3 dcr2b4 dcr2b5 dcr2b6 dcr2b7 dcr2b8 dcr2b9 dcr2ba dcr2bb dcr2bc dcr2bd dcr2be dcr2bf
+ dcr2c0 dcr2c1 dcr2c2 dcr2c3 dcr2c4 dcr2c5 dcr2c6 dcr2c7 dcr2c8 dcr2c9 dcr2ca dcr2cb dcr2cc dcr2cd dcr2ce dcr2cf
+ dcr2d0 dcr2d1 dcr2d2 dcr2d3 dcr2d4 dcr2d5 dcr2d6 dcr2d7 dcr2d8 dcr2d9 dcr2da dcr2db dcr2dc dcr2dd dcr2de dcr2df
+ dcr2e0 dcr2e1 dcr2e2 dcr2e3 dcr2e4 dcr2e5 dcr2e6 dcr2e7 dcr2e8 dcr2e9 dcr2ea dcr2eb dcr2ec dcr2ed dcr2ee dcr2ef
+ dcr2f0 dcr2f1 dcr2f2 dcr2f3 dcr2f4 dcr2f5 dcr2f6 dcr2f7 dcr2f8 dcr2f9 dcr2fa dcr2fb dcr2fc dcr2fd dcr2fe dcr2ff
+ dcr300 dcr301 dcr302 dcr303 dcr304 dcr305 dcr306 dcr307 dcr308 dcr309 dcr30a dcr30b dcr30c dcr30d dcr30e dcr30f
+ dcr310 dcr311 dcr312 dcr313 dcr314 dcr315 dcr316 dcr317 dcr318 dcr319 dcr31a dcr31b dcr31c dcr31d dcr31e dcr31f
+ dcr320 dcr321 dcr322 dcr323 dcr324 dcr325 dcr326 dcr327 dcr328 dcr329 dcr32a dcr32b dcr32c dcr32d dcr32e dcr32f
+ dcr330 dcr331 dcr332 dcr333 dcr334 dcr335 dcr336 dcr337 dcr338 dcr339 dcr33a dcr33b dcr33c dcr33d dcr33e dcr33f
+ dcr340 dcr341 dcr342 dcr343 dcr344 dcr345 dcr346 dcr347 dcr348 dcr349 dcr34a dcr34b dcr34c dcr34d dcr34e dcr34f
+ dcr350 dcr351 dcr352 dcr353 dcr354 dcr355 dcr356 dcr357 dcr358 dcr359 dcr35a dcr35b dcr35c dcr35d dcr35e dcr35f
+ dcr360 dcr361 dcr362 dcr363 dcr364 dcr365 dcr366 dcr367 dcr368 dcr369 dcr36a dcr36b dcr36c dcr36d dcr36e dcr36f
+ dcr370 dcr371 dcr372 dcr373 dcr374 dcr375 dcr376 dcr377 dcr378 dcr379 dcr37a dcr37b dcr37c dcr37d dcr37e dcr37f
+ dcr380 dcr381 dcr382 dcr383 dcr384 dcr385 dcr386 dcr387 dcr388 dcr389 dcr38a dcr38b dcr38c dcr38d dcr38e dcr38f
+ dcr390 dcr391 dcr392 dcr393 dcr394 dcr395 dcr396 dcr397 dcr398 dcr399 dcr39a dcr39b dcr39c dcr39d dcr39e dcr39f
+ dcr3a0 dcr3a1 dcr3a2 dcr3a3 dcr3a4 dcr3a5 dcr3a6 dcr3a7 dcr3a8 dcr3a9 dcr3aa dcr3ab dcr3ac dcr3ad dcr3ae dcr3af
+ dcr3b0 dcr3b1 dcr3b2 dcr3b3 dcr3b4 dcr3b5 dcr3b6 dcr3b7 dcr3b8 dcr3b9 dcr3ba dcr3bb dcr3bc dcr3bd dcr3be dcr3bf
+ dcr3c0 dcr3c1 dcr3c2 dcr3c3 dcr3c4 dcr3c5 dcr3c6 dcr3c7 dcr3c8 dcr3c9 dcr3ca dcr3cb dcr3cc dcr3cd dcr3ce dcr3cf
+ dcr3d0 dcr3d1 dcr3d2 dcr3d3 dcr3d4 dcr3d5 dcr3d6 dcr3d7 dcr3d8 dcr3d9 dcr3da dcr3db dcr3dc dcr3dd dcr3de dcr3df
+ dcr3e0 dcr3e1 dcr3e2 dcr3e3 dcr3e4 dcr3e5 dcr3e6 dcr3e7 dcr3e8 dcr3e9 dcr3ea dcr3eb dcr3ec dcr3ed dcr3ee dcr3ef
+ dcr3f0 dcr3f1 dcr3f2 dcr3f3 dcr3f4 dcr3f5 dcr3f6 dcr3f7 dcr3f8 dcr3f9 dcr3fa dcr3fb dcr3fc dcr3fd dcr3fe dcr3ff
+ ];
+
+# ACC and SPEFSCR are part of the "EREF: A Reference for Motorola Book E and e500 Core" spec
+# SPEFSCR is a repurposed spr200
+define register offset=0x10000 size=8 [ACC];
+
+# OP=17 & BITS_21_25=0 & BITS_16_20=0(ok) & BITS_5_11=LEV & BITS_2_4=0 & BIT_1=1 & BIT_0=0
+
+define token instr(32)
+ A=(16,20)
+ AA=(1,1)
+ A_BITS=(16,20)
+ A_BITSS=(16,20) signed
+ AX=(2,2)
+ B=(11,15)
+ B_BITS=(11,15)
+ BD=(2,15) signed
+ BF=(17,24)
+ BFA=(0,2)
+ BFA2=(18,20)
+ BF2=(23,25)
+ BH=(11,12)
+ BH_BITS=(11,12)
+ BH_RBE=(11,20)
+ BH_RET=(11,11)
+ BI_BITS=(16,20)
+ BI_CC=(16,17)
+ BI_CR=(18,20)
+ BIT_A=(25,25)
+ BIT_L=(21,21)
+ BIT_R=(21,21)
+ BIT_0=(0,0)
+ BIT_10=(10,10)
+ BIT_1=(1,1)
+ BIT_11=(11,11)
+ BIT_15=(15,15)
+ BIT_16=(16,16)
+ BIT_17=(17,17)
+ BIT_18=(18,18)
+ BIT_20=(20,20)
+ BIT_22=(22,22)
+ BIT_25=(25,25)
+ BIT_9=(9,9)
+ BIT_6=(6,6)
+ BITS_0_1=(0,1)
+ BITS_0_17=(0,17)
+ BITS_0_2=(0,2)
+ BITS_0_3=(0,3)
+ BITS_1_10=(1,10)
+ BITS_11_13=(11,13)
+ BITS_11_15=(11,15)
+ BITS_11_17=(11,17)
+ BITS_11_20=(11,20)
+ BITS_11_22=(11,22)
+ BITS_11_24=(11,24)
+ BITS_11_25=(11,25)
+ BITS_12_15=(12,15)
+ BITS_12_19=(12,19)
+ BITS_12_25=(12,25)
+ BITS_13_15=(13,15)
+ BITS_14_15=(14,15)
+ BITS_16_17=(12,15)
+ BITS_16_18=(16,18)
+ BITS_16_19=(16,19)
+ BITS_16_20=(16,20)
+ BITS_16_22=(16,22)
+ BITS_16_25=(16,25)
+ BITS_17_20=(17,20)
+ BITS_17_24=(17,24)
+ BITS_18_19=(18,19)
+ BITS_18_20=(18,20)
+ BITS_1_9=(1,9)
+ BITS_19_20=(19,20)
+ BITS_20_20=(20,20)
+ BITS_21_22=(21,22)
+ BITS_21_23=(21,23)
+ BITS_21_24=(7,10)
+ BITS_21_25=(21,25)
+ BITS_21_28=(21,28)
+ BITS_22_24=(22,24)
+ BITS_22_25=(22,25)
+ BITS_22_26=(22,26)
+ BITS_2_25=(2,25)
+ BITS_23_24=(23,24)
+ BITS_23_25=(23,25)
+ BITS_2_4=(2,4)
+ BITS_24_25=(24,25)
+ BITS_3_7=(3,7)
+ BITS_4_5=(4,5)
+ BITS_6_10=(6,10)
+ BO_0=(25,25)
+ BO_1=(24,24)
+ BO=(21,25)
+ BO_2=(23,23)
+ BO_3=(22, 22)
+ BO_BITS=(21,25)
+ BX=(1,1)
+ C=(6,10)
+ COND_BRANCH_CTRL=(22,25)
+ CR_A=(18,20)
+ CR_A_CC=(16,17)
+ CR_B=(13,15)
+ CR_B_CC=(11,12)
+ CRBD=(21,25)
+ CRBR=(6,10)
+ CR_D=(23,25)
+ CR_D_CC=(21,22)
+ crfD=(23,25)
+ CRFD=(23,25)
+ CRFS=(18,20)
+ CRM0=(19,19)
+ CRM1=(18,18)
+ CRM=(12,19)
+ CRM2=(17,17)
+ CRM3=(16,16)
+ CRM4=(15,15)
+ CRM5=(14,14)
+ CRM6=(13,13)
+ CRM7=(12,12)
+ CR_X=(8,10)
+ CR_X_CC=(6,7)
+ CT=(21,25)
+ CT2=(21,24)
+ CX=(3,3)
+ D0=(6,15) signed
+ D1=(16,20)
+ D2=(0,0)
+ D=(21,25)
+ Dp=(21,25)
+ DC6=(6,6)
+ DCM=(10,15)
+ DCMX=(16,22)
+ DCRN=(11,20)
+ DGM=(10,15)
+ DM=(8,9)
+ DM2=(2,2)
+ DQ=(4,15)
+ DQs=(4,15) signed
+ DS=(2,15)
+ DSs=(2,15) signed
+ DX=(16,20)
+ DUI=(21,25)
+ DUIS=(11,20)
+ EX=(0,0)
+ fA=(16,20)
+ ps0A=(16,20)
+ ps1A=(16,20)
+ fB=(11,15)
+ ps0B=(11,15)
+ ps1B=(11,15)
+ fC=(6,10)
+ ps0C=(6,10)
+ ps1C=(6,10)
+ fD=(21,25)
+ ps0D=(21,25)
+ ps1D=(21,25)
+ FM0=(24,24)
+ FM1=(23,23)
+ FM=(17,24)
+ FM2=(22,22)
+ FM3=(21,21)
+ FM4=(20,20)
+ FM5=(19,19)
+ FM6=(18,18)
+ FM7=(17,17)
+ FNC=(11,15)
+ fS=(21,25)
+ ps0S=(21,25)
+ ps1S=(21,25)
+ fT=(21,25)
+
+ # Hack to set ps1 in single fp instruction
+ ps1T=(21,25)
+ I=(12,14)
+ IX=(7,9)
+ IMM=(11,15)
+
+ EVUIMM=(11,15)
+ BU_UIMM=(16,20)
+ BU_SIMM=(16,20)
+ EVUIMM_8=(11,15)
+ EVUIMM_4=(11,15)
+ EVUIMM_2=(11,15)
+
+ L= (21,22)
+ L2=(21,21)
+ L16=(16,17)
+ LEV=(5,11)
+ LI=(2,25) signed
+ LK=(0,0)
+ MBH=(5,5)
+ MBL=(6,10)
+ ME=(1,5)
+ MO=(21,25)
+ MSR_L=(16,16)
+ NB= (11,15)
+ O=(9,9)
+ OE=(10,10)
+ OP=(26,31)
+ PS=(9,9)
+ Rc=(0,0)
+ Rc2=(10,10)
+ RMC=(9,10)
+
+ RA=(16,20)
+ RB=(11,15)
+ RS=(21,25)
+ RT=(21,25)
+ R0=(0,0)
+ R16=(16,16)
+
+ S=(21,25)
+ SBE=(11,11)
+ SH16=(10,15)
+ SHB=(6,9)
+ SHH=(1,1)
+ SHL=(11,15)
+ SHW=(8,9)
+ S8IMM=(0,7) signed
+ S5IMM=(11,15) signed
+ SIMM=(0,15) signed
+ SIMM_PS=(0,11) signed
+ SIMM_DS=(2,15) signed
+ SIMM_SIGN=(15,15)
+ SIX=(11,14)
+ SP=(19,20)
+ SPRVAL=(11,20)
+ SR=(16,19)
+ ST=(15,15)
+ STRM=(21,22)
+ SX=(0,0)
+ SX3=(3,3)
+ T=(21,25)
+ TOA=(21,25)
+ TBR=(11,20)
+ TH=(21,25)
+ TMP_6_10=(21,25)
+ TO=(21,25)
+ TX=(0,0)
+ TX3=(3,3)
+ UI=(11,15)
+ UI_11_s8=(16,20)
+ UI_16_s8=(11,15)
+ UI_16_s16=(0,15)
+ UIMM8=(11,18)
+ UIMM=(0,15)
+ UIM=(16,17)
+ UIMB=(16,19)
+ UIMH=(16,18)
+ UIMW=(16,17)
+ UIMT=(16,21)
+
+ W=(15,15)
+ WX=(10,10)
+ WC=(21,22)
+
+ XOP_0_10=(0,10)
+ XOP_0_5=(0,5)
+ XOP_0_8=(0,8)
+ XOP_0_9=(0,9)
+ XOP_1_10=(1,10)
+ XOP_1_4=(1,4)
+ XOP_1_5=(1,5)
+ XOP_1_6=(1,6)
+ XOP_1_8=(1,8)
+ XOP_1_9=(1,9)
+ XOP_2_10=(2,10)
+ XOP_2_4=(2,4)
+ XOP_3_5=(3,5)
+ XOP_3_10=(3,10)
+ XOP_3_9=(3,9)
+ XOP_7_10=(7,10)
+
+ BD15_VLE=(1,15) signed
+ BD24_VLE=(1,24) signed
+ BF_VLE=(21,22)
+ BI_CC_VLE=(16,17)
+ BI_CR_VLE=(18,19)
+ BO_VLE=(20,21)
+
+ IMM8=(0,7)
+ IMM_0_10_VLE=(0,10)
+ IMM_11_15_VLE=(11,14)
+ IMM_16_20_VLE=(16,20)
+ IMM_21_25_VLE=(21,25)
+ SIMM_0_10_VLE=(0,10) signed
+ SIMM_11_14_VLE=(11,14) signed
+ SIMM_21_25_VLE=(21,25) signed
+ SCL_VLE=(8,9)
+
+ LEV_VLE=(11,15)
+ XOP_8_VLE=(8,15)
+ XOP_11_VLE=(11,15)
+ XOP_12_VLE=(12,15)
+
+ XOP_VLE=(22,25)
+;
+
+define token instrvle(16)
+ OP4_VLE=(12,15)
+ OP5_VLE=(11,15)
+ OP6_VLE=(10,15)
+ OP15_VLE=(1,15)
+ OP16_VLE=(0,15)
+
+ OIM5_VLE=(4,8)
+ OIM7_VLE=(4,10)
+ SD4_VLE=(8,11)
+ UI7_VLE=(4,10)
+ UI5_VLE=(4,8)
+ XORR_VLE=(8,9)
+ XOR_VLE=(4,9)
+
+ ARX_VLE=(0,3)
+ ARY_VLE=(4,7)
+ RY_VLE=(4,7)
+ RZ_VLE=(4,7)
+ RX_VLE=(0,3)
+
+ BO16_VLE=(10,10)
+ BIT9_VLE=(9,9)
+ BIT8_VLE=(8,8)
+ BI16_VLE=(8,9)
+ BITS_8_9=(8,9)
+ BD8_VLE=(0,7) signed
+
+ LK8_VLE=(8,8)
+ LK0_VLE=(0,0)
+;
+
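+# Scaled-displacement operand helpers: the 5-bit immediate is scaled by 2, 4,
+# or 8 both for display and for the effective address computed against rA.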
+EVUIMM_2_RAt: val^"("^A^")" is A & EVUIMM_2 [ val = EVUIMM_2*2; ] { tmp:4 = A+(EVUIMM_2*2); export tmp; }
+EVUIMM_4_RAt: val^"("^A^")" is A & EVUIMM_4 [ val = EVUIMM_4*4; ] { tmp:4 = A+(EVUIMM_4*4); export tmp; }
+EVUIMM_8_RAt: val^"("^A^")" is A & EVUIMM_8 [ val = EVUIMM_8*8; ] { tmp:4 = A+(EVUIMM_8*8); export tmp; }
+
+attach variables [ RX_VLE RY_VLE RZ_VLE]
+ [ r0 r1 r2 r3 r4 r5 r6 r7 r24 r25 r26 r27 r28 r29 r30 r31];
+
+attach variables [ ARX_VLE ARY_VLE]
+ [ r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23];
+
+attach variables [ D A B C S TH RA RB RS RT regp]
+ [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15
+ r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 r30 r31 ];
+
+attach variables [ BFA BI_CR CRFD CRFS CR_A CR_B CR_D CR_X ]
+ [cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7] ;
+
+attach variables [ BI_CR_VLE ]
+ [cr0 cr1 cr2 cr3 ] ;
+
+attach variables [ fD fB fA fC fS fT ]
+ [ f0 f1 f2 f3 f4 f5 f6 f7
+ f8 f9 f10 f11 f12 f13 f14 f15
+ f16 f17 f18 f19 f20 f21 f22 f23
+ f24 f25 f26 f27 f28 f29 f30 f31 ];
+
+# Paired singles (individual components)
+attach variables [ ps0D ps0B ps0A ps0C ps0S ]
+ [ f0 f1 f2 f3 f4 f5 f6 f7
+ f8 f9 f10 f11 f12 f13 f14 f15
+ f16 f17 f18 f19 f20 f21 f22 f23
+ f24 f25 f26 f27 f28 f29 f30 f31 ];
+
+attach variables [ ps1D ps1B ps1A ps1C ps1S ps1T ]
+ [ ps0_1 ps1_1 ps2_1 ps3_1 ps4_1 ps5_1 ps6_1 ps7_1
+ ps8_1 ps9_1 ps10_1 ps11_1 ps12_1 ps13_1 ps14_1 ps15_1
+ ps16_1 ps17_1 ps18_1 ps19_1 ps20_1 ps21_1 ps22_1 ps23_1
+ ps24_1 ps25_1 ps26_1 ps27_1 ps28_1 ps29_1 ps30_1 ps31_1 ];
+
+attach variables [ CRBD CRBR ]
+ [ fp_fx fp_fex fp_vx fp_ox
+ fp_ux fp_zx fp_xx fp_vxsnan
+ fp_vxisi fp_vxidi fp_vxzdz fp_vximz
+ fp_vxvc fp_fr fp_fi fp_c
+ fp_cc0 fp_cc1 fp_cc2 fp_cc3
+ fp_reserve1 fp_vxsoft fp_vxsqrt fp_vxcvi
+ fp_ve fp_oe fp_ue fp_ze
+ fp_xe fp_ni fp_rn0 fp_rn1
+ ];
+
+attach variables SR [
+ sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 sr8 sr9 sr10 sr11 sr12 sr13 sr14 sr15 ];
+
+attach variables [I IX]
+ [GQR0 GQR1 GQR2 GQR3 GQR4 GQR5 GQR6 GQR7];
+
+##
+## Attach the spr register to the token SPRVAL made up of the bits sprL/sprH
+## the low bits are shifted up, so the table is inverted and indexed by sprH,sprL
+## This could have been done by computing sprVal = sprH * 32 + sprL but it would
+## have resulted in multiple instructions instead of the original single prototype.
+## Thus this massive inverted table.
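+## For example, TBLr is SPR 268 = 8*32+12, so sprL=12 and sprH=8; the encoded
+## field value is 12*32+8 = 392 (hence mftb matching TBR=392 in
+## ppc_embedded.sinc), which lands on row 12, column 8 of this table.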
+attach variables SPRVAL [
+ spr000 spr020 spr040 spr060 spr080 spr0a0 spr0c0 spr0e0 spr100 spr120 spr140 spr160 spr180 spr1a0 spr1c0 spr1e0 spr200 spr220 spr240 spr260 spr280 spr2a0 spr2c0 spr2e0 spr300 spr320 spr340 spr360 spr380 spr3a0 spr3c0 spr3e0
+ XER spr021 spr041 spr061 spr081 spr0a1 spr0c1 spr0e1 spr101 spr121 spr141 spr161 spr181 spr1a1 spr1c1 spr1e1 spr201 spr221 spr241 spr261 spr281 spr2a1 spr2c1 spr2e1 spr301 spr321 spr341 spr361 spr381 spr3a1 spr3c1 spr3e1
+ spr002 spr022 spr042 spr062 spr082 spr0a2 spr0c2 spr0e2 spr102 spr122 spr142 spr162 spr182 spr1a2 spr1c2 spr1e2 spr202 spr222 spr242 spr262 spr282 spr2a2 spr2c2 spr2e2 spr302 spr322 spr342 spr362 spr382 spr3a2 spr3c2 spr3e2
+ spr003 spr023 spr043 spr063 spr083 spr0a3 spr0c3 spr0e3 spr103 spr123 spr143 spr163 spr183 spr1a3 spr1c3 spr1e3 spr203 spr223 spr243 spr263 spr283 spr2a3 spr2c3 spr2e3 spr303 spr323 spr343 spr363 spr383 spr3a3 spr3c3 spr3e3
+ spr004 spr024 spr044 spr064 spr084 spr0a4 spr0c4 spr0e4 spr104 spr124 spr144 spr164 spr184 spr1a4 spr1c4 spr1e4 spr204 spr224 spr244 spr264 spr284 spr2a4 spr2c4 spr2e4 spr304 spr324 spr344 spr364 spr384 spr3a4 spr3c4 spr3e4
+ spr005 spr025 spr045 spr065 spr085 spr0a5 spr0c5 spr0e5 spr105 spr125 spr145 spr165 spr185 spr1a5 spr1c5 spr1e5 spr205 spr225 spr245 spr265 spr285 spr2a5 spr2c5 spr2e5 spr305 spr325 spr345 spr365 spr385 spr3a5 spr3c5 spr3e5
+ spr006 spr026 spr046 spr066 spr086 spr0a6 spr0c6 spr0e6 spr106 spr126 spr146 spr166 spr186 spr1a6 spr1c6 spr1e6 spr206 spr226 spr246 spr266 spr286 spr2a6 spr2c6 spr2e6 spr306 spr326 spr346 spr366 spr386 spr3a6 spr3c6 spr3e6
+ spr007 spr027 spr047 spr067 spr087 spr0a7 spr0c7 spr0e7 spr107 spr127 spr147 spr167 spr187 spr1a7 spr1c7 spr1e7 spr207 spr227 spr247 spr267 spr287 spr2a7 spr2c7 spr2e7 spr307 spr327 spr347 spr367 spr387 spr3a7 spr3c7 spr3e7
+ LR spr028 spr048 spr068 spr088 spr0a8 spr0c8 spr0e8 spr108 spr128 spr148 spr168 spr188 spr1a8 spr1c8 spr1e8 spr208 spr228 spr248 spr268 spr288 spr2a8 spr2c8 spr2e8 spr308 spr328 spr348 spr368 spr388 spr3a8 spr3c8 spr3e8
+ CTR spr029 spr049 spr069 spr089 spr0a9 spr0c9 spr0e9 spr109 spr129 spr149 spr169 spr189 spr1a9 spr1c9 spr1e9 spr209 spr229 spr249 spr269 spr289 spr2a9 spr2c9 spr2e9 spr309 spr329 spr349 spr369 spr389 spr3a9 spr3c9 spr3e9
+ spr00a spr02a spr04a spr06a spr08a spr0aa spr0ca spr0ea spr10a spr12a spr14a spr16a spr18a spr1aa spr1ca spr1ea spr20a spr22a spr24a spr26a spr28a spr2aa spr2ca spr2ea spr30a spr32a spr34a spr36a spr38a spr3aa spr3ca spr3ea
+ spr00b spr02b spr04b spr06b spr08b spr0ab spr0cb spr0eb spr10b spr12b spr14b spr16b spr18b spr1ab spr1cb spr1eb spr20b spr22b spr24b spr26b spr28b spr2ab spr2cb spr2eb spr30b spr32b spr34b spr36b spr38b spr3ab spr3cb spr3eb
+ spr00c spr02c spr04c spr06c spr08c spr0ac spr0cc spr0ec TBLr spr12c spr14c spr16c spr18c spr1ac spr1cc spr1ec spr20c spr22c spr24c spr26c spr28c spr2ac spr2cc spr2ec spr30c spr32c spr34c spr36c spr38c spr3ac spr3cc spr3ec
+ spr00d spr02d spr04d spr06d spr08d spr0ad spr0cd spr0ed TBUr spr12d spr14d spr16d spr18d spr1ad spr1cd spr1ed spr20d spr22d spr24d spr26d spr28d spr2ad spr2cd spr2ed spr30d spr32d spr34d spr36d spr38d spr3ad spr3cd spr3ed
+ spr00e spr02e spr04e spr06e spr08e spr0ae spr0ce spr0ee spr10e spr12e spr14e spr16e spr18e spr1ae spr1ce spr1ee spr20e spr22e spr24e spr26e spr28e spr2ae spr2ce spr2ee spr30e spr32e spr34e spr36e spr38e spr3ae spr3ce spr3ee
+ spr00f spr02f spr04f spr06f spr08f spr0af spr0cf spr0ef spr10f spr12f spr14f spr16f spr18f spr1af spr1cf spr1ef spr20f spr22f spr24f spr26f spr28f spr2af spr2cf spr2ef spr30f TAR spr34f spr36f spr38f spr3af spr3cf spr3ef
+ spr010 spr030 spr050 spr070 spr090 spr0b0 spr0d0 spr0f0 spr110 spr130 spr150 spr170 spr190 spr1b0 spr1d0 spr1f0 spr210 spr230 spr250 spr270 spr290 spr2b0 spr2d0 spr2f0 spr310 spr330 spr350 spr370 GQR0 spr3b0 spr3d0 spr3f0
+ spr011 spr031 spr051 spr071 spr091 spr0b1 spr0d1 spr0f1 spr111 spr131 spr151 spr171 spr191 spr1b1 spr1d1 spr1f1 spr211 spr231 spr251 spr271 spr291 spr2b1 spr2d1 spr2f1 spr311 spr331 spr351 spr371 GQR1 spr3b1 spr3d1 spr3f1
+ spr012 spr032 spr052 spr072 spr092 spr0b2 spr0d2 spr0f2 spr112 spr132 spr152 spr172 spr192 spr1b2 spr1d2 spr1f2 spr212 spr232 spr252 spr272 spr292 spr2b2 spr2d2 spr2f2 spr312 spr332 spr352 spr372 GQR2 spr3b2 spr3d2 spr3f2
+ spr013 spr033 spr053 spr073 spr093 spr0b3 spr0d3 spr0f3 spr113 spr133 spr153 spr173 spr193 spr1b3 spr1d3 spr1f3 spr213 spr233 spr253 spr273 spr293 spr2b3 spr2d3 spr2f3 spr313 spr333 spr353 spr373 GQR3 spr3b3 spr3d3 spr3f3
+ spr014 spr034 spr054 spr074 spr094 spr0b4 spr0d4 spr0f4 spr114 spr134 spr154 spr174 spr194 spr1b4 spr1d4 spr1f4 spr214 spr234 spr254 spr274 spr294 spr2b4 spr2d4 spr2f4 spr314 spr334 spr354 spr374 GQR4 spr3b4 spr3d4 spr3f4
+ spr015 spr035 spr055 spr075 spr095 spr0b5 spr0d5 spr0f5 spr115 spr135 spr155 spr175 spr195 spr1b5 spr1d5 spr1f5 spr215 spr235 spr255 spr275 spr295 spr2b5 spr2d5 spr2f5 spr315 spr335 spr355 spr375 GQR5 spr3b5 spr3d5 spr3f5
+ spr016 spr036 spr056 spr076 spr096 spr0b6 spr0d6 spr0f6 spr116 spr136 spr156 spr176 spr196 spr1b6 spr1d6 spr1f6 spr216 spr236 spr256 spr276 spr296 spr2b6 spr2d6 spr2f6 spr316 spr336 spr356 spr376 GQR6 spr3b6 spr3d6 spr3f6
+ spr017 spr037 spr057 spr077 spr097 spr0b7 spr0d7 spr0f7 spr117 spr137 spr157 spr177 spr197 spr1b7 spr1d7 spr1f7 spr217 spr237 spr257 spr277 spr297 spr2b7 spr2d7 spr2f7 spr317 spr337 spr357 spr377 GQR7 spr3b7 spr3d7 spr3f7
+ spr018 spr038 spr058 spr078 spr098 spr0b8 spr0d8 spr0f8 spr118 spr138 spr158 spr178 spr198 spr1b8 spr1d8 spr1f8 spr218 spr238 spr258 spr278 spr298 spr2b8 spr2d8 spr2f8 spr318 spr338 spr358 spr378 spr398 spr3b8 spr3d8 spr3f8
+ spr019 spr039 spr059 spr079 spr099 spr0b9 spr0d9 spr0f9 spr119 spr139 spr159 spr179 spr199 spr1b9 spr1d9 spr1f9 spr219 spr239 spr259 spr279 spr299 spr2b9 spr2d9 spr2f9 spr319 spr339 spr359 spr379 spr399 spr3b9 spr3d9 spr3f9
+ SRR0 CSRR0 spr05a spr07a spr09a spr0ba spr0da spr0fa spr11a spr13a spr15a spr17a spr19a spr1ba spr1da spr1fa spr21a spr23a spr25a spr27a spr29a spr2ba spr2da spr2fa spr31a spr33a spr35a spr37a spr39a spr3ba spr3da spr3fa
+ SRR1 CSRR1 spr05b spr07b spr09b spr0bb spr0db spr0fb spr11b spr13b spr15b spr17b spr19b spr1bb spr1db spr1fb spr21b spr23b spr25b spr27b spr29b spr2bb spr2db spr2fb spr31b spr33b spr35b spr37b spr39b spr3bb spr3db spr3fb
+ spr01c spr03c spr05c spr07c spr09c spr0bc spr0dc spr0fc TBLw spr13c spr15c spr17c spr19c spr1bc spr1dc spr1fc spr21c spr23c spr25c spr27c spr29c spr2bc spr2dc spr2fc spr31c spr33c spr35c spr37c spr39c spr3bc spr3dc spr3fc
+ spr01d spr03d spr05d spr07d spr09d spr0bd spr0dd spr0fd TBUw spr13d spr15d spr17d spr19d spr1bd spr1dd spr1fd spr21d spr23d spr25d spr27d spr29d spr2bd spr2dd spr2fd spr31d spr33d spr35d spr37d spr39d spr3bd spr3dd spr3fd
+ spr01e spr03e spr05e spr07e spr09e spr0be spr0de spr0fe spr11e spr13e spr15e spr17e spr19e spr1be spr1de spr1fe spr21e spr23e spr25e spr27e spr29e spr2be spr2de spr2fe spr31e spr33e spr35e spr37e spr39e spr3be spr3de spr3fe
+ spr01f spr03f spr05f spr07f spr09f spr0bf spr0df spr0ff spr11f spr13f spr15f spr17f spr19f spr1bf spr1df spr1ff spr21f spr23f spr25f spr27f spr29f spr2bf spr2df spr2ff spr31f spr33f spr35f spr37f spr39f spr3bf spr3df spr3ff
+];
+
+##
+## Attach the dcr registers to the token DCRN, which is built from the bit
+## fields dcrnL/dcrnH. The low bits of the DCR number are shifted up in the
+## token, so the table is inverted: entry dcrnL*32 + dcrnH holds DCR number
+## dcrnH*32 + dcrnL. This could have been done by computing DCRN = dcrnH * 32 + dcrnL,
+## but that would have produced several p-code instructions instead of the
+## original single prototype. Thus this massive inverted table.
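+## Example: DCR 0x23 (dcrnH=1, dcrnL=3) is dcr023, found at token value
+## dcrnL*32 + dcrnH = 97, the second entry of the fourth row below.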
+attach variables DCRN [
+ dcr000 dcr020 dcr040 dcr060 dcr080 dcr0a0 dcr0c0 dcr0e0 dcr100 dcr120 dcr140 dcr160 dcr180 dcr1a0 dcr1c0 dcr1e0 dcr200 dcr220 dcr240 dcr260 dcr280 dcr2a0 dcr2c0 dcr2e0 dcr300 dcr320 dcr340 dcr360 dcr380 dcr3a0 dcr3c0 dcr3e0
+ dcr001 dcr021 dcr041 dcr061 dcr081 dcr0a1 dcr0c1 dcr0e1 dcr101 dcr121 dcr141 dcr161 dcr181 dcr1a1 dcr1c1 dcr1e1 dcr201 dcr221 dcr241 dcr261 dcr281 dcr2a1 dcr2c1 dcr2e1 dcr301 dcr321 dcr341 dcr361 dcr381 dcr3a1 dcr3c1 dcr3e1
+ dcr002 dcr022 dcr042 dcr062 dcr082 dcr0a2 dcr0c2 dcr0e2 dcr102 dcr122 dcr142 dcr162 dcr182 dcr1a2 dcr1c2 dcr1e2 dcr202 dcr222 dcr242 dcr262 dcr282 dcr2a2 dcr2c2 dcr2e2 dcr302 dcr322 dcr342 dcr362 dcr382 dcr3a2 dcr3c2 dcr3e2
+ dcr003 dcr023 dcr043 dcr063 dcr083 dcr0a3 dcr0c3 dcr0e3 dcr103 dcr123 dcr143 dcr163 dcr183 dcr1a3 dcr1c3 dcr1e3 dcr203 dcr223 dcr243 dcr263 dcr283 dcr2a3 dcr2c3 dcr2e3 dcr303 dcr323 dcr343 dcr363 dcr383 dcr3a3 dcr3c3 dcr3e3
+ dcr004 dcr024 dcr044 dcr064 dcr084 dcr0a4 dcr0c4 dcr0e4 dcr104 dcr124 dcr144 dcr164 dcr184 dcr1a4 dcr1c4 dcr1e4 dcr204 dcr224 dcr244 dcr264 dcr284 dcr2a4 dcr2c4 dcr2e4 dcr304 dcr324 dcr344 dcr364 dcr384 dcr3a4 dcr3c4 dcr3e4
+ dcr005 dcr025 dcr045 dcr065 dcr085 dcr0a5 dcr0c5 dcr0e5 dcr105 dcr125 dcr145 dcr165 dcr185 dcr1a5 dcr1c5 dcr1e5 dcr205 dcr225 dcr245 dcr265 dcr285 dcr2a5 dcr2c5 dcr2e5 dcr305 dcr325 dcr345 dcr365 dcr385 dcr3a5 dcr3c5 dcr3e5
+ dcr006 dcr026 dcr046 dcr066 dcr086 dcr0a6 dcr0c6 dcr0e6 dcr106 dcr126 dcr146 dcr166 dcr186 dcr1a6 dcr1c6 dcr1e6 dcr206 dcr226 dcr246 dcr266 dcr286 dcr2a6 dcr2c6 dcr2e6 dcr306 dcr326 dcr346 dcr366 dcr386 dcr3a6 dcr3c6 dcr3e6
+ dcr007 dcr027 dcr047 dcr067 dcr087 dcr0a7 dcr0c7 dcr0e7 dcr107 dcr127 dcr147 dcr167 dcr187 dcr1a7 dcr1c7 dcr1e7 dcr207 dcr227 dcr247 dcr267 dcr287 dcr2a7 dcr2c7 dcr2e7 dcr307 dcr327 dcr347 dcr367 dcr387 dcr3a7 dcr3c7 dcr3e7
+ dcr008 dcr028 dcr048 dcr068 dcr088 dcr0a8 dcr0c8 dcr0e8 dcr108 dcr128 dcr148 dcr168 dcr188 dcr1a8 dcr1c8 dcr1e8 dcr208 dcr228 dcr248 dcr268 dcr288 dcr2a8 dcr2c8 dcr2e8 dcr308 dcr328 dcr348 dcr368 dcr388 dcr3a8 dcr3c8 dcr3e8
+ dcr009 dcr029 dcr049 dcr069 dcr089 dcr0a9 dcr0c9 dcr0e9 dcr109 dcr129 dcr149 dcr169 dcr189 dcr1a9 dcr1c9 dcr1e9 dcr209 dcr229 dcr249 dcr269 dcr289 dcr2a9 dcr2c9 dcr2e9 dcr309 dcr329 dcr349 dcr369 dcr389 dcr3a9 dcr3c9 dcr3e9
+ dcr00a dcr02a dcr04a dcr06a dcr08a dcr0aa dcr0ca dcr0ea dcr10a dcr12a dcr14a dcr16a dcr18a dcr1aa dcr1ca dcr1ea dcr20a dcr22a dcr24a dcr26a dcr28a dcr2aa dcr2ca dcr2ea dcr30a dcr32a dcr34a dcr36a dcr38a dcr3aa dcr3ca dcr3ea
+ dcr00b dcr02b dcr04b dcr06b dcr08b dcr0ab dcr0cb dcr0eb dcr10b dcr12b dcr14b dcr16b dcr18b dcr1ab dcr1cb dcr1eb dcr20b dcr22b dcr24b dcr26b dcr28b dcr2ab dcr2cb dcr2eb dcr30b dcr32b dcr34b dcr36b dcr38b dcr3ab dcr3cb dcr3eb
+ dcr00c dcr02c dcr04c dcr06c dcr08c dcr0ac dcr0cc dcr0ec dcr10c dcr12c dcr14c dcr16c dcr18c dcr1ac dcr1cc dcr1ec dcr20c dcr22c dcr24c dcr26c dcr28c dcr2ac dcr2cc dcr2ec dcr30c dcr32c dcr34c dcr36c dcr38c dcr3ac dcr3cc dcr3ec
+ dcr00d dcr02d dcr04d dcr06d dcr08d dcr0ad dcr0cd dcr0ed dcr10d dcr12d dcr14d dcr16d dcr18d dcr1ad dcr1cd dcr1ed dcr20d dcr22d dcr24d dcr26d dcr28d dcr2ad dcr2cd dcr2ed dcr30d dcr32d dcr34d dcr36d dcr38d dcr3ad dcr3cd dcr3ed
+ dcr00e dcr02e dcr04e dcr06e dcr08e dcr0ae dcr0ce dcr0ee dcr10e dcr12e dcr14e dcr16e dcr18e dcr1ae dcr1ce dcr1ee dcr20e dcr22e dcr24e dcr26e dcr28e dcr2ae dcr2ce dcr2ee dcr30e dcr32e dcr34e dcr36e dcr38e dcr3ae dcr3ce dcr3ee
+ dcr00f dcr02f dcr04f dcr06f dcr08f dcr0af dcr0cf dcr0ef dcr10f dcr12f dcr14f dcr16f dcr18f dcr1af dcr1cf dcr1ef dcr20f dcr22f dcr24f dcr26f dcr28f dcr2af dcr2cf dcr2ef dcr30f dcr32f dcr34f dcr36f dcr38f dcr3af dcr3cf dcr3ef
+ dcr010 dcr030 dcr050 dcr070 dcr090 dcr0b0 dcr0d0 dcr0f0 dcr110 dcr130 dcr150 dcr170 dcr190 dcr1b0 dcr1d0 dcr1f0 dcr210 dcr230 dcr250 dcr270 dcr290 dcr2b0 dcr2d0 dcr2f0 dcr310 dcr330 dcr350 dcr370 dcr390 dcr3b0 dcr3d0 dcr3f0
+ dcr011 dcr031 dcr051 dcr071 dcr091 dcr0b1 dcr0d1 dcr0f1 dcr111 dcr131 dcr151 dcr171 dcr191 dcr1b1 dcr1d1 dcr1f1 dcr211 dcr231 dcr251 dcr271 dcr291 dcr2b1 dcr2d1 dcr2f1 dcr311 dcr331 dcr351 dcr371 dcr391 dcr3b1 dcr3d1 dcr3f1
+ dcr012 dcr032 dcr052 dcr072 dcr092 dcr0b2 dcr0d2 dcr0f2 dcr112 dcr132 dcr152 dcr172 dcr192 dcr1b2 dcr1d2 dcr1f2 dcr212 dcr232 dcr252 dcr272 dcr292 dcr2b2 dcr2d2 dcr2f2 dcr312 dcr332 dcr352 dcr372 dcr392 dcr3b2 dcr3d2 dcr3f2
+ dcr013 dcr033 dcr053 dcr073 dcr093 dcr0b3 dcr0d3 dcr0f3 dcr113 dcr133 dcr153 dcr173 dcr193 dcr1b3 dcr1d3 dcr1f3 dcr213 dcr233 dcr253 dcr273 dcr293 dcr2b3 dcr2d3 dcr2f3 dcr313 dcr333 dcr353 dcr373 dcr393 dcr3b3 dcr3d3 dcr3f3
+ dcr014 dcr034 dcr054 dcr074 dcr094 dcr0b4 dcr0d4 dcr0f4 dcr114 dcr134 dcr154 dcr174 dcr194 dcr1b4 dcr1d4 dcr1f4 dcr214 dcr234 dcr254 dcr274 dcr294 dcr2b4 dcr2d4 dcr2f4 dcr314 dcr334 dcr354 dcr374 dcr394 dcr3b4 dcr3d4 dcr3f4
+ dcr015 dcr035 dcr055 dcr075 dcr095 dcr0b5 dcr0d5 dcr0f5 dcr115 dcr135 dcr155 dcr175 dcr195 dcr1b5 dcr1d5 dcr1f5 dcr215 dcr235 dcr255 dcr275 dcr295 dcr2b5 dcr2d5 dcr2f5 dcr315 dcr335 dcr355 dcr375 dcr395 dcr3b5 dcr3d5 dcr3f5
+ dcr016 dcr036 dcr056 dcr076 dcr096 dcr0b6 dcr0d6 dcr0f6 dcr116 dcr136 dcr156 dcr176 dcr196 dcr1b6 dcr1d6 dcr1f6 dcr216 dcr236 dcr256 dcr276 dcr296 dcr2b6 dcr2d6 dcr2f6 dcr316 dcr336 dcr356 dcr376 dcr396 dcr3b6 dcr3d6 dcr3f6
+ dcr017 dcr037 dcr057 dcr077 dcr097 dcr0b7 dcr0d7 dcr0f7 dcr117 dcr137 dcr157 dcr177 dcr197 dcr1b7 dcr1d7 dcr1f7 dcr217 dcr237 dcr257 dcr277 dcr297 dcr2b7 dcr2d7 dcr2f7 dcr317 dcr337 dcr357 dcr377 dcr397 dcr3b7 dcr3d7 dcr3f7
+ dcr018 dcr038 dcr058 dcr078 dcr098 dcr0b8 dcr0d8 dcr0f8 dcr118 dcr138 dcr158 dcr178 dcr198 dcr1b8 dcr1d8 dcr1f8 dcr218 dcr238 dcr258 dcr278 dcr298 dcr2b8 dcr2d8 dcr2f8 dcr318 dcr338 dcr358 dcr378 dcr398 dcr3b8 dcr3d8 dcr3f8
+ dcr019 dcr039 dcr059 dcr079 dcr099 dcr0b9 dcr0d9 dcr0f9 dcr119 dcr139 dcr159 dcr179 dcr199 dcr1b9 dcr1d9 dcr1f9 dcr219 dcr239 dcr259 dcr279 dcr299 dcr2b9 dcr2d9 dcr2f9 dcr319 dcr339 dcr359 dcr379 dcr399 dcr3b9 dcr3d9 dcr3f9
+ dcr01a dcr03a dcr05a dcr07a dcr09a dcr0ba dcr0da dcr0fa dcr11a dcr13a dcr15a dcr17a dcr19a dcr1ba dcr1da dcr1fa dcr21a dcr23a dcr25a dcr27a dcr29a dcr2ba dcr2da dcr2fa dcr31a dcr33a dcr35a dcr37a dcr39a dcr3ba dcr3da dcr3fa
+ dcr01b dcr03b dcr05b dcr07b dcr09b dcr0bb dcr0db dcr0fb dcr11b dcr13b dcr15b dcr17b dcr19b dcr1bb dcr1db dcr1fb dcr21b dcr23b dcr25b dcr27b dcr29b dcr2bb dcr2db dcr2fb dcr31b dcr33b dcr35b dcr37b dcr39b dcr3bb dcr3db dcr3fb
+ dcr01c dcr03c dcr05c dcr07c dcr09c dcr0bc dcr0dc dcr0fc dcr11c dcr13c dcr15c dcr17c dcr19c dcr1bc dcr1dc dcr1fc dcr21c dcr23c dcr25c dcr27c dcr29c dcr2bc dcr2dc dcr2fc dcr31c dcr33c dcr35c dcr37c dcr39c dcr3bc dcr3dc dcr3fc
+ dcr01d dcr03d dcr05d dcr07d dcr09d dcr0bd dcr0dd dcr0fd dcr11d dcr13d dcr15d dcr17d dcr19d dcr1bd dcr1dd dcr1fd dcr21d dcr23d dcr25d dcr27d dcr29d dcr2bd dcr2dd dcr2fd dcr31d dcr33d dcr35d dcr37d dcr39d dcr3bd dcr3dd dcr3fd
+ dcr01e dcr03e dcr05e dcr07e dcr09e dcr0be dcr0de dcr0fe dcr11e dcr13e dcr15e dcr17e dcr19e dcr1be dcr1de dcr1fe dcr21e dcr23e dcr25e dcr27e dcr29e dcr2be dcr2de dcr2fe dcr31e dcr33e dcr35e dcr37e dcr39e dcr3be dcr3de dcr3fe
+ dcr01f dcr03f dcr05f dcr07f dcr09f dcr0bf dcr0df dcr0ff dcr11f dcr13f dcr15f dcr17f dcr19f dcr1bf dcr1df dcr1ff dcr21f dcr23f dcr25f dcr27f dcr29f dcr2bf dcr2df dcr2ff dcr31f dcr33f dcr35f dcr37f dcr39f dcr3bf dcr3df dcr3ff
+];
+
+################################################################
+# Pseudo Instructions
+################################################################
+
+define pcodeop clearHistory;
+define pcodeop countLeadingZeros;
+define pcodeop countTrailingZeros;
+define pcodeop dataCacheBlockAllocate;
+define pcodeop dataCacheBlockFlush;
+define pcodeop dataCacheBlockInvalidate;
+define pcodeop dataCacheBlockStore;
+define pcodeop dataCacheBlockTouch;
+define pcodeop dataCacheBlockTouchForStore;
+define pcodeop dataCacheBlockClearToZero;
+define pcodeop dataCacheBlockSetToZeroLocked;
+define pcodeop dataCacheCongruenceClassInvalidate;
+define pcodeop dataCacheRead;
+define pcodeop externalControlIn;
+define pcodeop externalControlOut;
+define pcodeop enforceInOrderExecutionIO;
+define pcodeop instructionCacheBlockInvalidate;
+define pcodeop instructionCacheBlockTouch;
+define pcodeop instructionCacheCongruenceClassInvalidate;
+define pcodeop instructionCacheRead;
+define pcodeop instructionSynchronize;
+
+
+define pcodeop floatAddOverflow;
+define pcodeop floatDivOverflow;
+define pcodeop floatAddRoundedUp;
+define pcodeop floatDivRoundedUp;
+define pcodeop floatAddInexact;
+define pcodeop floatDivInexact;
+define pcodeop floatAddUnderflow;
+define pcodeop floatDivUnderflow;
+define pcodeop floatInfinityAdd;
+define pcodeop intToFloatRoundedUp;
+define pcodeop intToFloatInexact;
+define pcodeop invalidFloatToInt;
+define pcodeop floatToIntRoundedUp;
+define pcodeop floatToIntInexact;
+define pcodeop floatInfinityDivide;
+define pcodeop floatMaddInexact;
+define pcodeop floatMaddRoundedUp;
+define pcodeop floatMaddOverflow;
+define pcodeop floatMaddUnderflow;
+define pcodeop floatInfinityMulZero;
+
+define pcodeop floatMsubInexact;
+define pcodeop floatMsubRoundedUp;
+define pcodeop floatMsubOverflow;
+define pcodeop floatMsubUnderflow;
+define pcodeop floatInfinitySub;
+
+define pcodeop floatSubRoundedUp;
+define pcodeop floatSubInexact;
+define pcodeop floatSubOverflow;
+define pcodeop floatSubUnderflow;
+
+define pcodeop floatMulRoundedUp;
+define pcodeop floatMulOverflow;
+define pcodeop floatMulUnderflow;
+define pcodeop floatMulInexact;
+define pcodeop sqrtInvalid;
+define pcodeop floatSqrtRoundedUp;
+define pcodeop floatSqrtInexact;
+
+define pcodeop eventInterrupt;
+define pcodeop illegal;
+define pcodeop message;
+define pcodeop movebuffer;
+define pcodeop stopT;
+define pcodeop waitT;
+
+define pcodeop mematom;
+
+define pcodeop random;
+define pcodeop returnFromInterrupt;
+define pcodeop returnFromCriticalInterrupt;
+define pcodeop syscall;
+define pcodeop slbInvalidateAll;
+define pcodeop slbInvalidateEntry;
+define pcodeop slbMoveFromEntryESID;
+define pcodeop slbMoveFromEntryVSID;
+define pcodeop slbMoveToEntry;
+define pcodeop storeDoubleWordConditionalIndexed;
+define pcodeop storeWordConditionalIndexed;
+define pcodeop trapWord;
+define pcodeop trapDoubleWordImmediate;
+define pcodeop trapDoubleWord;
+define pcodeop sync;
+define pcodeop loadString;
+define pcodeop storeString;
+
+define pcodeop xer_mac_update;
+
+define pcodeop mullhw;
+define pcodeop mullhwu;
+
+define pcodeop copytrans;
+define pcodeop pastetrans;
+define pcodeop transaction;
+define pcodeop TLBRead;
+define pcodeop TLBSearchIndexed;
+define pcodeop TLBWrite;
+define pcodeop WriteExternalEnable;
+define pcodeop WriteExternalEnableImmediate;
+
+################################################################
+# Macros
+################################################################
+
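+# Carry out of an arithmetic right shift by sa: xer_ca is set only when the
+# value is negative and at least one 1-bit is shifted out of the low sa bits.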
+macro shiftCarry(value, sa)
+{
+ local mask = value; # force mask to have same size as value (may vary)
+ mask = (1 << sa) - 1;
+ xer_ca = (value s< 0) && ((value & mask)!=0);
+}
+macro getCrBit(crReg, bitIndex, result)
+{
+ tmp:1 = crReg >> (3-bitIndex);
+ result = tmp & 1;
+}
+macro setCrBit(crReg, bitIndex, bit)
+{
+ shift:1 = 3-bitIndex;
+ mask:1 = ~(1 << shift);
+ crReg = (crReg & mask) | (bit << shift);
+}
+
+macro cr0flags(result)
+{
+ setCrBit(cr0, 0, (result s< 0));
+ setCrBit(cr0, 1, (result s> 0));
+ setCrBit(cr0, 2, (result == 0));
+ setCrBit(cr0, 3, (xer_so & 1));
+}
+
+macro addOverflow(a,b) {
+ xer_ov = scarry(a,b);
+ xer_so = xer_so || xer_ov;
+}
+macro addOverflowAgain(a,b) {
+ xer_ov = scarry(a,b) || xer_ov;
+ xer_so = xer_so || xer_ov;
+}
+
+macro subOverflow(a,b) {
+ xer_ov = sborrow(a,b);
+ xer_so = xer_so || xer_ov;
+}
+
+# check b=0 or (a=0x80000000 and b=-1)
+macro divOverflow(a,b) {
+ xer_ov = (b==0) || ((b==-1) && (a==0x80000000));
+ xer_so = xer_so || xer_ov;
+}
+macro divZero(a,b) {
+ xer_ov = (b==0);
+ xer_so = xer_so || xer_ov;
+}
+
+macro mulOverflow64(result) {
+ local tmp:4 = result(4);
+ xer_ov = tmp != 0 && tmp != 0xffffffff;
+ xer_so = xer_so || xer_ov;
+}
+
+macro mulOverflow128(result) {
+ local tmp:8 = result(8);
+ xer_ov = tmp != 0 && tmp != 0xffffffffffffffff;
+ xer_so = xer_so || xer_ov;
+}
+
+macro cr1flags() {
+ setCrBit(cr1, 0, fp_fx);
+ setCrBit(cr1, 1, fp_fex);
+ setCrBit(cr1, 2, fp_vx);
+ setCrBit(cr1, 3, fp_ox);
+}
+macro setFPRF(result) {
+ fp_cc0 = result f< 0;
+ fp_cc1 = result f> 0;
+ fp_cc2 = result f== 0;
+ fp_cc3 = nan(result);
+}
+
+macro setSummaryFPSCR() {
+ fp_vx = fp_vxsnan | fp_vxisi | fp_vxidi | fp_vxzdz | fp_vximz | fp_vxvc | fp_vxsoft | fp_vxsqrt | fp_vxcvi;
+ fp_fx = fp_fx | fp_ox | fp_ux | fp_zx | fp_xx;
+ fp_fex = (fp_vx & fp_ve) | (fp_ox & fp_oe) | (fp_ux & fp_ue) | (fp_zx & fp_ze) | (fp_xx & fp_xe);
+}
+
+macro setFPAddFlags(op1, op2, result) {
+ setFPRF(result);
+# fp_fr = floatAddRoundedUp(op1, op2);
+# fp_fi = floatAddInexact(op1, op2);
+# fp_ox = fp_ox | floatAddOverflow(op1, op2);
+# fp_ux = fp_ux | floatAddUnderflow(op1, op2);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(op1, op2);
+ setSummaryFPSCR();
+}
+macro setFPDivFlags(op1, op2, result) {
+ setFPRF(result);
+# fp_fr = floatDivRoundedUp(op1, op2);
+# fp_fi = floatDivInexact(op1, op2);
+# fp_ox = fp_ox | floatDivOverflow(op1, op2);
+# fp_ux = fp_ux | floatDivUnderflow(op1, op2);
+ fp_zx = fp_zx | (op2 f== 0);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
+# fp_vxidi = fp_vxidi | floatInfinityDivide(op1, op2);
+ fp_vxzdz = fp_vxzdz | ((op1 f== 0) && (op2 f== 0));
+ setSummaryFPSCR();
+}
+macro setFPMulFlags(op1, op2, result) {
+ setFPRF(result);
+# fp_fr = floatMulRoundedUp(op1, op2);
+# fp_fi = floatMulInexact(op1, op2);
+# fp_ox = fp_ox | floatMulOverflow(op1, op2);
+# fp_ux = fp_ux | floatMulUnderflow(op1, op2);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
+# fp_vximz = fp_vximz | floatInfinityMulZero(op1, op2);
+ setSummaryFPSCR();
+}
+macro setFPSubFlags(op1, op2, result) {
+ setFPRF(result);
+# fp_fr = floatSubRoundedUp(op1, op2);
+# fp_fi = floatSubInexact(op1, op2);
+# fp_ox = fp_ox | floatSubOverflow(op1, op2);
+# fp_ux = fp_ux | floatSubUnderflow(op1, op2);
+# fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
+# fp_vxisi = fp_vxisi | floatInfinitySub(op1, op2);
+ setSummaryFPSCR();
+}
+
+macro loadRegister(reg, ea) {
+ reg = *:4(ea);
+ ea = ea+4;
+}
+
+macro loadReg(reg) {
+ reg = *:4(tea);
+ tea = tea+4;
+}
+
+macro loadRegisterPartial(reg, ea, sa) {
+ mask:$(REGISTER_SIZE) = 0xffffffff;
+ sa = ((4-sa) & 3) * 8;
+ mask = mask << sa;
+ reg = *:4(ea);
+ reg = reg & mask;
+ ea = ea + 4;
+}
+
+macro storeRegister(reg, ea) {
+ *:4(ea) = reg;
+ ea = ea+4;
+}
+
+macro storeReg(reg) {
+ *:4(tea) = reg;
+ tea = tea+4;
+}
+
+macro storeRegisterPartial(reg, ea, sa) {
+ *:4(ea) = reg;
+ ea = ea + 4;
+}
+
+
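+# Pack/unpack 32 one-bit flag varnodes to and from a 32-bit word. a0 is the
+# most significant bit (IBM bit 0) and a31 the least significant; these are
+# used below by packFPSCR/unpackFPSCR to assemble and scatter the FPSCR image.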
+macro packbits( D,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,
+ a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31) {
+ D = zext(a31) & 1;
+ D=D|(zext(a0)&1)<<31; D=D|(zext(a1)&1)<<30; D=D|(zext(a2)&1)<<29; D=D|(zext(a3)&1)<<28;
+ D=D|(zext(a4)&1)<<27; D=D|(zext(a5)&1)<<26; D=D|(zext(a6)&1)<<25; D=D|(zext(a7)&1)<<24;
+ D=D|(zext(a8)&1)<<23; D=D|(zext(a9)&1)<<22; D=D|(zext(a10)&1)<<21; D=D|(zext(a11)&1)<<20;
+ D=D|(zext(a12)&1)<<19; D=D|(zext(a13)&1)<<18; D=D|(zext(a14)&1)<<17; D=D|(zext(a15)&1)<<16;
+ D=D|(zext(a16)&1)<<15; D=D|(zext(a17)&1)<<14; D=D|(zext(a18)&1)<<13; D=D|(zext(a19)&1)<<12;
+ D=D|(zext(a20)&1)<<11; D=D|(zext(a21)&1)<<10; D=D|(zext(a22)&1)<<9; D=D|(zext(a23)&1)<<8;
+ D=D|(zext(a24)&1)<<7; D=D|(zext(a25)&1)<<6; D=D|(zext(a26)&1)<<5; D=D|(zext(a27)&1)<<4;
+ D=D|(zext(a28)&1)<<3; D=D|(zext(a29)&1)<<2; D=D|(zext(a30)&1)<<1;
+ }
+
+macro unpackbits(D,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,
+ a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31) {
+ a0=(D&0x80000000)!=0; a1=(D&0x40000000)!=0; a2=(D&0x20000000)!=0; a3=(D&0x10000000)!=0;
+ a4=(D&0x8000000)!=0; a5=(D&0x4000000)!=0; a6=(D&0x2000000)!=0; a7=(D&0x1000000)!=0;
+ a8=(D&0x800000)!=0; a9=(D&0x400000)!=0; a10=(D&0x200000)!=0; a11=(D&0x100000)!=0;
+ a12=(D&0x80000)!=0; a13=(D&0x40000)!=0; a14=(D&0x20000)!=0; a15=(D&0x10000)!=0;
+ a16=(D&0x8000)!=0; a17=(D&0x4000)!=0; a18=(D&0x2000)!=0; a19=(D&0x1000)!=0;
+ a20=(D&0x800)!=0; a21=(D&0x400)!=0; a22=(D&0x200)!=0; a23=(D&0x100)!=0;
+ a24=(D&0x80)!=0; a25=(D&0x40)!=0; a26=(D&0x20)!=0; a27=(D&0x10)!=0;
+ a28=(D&0x8)!=0; a29=(D&0x4)!=0; a30=(D&0x2)!=0; a31=(D&0x1)!=0; }
+
+macro packFPSCR(tmp) {
+ packbits(tmp, fp_fx, fp_fex, fp_vx, fp_ox, fp_ux, fp_zx, fp_xx, fp_vxsnan,
+ fp_vxisi, fp_vxidi, fp_vxzdz, fp_vximz, fp_vxvc, fp_fr, fp_fi, fp_c,
+ fp_cc0, fp_cc1, fp_cc2, fp_cc3, fp_reserve1, fp_vxsoft, fp_vxsqrt,
+ fp_vxcvi, fp_ve, fp_oe, fp_ue, fp_ze, fp_xe, fp_ni, fp_rn0, fp_rn1);
+}
+macro unpackFPSCR(tmp) {
+ unpackbits(tmp, fp_fx, fp_fex, fp_vx, fp_ox,
+ fp_ux, fp_zx, fp_xx, fp_vxsnan,
+ fp_vxisi, fp_vxidi, fp_vxzdz, fp_vximz,
+ fp_vxvc, fp_fr, fp_fi, fp_c,
+ fp_cc0, fp_cc1, fp_cc2, fp_cc3,
+ fp_reserve1, fp_vxsoft, fp_vxsqrt, fp_vxcvi,
+ fp_ve, fp_oe, fp_ue, fp_ze,
+ fp_xe, fp_ni, fp_rn0, fp_rn1);
+}
+
+################################################################
+# Sub-Constructors
+################################################################
+REL_ABS: "a" is AA = 1 {}
+REL_ABS: is AA = 0 {}
+
+addressLI: reloc is LI & AA=0 [ reloc = inst_start + LI*4;] { export *[ram]:4 reloc; }
+addressLI: reloc is LI & AA=1 [ reloc = LI*4; ] { export *[ram]:4 reloc; }
+addressBD: reloc is BD & AA=0 [ reloc = inst_start + BD*4; ] { export *[ram]:4 reloc; }
+addressBD: reloc is BD & AA=1 [ reloc = BD*4; ] { export *[ram]:4 reloc; }
+
+OFF16SH: val is D0 & D1 & D2 [ val = (D0 << 6) | (D1 << 1) | D2; ] { export *[const]:4 val;}
+
+# X 00-------------------------------06 07-07 08-----------10 11-----------13 14------15 16----------------------------------------------------------------------------31
+# X -----------------?-----------------|BO_1=1|-------?-------|-----BI_CR-----|--BI_CC---|---------------------------------------?----------------------------------------|
+CC: "lt" is BI_CC=0 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC: "le" is BI_CC=1 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
+CC: "eq" is BI_CC=2 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC: "ge" is BI_CC=0 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
+CC: "gt" is BI_CC=1 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC: "ne" is BI_CC=2 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
+CC: "so" is BI_CC=3 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC: "ns" is BI_CC=3 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
+
+TOm: "lt" is TO=16 { }
+TOm: "le" is TO=20 { }
+TOm: "eq" is TO=4 { }
+TOm: "ge" is TO=12 { }
+TOm: "gt" is TO=8 { }
+TOm: "ne" is TO=24 { }
+TOm: "llt" is TO=2 { }
+TOm: "lle" is TO=6 { }
+TOm: "lge" is TO=5 { }
+TOm: "lgt" is TO=1 { }
+TOm: "" is TO { }
+
+CTR_DEC: "z" is BO_3=1 {CTR = CTR-1; tmp:1 = (CTR == 0); export tmp; }
+CTR_DEC: "nz" is BO_3=0 {CTR = CTR-1; tmp:1 = (CTR != 0); export tmp; }
+
+CC_TF: "t" is BO_1=1 {}
+CC_TF: "f" is BO_1=0 {}
+
+# X 00---------------------------------------------------10 11-----------13 14------15 16----------------------------------------------------------------------------31
+# X ---------------------------?---------------------------|----BI_CR=0----|--BI_CC---|---------------------------------------?----------------------------------------|
+CC_OP: "lt" is BI_CC=0 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
+CC_OP: "eq" is BI_CC=2 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
+CC_OP: "gt" is BI_CC=1 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
+CC_OP: "so" is BI_CC=3 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
+CC_OP: "4*"^BI_CR^"+lt" is BI_CC=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC_OP: "4*"^BI_CR^"+eq" is BI_CC=2 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC_OP: "4*"^BI_CR^"+gt" is BI_CC=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+CC_OP: "4*"^BI_CR^"+so" is BI_CC=3 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
+
+# X 00----------------------------------------------------------------------------15 16-----------18 19------20 21---------------------------------------------------31
+# X ---------------------------------------?----------------------------------------|----CR_B=0-----|-CR_B_CC--|---------------------------?---------------------------|
+CC_B_OP: "lt" is CR_B_CC=0 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "eq" is CR_B_CC=2 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "gt" is CR_B_CC=1 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "so" is CR_B_CC=3 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "4*"^CR_B^"+lt" is CR_B_CC=0 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "4*"^CR_B^"+eq" is CR_B_CC=2 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "4*"^CR_B^"+gt" is CR_B_CC=1 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
+CC_B_OP: "4*"^CR_B^"+so" is CR_B_CC=3 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
+
+# X 00-----------------------------------------------------------------------------------------------------20 21-----------23 24------25 26--------------------------31
+# X ----------------------------------------------------?----------------------------------------------------|----CR_X=0-----|-CR_X_CC--|--------------?---------------|
+CC_X_OP: cr0 is CR_X_CC=0 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
+CC_X_OP: cr0 is CR_X_CC=1 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
+CC_X_OP: cr0 is CR_X_CC=2 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
+CC_X_OP: cr0 is CR_X_CC=3 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
+CC_X_OP: CR_X is CR_X_CC=0 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
+CC_X_OP: CR_X is CR_X_CC=1 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
+CC_X_OP: CR_X is CR_X_CC=2 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
+CC_X_OP: CR_X is CR_X_CC=3 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
+
+CC_X_OPm: "lt" is CR_X_CC=0 & CR_X=0 & CR_X_CC { }
+CC_X_OPm: "gt" is CR_X_CC=1 & CR_X=0 & CR_X_CC { }
+CC_X_OPm: "eq" is CR_X_CC=2 & CR_X=0 & CR_X_CC { }
+CC_X_OPm: "so" is CR_X_CC=3 & CR_X=0 & CR_X_CC { }
+CC_X_OPm: "lt" is CR_X_CC=0 & CR_X & CR_X_CC { }
+CC_X_OPm: "gt" is CR_X_CC=1 & CR_X & CR_X_CC { }
+CC_X_OPm: "eq" is CR_X_CC=2 & CR_X & CR_X_CC { }
+CC_X_OPm: "so" is CR_X_CC=3 & CR_X & CR_X_CC { }
+
+# X 00--------------------------05 06-----------08 09------10 11-----------------------------------------------------------------------------------------------------31
+# X --------------?---------------|----CR_D=0-----|-CR_D_CC--|----------------------------------------------------?----------------------------------------------------|
+CC_D_OP: "lt" is CR_D_CC=0 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "eq" is CR_D_CC=2 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "gt" is CR_D_CC=1 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "so" is CR_D_CC=3 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "4*"^CR_D^"+lt" is CR_D_CC=0 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "4*"^CR_D^"+eq" is CR_D_CC=2 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "4*"^CR_D^"+gt" is CR_D_CC=1 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
+CC_D_OP: "4*"^CR_D^"+so" is CR_D_CC=3 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
+
+RA_OR_ZERO: A is A { export A; }
+RA_OR_ZERO: 0 is A=0 { export 0:$(REGISTER_SIZE); }
+
+RB_OR_ZERO: B is B { export B; }
+RB_OR_ZERO: 0 is B=0 { export 0:$(REGISTER_SIZE); }
+
+RS_OR_ZERO: S is S { export S; }
+RS_OR_ZERO: 0 is S=0 { export 0:$(REGISTER_SIZE); }
+
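+# Build the rotate-and-mask MASK(MB,ME): ones from bit MBL through bit ME in
+# IBM bit order, wrapping around when ME < MBL; the ((ME-MBL)>>8)&1 term
+# detects the wrapped case and inverts the mask.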
+rotmask: mask is MBL & ME [ mask = ((((ME-MBL)>>8) $and 1)*0xffffffff) $xor (0x7fffffff>>ME) $xor (0xffffffff>>MBL); ] { export *[const]:4 mask; }
+
+DSIZE: "w" is L {} # L is a don't-care bit in 32-bit languages, although it should always be 0
+
+REG_A: is A { export A; }
+REG_B: is B { export B; }
+
+UREG_A: is A { export A; }
+UREG_B: is B { export B; }
+
+dPlusRaOrZeroAddress: SIMM(RA_OR_ZERO) is SIMM & RA_OR_ZERO { tmp:$(REGISTER_SIZE) = RA_OR_ZERO+SIMM; export tmp; }
+dPlusRaOrZeroAddressPS: SIMM_PS(RA_OR_ZERO) is SIMM_PS & RA_OR_ZERO { tmp:4 = RA_OR_ZERO+SIMM_PS; export tmp; }
+dPlusRaAddress: SIMM(A) is SIMM & A {tmp:$(REGISTER_SIZE) = A+SIMM; export tmp; }
+dPlusRaAddressPS: SIMM_PS(A) is SIMM_PS & A {tmp:4 = A+SIMM_PS; export tmp; }
+
+dUI16PlusRAOrZeroAddress: val^"("^RA_OR_ZERO^")" is RA_OR_ZERO & UI_16_s8 [ val = UI_16_s8 << 3; ] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + val; export ea; }
+
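+# mcrfs source field: export FPSCR field CRFS as a 4-bit value and clear the
+# sticky exception bits of that field, as mcrfs does on read (FEX and VX are
+# computed summary bits and are left untouched).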
+FPSCR_CRFS: is CRFS=0 {tmp:1 = fp_fx<<3 | fp_fex<<2 | fp_vx<<1 | fp_ox; fp_fx=0; fp_ox=0; export tmp;}
+FPSCR_CRFS: is CRFS=1 {tmp:1 = fp_ux<<3 | fp_zx<<2 | fp_xx<<1 | fp_vxsnan; fp_ux=0; fp_zx=0; fp_xx=0; fp_vxsnan=0; export tmp;}
+FPSCR_CRFS: is CRFS=2 {tmp:1 = fp_vxisi<<3 | fp_vxidi<<2 | fp_vxzdz<<1 | fp_vximz; fp_vxisi=0; fp_vxidi=0; fp_vxzdz=0; fp_vximz=0; export tmp;}
+FPSCR_CRFS: is CRFS=3 {tmp:1 = fp_vxvc<<3 | fp_fr<<2 | fp_fi<<1 | fp_c; fp_vxvc=0; export tmp;}
+FPSCR_CRFS: is CRFS=4 {tmp:1 = fp_cc0<<3 | fp_cc1<<2 | fp_cc2<<1 | fp_cc3; export tmp;}
+FPSCR_CRFS: is CRFS=5 {tmp:1 = fp_vxsoft<<2 | fp_vxsqrt<<1 | fp_vxcvi; fp_vxsoft=0; fp_vxsqrt=0; fp_vxcvi=0; export tmp;}
+FPSCR_CRFS: is CRFS=6 {tmp:1 = fp_ve<<3 | fp_oe <<2 | fp_ue<<1 | fp_ze; export tmp;}
+FPSCR_CRFS: is CRFS=7 {tmp:1 = fp_xe<<3 | fp_ni<<2 | fp_rn0<<1 | fp_rn1; export tmp;}
+
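+# mtcrf field select: each one-hot CRM value picks a single 4-bit CR field
+# and shifts it into its position in the packed 32-bit CR image (cr0 is the
+# most significant nibble).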
+CRM_CR: cr7 is CRM=1 & cr7 {tmp:4 = zext(cr7);export tmp;}
+CRM_CR: cr6 is CRM=2 & cr6 {tmp:4 = zext(cr6) << 4;export tmp;}
+CRM_CR: cr5 is CRM=4 & cr5 {tmp:4 = zext(cr5) << 8;export tmp;}
+CRM_CR: cr4 is CRM=8 & cr4 {tmp:4 = zext(cr4) << 12;export tmp;}
+CRM_CR: cr3 is CRM=16 & cr3 {tmp:4 = zext(cr3) << 16;export tmp;}
+CRM_CR: cr2 is CRM=32 & cr2 {tmp:4 = zext(cr2) << 20;export tmp;}
+CRM_CR: cr1 is CRM=64 & cr1 {tmp:4 = zext(cr1) << 24;export tmp;}
+CRM_CR: cr0 is CRM=128 & cr0 {tmp:4 = zext(cr0) << 28;export tmp;}
+
+################################################################
+# Instructions
+################################################################
+
+@include "ppc_instructions_gekko_broadway.sinc"
+@include "ppc_embedded.sinc"
+
+################################################################
+# Paired singles quantized load and store instructions
+################################################################
+
+define pcodeop ldexpf;
+
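+# Dequantize a paired-single load according to the GQR value in I:
+# the load type I[16,3] selects the format (4/6 = 8-bit integer, 5/7 =
+# 16-bit integer, anything else = single float) and the load scale I[24,6]
+# scales integer data by 2^-scale, modeled here by the ldexpf pcodeop.
+# When W is set, only ps0 is loaded and ps1 is set to 1.0.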
+macro load_ps(ps0D, ps1D, EA, W, I) {
+ lt:1 = 0;
+ ls:1 = 0;
+ wFlag:1 = 0;
+ lt[0,3] = I[16,3];
+ ls[0,3] = I[24,6];
+ wFlag = W;
+ one:1 = 1;
+ floatOne:8 = int2float(one);
+
+ local scale;
+ if (ls == 0) goto <no_scale>;
+ scale = ldexpf(-ls);
+ goto <scale_done>;
+ <no_scale>
+ scale = floatOne;
+ <scale_done>
+
+
+ if (lt == 4 || lt == 6) goto <load_int8>;
+ if (lt == 5 || lt == 7) goto <load_int16>;
+ goto <load_float>;
+
+ <load_int8>
+ local x81 = *:1(EA);
+ local x82 = *:1(EA + 1);
+ ps0D = scale f* int2float(x81);
+ if (wFlag) goto <int8_one>;
+ ps1D = scale f* int2float(x82);
+ goto inst_next;
+ <int8_one>
+ ps1D = floatOne;
+ goto inst_next;
+
+ <load_int16>
+ local x61 = *:2(EA);
+ local x62 = *:2(EA + 2);
+ ps0D = scale f* int2float(x61);
+ if (wFlag) goto <int16_one>;
+ ps1D = scale f* int2float(x62);
+ goto inst_next;
+ <int16_one>
+ ps1D = floatOne;
+ goto inst_next;
+
+ <load_float>
+ ps0D = float2float(*:4(EA));
+ if (wFlag) goto <float_one>;
+ ps1D = float2float(*:4(EA + 4));
+ goto inst_next;
+ <float_one>
+ ps1D = floatOne;
+}
+
+:psq_l fD,dPlusRaOrZeroAddressPS,W,I is OP=56 & fD & W & I & dPlusRaOrZeroAddressPS & ps0D & ps1D
+{
+ EA:4 = dPlusRaOrZeroAddressPS;
+ load_ps(ps0D, ps1D, EA, W, I);
+}
+
+:psq_lu fD,dPlusRaAddressPS,W,I is OP=57 & fD & A & W & I & dPlusRaAddressPS & ps0D & ps1D
+{
+ EA:4 = dPlusRaAddressPS;
+ A = EA;
+ load_ps(ps0D, ps1D, EA, W, I);
+}
+
+:psq_lux fD,RA_OR_ZERO,B,WX,IX is OP=4 & fD & RA_OR_ZERO & A & B & WX & IX & XOP_1_6=38 & BIT_0=0 & ps0D & ps1D
+{
+ EA:4 = RA_OR_ZERO + B;
+ A = EA;
+ load_ps(ps0D, ps1D, EA, WX, IX);
+}
+
+:psq_lx fD,RA_OR_ZERO,B,WX,IX is OP=4 & fD & RA_OR_ZERO & B & WX & IX & XOP_1_6=6 & BIT_0=0 & ps0D & ps1D
+{
+ EA:4 = RA_OR_ZERO + B;
+ load_ps(ps0D, ps1D, EA, WX, IX);
+}
+
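+# Quantize a paired-single store according to the GQR value in I: the store
+# type I[0,3] and store scale I[8,6] mirror the load case, with integer data
+# scaled by 2^scale (via ldexpf) and truncated. When W is set, only ps0 is
+# stored.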
+macro store_ps(ps0S, ps1S, EA, W, I) {
+ lt:1 = 0;
+ ls:1 = 0;
+ lt[0,3] = I[0,3];
+ ls[0,3] = I[8,6];
+ wFlag:1 = W;
+
+ local scale;
+ if (ls == 0) goto <no_scale>;
+ scale = ldexpf(ls);
+ goto <scale_done>;
+ <no_scale>
+ one:1 = 1;
+ floatOne:8 = int2float(one);
+ scale = floatOne;
+ <scale_done>
+
+
+ if (lt == 4 || lt == 6) goto <store_int8>;
+ if (lt == 5 || lt == 7) goto <store_int16>;
+ goto <store_float>;
+
+ <store_int8>
+ *:1(EA) = trunc(scale f* ps0S);
+ if (wFlag == 1) goto inst_next;
+ *:1(EA + 1) = trunc(scale f* ps1S);
+ goto inst_next;
+
+ <store_int16>
+ *:2(EA) = trunc(scale f* ps0S);
+ if (wFlag == 1) goto inst_next;
+ *:2(EA + 2) = trunc(scale f* ps1S);
+ goto inst_next;
+
+ <store_float>
+ *:4(EA) = float2float(ps0S);
+ if (wFlag == 1) goto inst_next;
+ *:4(EA + 4) = float2float(ps1S);
+}
+
+:psq_st fS,dPlusRaOrZeroAddressPS,W,I is OP=60 & fS & A & W & I & dPlusRaOrZeroAddressPS & ps0S & ps1S
+{
+ EA:4 = dPlusRaOrZeroAddressPS;
+ store_ps(ps0S, ps1S, EA, W, I);
+}
+
+:psq_stu fS,dPlusRaAddressPS,W,I is OP=61 & fS & A & W & I & dPlusRaAddressPS & ps0S & ps1S
+{
+ EA:4 = dPlusRaAddressPS;
+ A = EA;
+ store_ps(ps0S, ps1S, EA, W, I);
+}
+
+:psq_stux fS,RA_OR_ZERO,B,WX,IX is OP=4 & fS & RA_OR_ZERO & A & B & WX & IX & XOP_1_6=39 & BIT_0=0 & ps0S & ps1S
+{
+ EA:4 = RA_OR_ZERO + B;
+ A = EA;
+ store_ps(ps0S, ps1S, EA, WX, IX);
+}
+
+:psq_stx fS,RA_OR_ZERO,B,WX,IX is OP=4 & fS & RA_OR_ZERO & B & WX & IX & XOP_1_6=7 & BIT_0=0 & ps0S & ps1S
+{
+ EA:4 = RA_OR_ZERO + B;
+ store_ps(ps0S, ps1S, EA, WX, IX);
+}
+
+################################################################
+# Paired singles instructions
+################################################################
+
+:ps_abs fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=0 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = abs(ps0B);
+ ps1D = abs(ps1B);
+}
+
+:ps_abs. fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=1 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = abs(ps0B);
+ ps1D = abs(ps1B);
+ cr1flags();
+}
+
+:ps_add fD,fA,fB is OP=4 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A f+ ps0B;
+ setFPAddFlags(ps0A, ps0B, ps0D);
+ ps1D = ps1A f+ ps1B;
+}
+
+:ps_add. fD,fA,fB is OP=4 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A f+ ps0B;
+ setFPAddFlags(ps0A, ps0B, ps0D);
+ ps1D = ps1A f+ ps1B;
+ cr1flags();
+}
+
+:ps_cmpo0 CRFD,fA,fB is OP=4 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=32 & BIT_0=0 & ps0B & ps1B & ps0A & ps1A
+{
+ tmp:1 = nan(ps0A) | nan(ps0B);
+ fp_cc0 = (ps0A f< ps0B);
+ fp_cc1 = (ps0A f> ps0B);
+ fp_cc2 = (ps0A f== ps0B);
+ CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp;
+}
+
+:ps_cmpo1 CRFD,fA,fB is OP=4 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=96 & BIT_0=0 & ps0B & ps1B & ps0A & ps1A
+{
+ tmp:1 = nan(ps1A) | nan(ps1B);
+ fp_cc0 = (ps1A f< ps1B);
+ fp_cc1 = (ps1A f> ps1B);
+ fp_cc2 = (ps1A f== ps1B);
+ CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp;
+}
+
+:ps_cmpu0 CRFD,fA,fB is OP=4 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=0 & BIT_0=0 & ps0B & ps1B & ps0A & ps1A
+{
+ tmp:1 = nan(ps0A) | nan(ps0B);
+ fp_cc0 = (ps0A f< ps0B);
+ fp_cc1 = (ps0A f> ps0B);
+ fp_cc2 = (ps0A f== ps0B);
+ CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp;
+}
+
+:ps_cmpu1 CRFD,fA,fB is OP=4 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=64 & BIT_0=0 & ps0B & ps1B & ps0A & ps1A
+{
+ tmp:1 = nan(ps1A) | nan(ps1B);
+ fp_cc0 = (ps1A f< ps1B);
+ fp_cc1 = (ps1A f> ps1B);
+ fp_cc2 = (ps1A f== ps1B);
+ CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp;
+}
+
+:ps_div fD,fA,fB is OP=4 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A f/ ps0B;
+ setFPDivFlags(ps0A, ps0B, ps0D);
+ ps1D = ps1A f/ ps1B;
+}
+
+:ps_div. fD,fA,fB is OP=4 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A f/ ps0B;
+ setFPDivFlags(ps0A, ps0B, ps0D);
+ ps1D = ps1A f/ ps1B;
+ cr1flags();
+}
+
+:ps_madd fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=29 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ ps0D = tmp1 f+ ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps0C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ ps1D = tmp2 f+ ps1B;
+}
+
+:ps_madd. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=29 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ ps0D = tmp1 f+ ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps0C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ ps1D = tmp2 f+ ps1B;
+ cr1flags();
+}
+
+:ps_madds0 fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=14 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ ps0D = tmp1 f+ ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps0C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps0C;
+ ps1D = tmp2 f+ ps1B;
+}
+
+:ps_madds0. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=14 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ ps0D = tmp1 f+ ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps0C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps0C;
+ ps1D = tmp2 f+ ps1B;
+ cr1flags();
+}
+
+:ps_madds1 fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=15 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps1C;
+ ps0D = tmp1 f+ ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps1C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ ps1D = tmp2 f+ ps1B;
+}
+
+:ps_madds1. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=15 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps1C;
+ ps0D = tmp1 f+ ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps1C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ ps1D = tmp2 f+ ps1B;
+ cr1flags();
+}
+
+:ps_merge00 fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=528 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A;
+ ps1D = ps0B;
+}
+
+:ps_merge00. fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=528 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A;
+ ps1D = ps0B;
+ cr1flags();
+}
+
+:ps_merge01 fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=560 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A;
+ ps1D = ps1B;
+}
+
+:ps_merge01. fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=560 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A;
+ ps1D = ps1B;
+ cr1flags();
+}
+
+:ps_merge10 fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=592 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps1A;
+ ps1D = ps0B;
+}
+
+:ps_merge10. fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=592 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps1A;
+ ps1D = ps0B;
+ cr1flags();
+}
+
+:ps_merge11 fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=624 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps1A;
+ ps1D = ps1B;
+}
+
+:ps_merge11. fD,fA,fB is OP=4 & fD & fA & fB & XOP_1_10=624 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps1A;
+ ps1D = ps1B;
+ cr1flags();
+}
+
+:ps_mr fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=0 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = ps0B;
+ ps1D = ps1B;
+}
+
+:ps_mr. fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=1 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = ps0B;
+ ps1D = ps1B;
+ cr1flags();
+}
+
+:ps_msub fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=28 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ ps0D = tmp1 f- ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ ps1D = tmp2 f- ps1B;
+}
+
+:ps_msub. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=28 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ ps0D = tmp1 f- ps0B;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ ps1D = tmp2 f- ps1B;
+ cr1flags();
+}
+
+:ps_mul fD,fA,fC is OP=4 & fD & fA & BITS_11_15=0 & fC & XOP_1_5=25 & Rc=0 & ps0D & ps1D & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f* ps0C;
+ setFPMulFlags(ps0A, ps0C, ps0D);
+ ps1D = ps1A f* ps1C;
+}
+
+:ps_mul. fD,fA,fC is OP=4 & fD & fA & BITS_11_15=0 & fC & XOP_1_5=25 & Rc=1 & ps0D & ps1D & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f* ps0C;
+ setFPMulFlags(ps0A, ps0C, ps0D);
+ ps1D = ps1A f* ps1C;
+ cr1flags();
+}
+
+:ps_muls0 fD,fA,fC is OP=4 & fD & fA & BITS_11_15=0 & fC & XOP_1_5=12 & Rc=0 & ps0D & ps1D & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f* ps0C;
+ setFPMulFlags(ps0A, ps0C, ps0D);
+ ps1D = ps1A f* ps0C;
+}
+
+:ps_muls0. fD,fA,fC is OP=4 & fD & fA & BITS_11_15=0 & fC & XOP_1_5=12 & Rc=1 & ps0D & ps1D & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f* ps0C;
+ setFPMulFlags(ps0A, ps0C, ps0D);
+ ps1D = ps1A f* ps0C;
+ cr1flags();
+}
+
+:ps_muls1 fD,fA,fC is OP=4 & fD & fA & BITS_11_15=0 & fC & XOP_1_5=13 & Rc=0 & ps0D & ps1D & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f* ps1C;
+ setFPMulFlags(ps0A, ps1C, ps0D);
+ ps1D = ps1A f* ps1C;
+}
+
+:ps_muls1. fD,fA,fC is OP=4 & fD & fA & BITS_11_15=0 & fC & XOP_1_5=13 & Rc=1 & ps0D & ps1D & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f* ps1C;
+ setFPMulFlags(ps0A, ps1C, ps0D);
+ ps1D = ps1A f* ps1C;
+ cr1flags();
+}
+
+:ps_nabs fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=136 & Rc=0 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = f- (abs(ps0B));
+ ps1D = f- (abs(ps1B));
+}
+
+:ps_nabs. fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=136 & Rc=1 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = f- (abs(ps0B));
+ ps1D = f- (abs(ps1B));
+ cr1flags();
+}
+
+:ps_neg fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=40 & Rc=0 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = f- ps0B;
+ ps1D = f- ps1B;
+}
+
+:ps_neg. fD,fB is OP=4 & fD & BITS_16_20=0 & fB & XOP_1_10=40 & Rc=1 & ps0D & ps1D & ps0B & ps1B
+{
+ ps0D = f- ps0B;
+ ps1D = f- ps1B;
+ cr1flags();
+}
+
+:ps_nmadd fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=31 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ tmp1 = tmp1 f+ ps0B;
+ ps0D = f- tmp1;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps0C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ tmp2 = tmp2 f+ ps1B;
+ ps1D = f- tmp2;
+}
+
+:ps_nmadd. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=31 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ tmp1 = tmp1 f+ ps0B;
+ ps0D = f- tmp1;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0A) | nan(ps0C) | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ tmp2 = tmp2 f+ ps1B;
+ ps1D = f- tmp2;
+ cr1flags();
+}
+
+:ps_nmsub fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=30 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ tmp1 = tmp1 f- ps0B;
+ ps0D = f- tmp1;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ tmp2 = tmp2 f- ps1B;
+ ps1D = f- tmp2;
+}
+
+:ps_nmsub. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=30 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ tmp1:8 = ps0A f* ps0C;
+ tmp1 = tmp1 f- ps0B;
+ ps0D = f- tmp1;
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+ setSummaryFPSCR();
+ tmp2:8 = ps1A f* ps1C;
+ tmp2 = tmp2 f- ps1B;
+ ps1D = f- tmp2;
+ cr1flags();
+}
+
+:ps_res fD,fB is OP=4 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=0 & ps0D & ps1D & ps0B & ps1B
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmp1:8 = float2float(floatOne f/ ps0B);
+ ps0D = float2float(tmp1);
+ setFPRF(ps0D);
+ fp_zx = fp_zx | (ps0B f== 0);
+ fp_vxsnan = fp_vxsnan | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = float2float(floatOne f/ ps1B);
+ ps1D = float2float(tmp2);
+}
+
+:ps_res. fD,fB is OP=4 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=1 & ps0D & ps1D & ps0B & ps1B
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmp1:8 = float2float(floatOne f/ ps0B);
+ ps0D = float2float(tmp1);
+ setFPRF(ps0D);
+ fp_zx = fp_zx | (ps0B f== 0);
+ fp_vxsnan = fp_vxsnan | nan(ps0B);
+ setSummaryFPSCR();
+ tmp2:8 = float2float(floatOne f/ ps1B);
+ ps1D = float2float(tmp2);
+ cr1flags();
+}
+
+:ps_rsqrte fD,fB is OP=4 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=0 & ps0D & ps1D & ps0B & ps1B
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmpSqrt1:8 = sqrt(ps0B);
+ ps0D = (floatOne f/ tmpSqrt1);
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0B);
+ setSummaryFPSCR();
+ tmpSqrt2:8 = sqrt(ps1B);
+ ps1D = (floatOne f/ tmpSqrt2);
+}
+
+:ps_rsqrte. fD,fB is OP=4 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=1 & ps0D & ps1D & ps0B & ps1B
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmpSqrt1:8 = sqrt(ps0B);
+ ps0D = (floatOne f/ tmpSqrt1);
+ setFPRF(ps0D);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(ps0B);
+ setSummaryFPSCR();
+ tmpSqrt2:8 = sqrt(ps1B);
+ ps1D = (floatOne f/ tmpSqrt2);
+ cr1flags();
+}
+
+:ps_sel fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=23 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ zero:8 = 0;
+ zeroFloat:8 = int2float(zero);
+ ps0D = ps0C;
+ if (ps0A f>= zeroFloat) goto <ps0_done>;
+ ps0D = ps0B;
+ <ps0_done>
+ ps1D = ps1C;
+ if (ps1A f>= zeroFloat) goto inst_next;
+ ps1D = ps1B;
+}
+
+:ps_sel. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=23 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ zero:8 = 0;
+ zeroFloat:8 = int2float(zero);
+ ps0D = ps0C;
+ if (ps0A f>= zeroFloat) goto <ps0_done>;
+ ps0D = ps0B;
+ <ps0_done>
+ ps1D = ps1C;
+ cr1flags();
+ if (ps1A f>= zeroFloat) goto inst_next;
+ ps1D = ps1B;
+ cr1flags();
+}
+
+:ps_sub fD,fA,fB is OP=4 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A f- ps0B;
+ setFPSubFlags(ps0A, ps0B, ps0D);
+ ps1D = ps1A f- ps1B;
+}
+
+:ps_sub. fD,fA,fB is OP=4 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A
+{
+ ps0D = ps0A f- ps0B;
+ setFPSubFlags(ps0A, ps0B, ps0D);
+ ps1D = ps1A f- ps1B;
+ cr1flags();
+}
+
+:ps_sum0 fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=10 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f+ ps1B;
+ setFPAddFlags(ps0A, ps1B, ps0D);
+ ps1D = ps1C;
+}
+
+:ps_sum0. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=10 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0A f+ ps1B;
+ setFPAddFlags(ps0A, ps1B, ps0D);
+ ps1D = ps1C;
+ cr1flags();
+}
+
+:ps_sum1 fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=11 & Rc=0 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0C;
+ ps1D = ps0A f+ ps1B;
+ setFPAddFlags(ps0A, ps1B, ps1D);
+}
+
+:ps_sum1. fD,fA,fC,fB is OP=4 & fD & fA & fB & fC & XOP_1_5=11 & Rc=1 & ps0D & ps1D & ps0B & ps1B & ps0A & ps1A & ps0C & ps1C
+{
+ ps0D = ps0C;
+ ps1D = ps0A f+ ps1B;
+ setFPAddFlags(ps0A, ps1B, ps1D);
+ cr1flags();
+}
+
+################################################################
+# Cache instructions
+################################################################
+
+:dcbz_l RA_OR_ZERO,B is OP=4 & BITS_21_25=0 & RA_OR_ZERO & B & XOP_1_10=1014 & BIT_0=0
+{
+ ea:4 = RA_OR_ZERO + B;
+ dataCacheBlockSetToZeroLocked(ea);
+}
diff --git a/data/languages/ppc_instructions_gekko_broadway.sinc b/data/languages/ppc_instructions_gekko_broadway.sinc
new file mode 100644
index 0000000..b16a1dd
--- /dev/null
+++ b/data/languages/ppc_instructions_gekko_broadway.sinc
@@ -0,0 +1,3815 @@
+#===========================================================
+# ADD
+#===========================================================
+
+#add r1,r2,r3 0x7c 22 1a 14
+:add D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=266 & Rc=0
+{
+ D = A + B;
+}
+
+#add. r1,r2,r3 0x7c 22 1a 15
+:add. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=266 & Rc=1
+{
+ D = A + B;
+ cr0flags(D);
+}
+
+#addo r1,r2,r3 0x7c 22 1e 14
+:addo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=266 & Rc=0
+{
+ D = A + B;
+ addOverflow(A,B);
+}
+
+#addo. r1,r2,r3 0x7c 22 1e 15
+:addo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=266 & Rc=1
+{
+ D = A + B;
+ addOverflow( A, B );
+ cr0flags(D);
+}
+
+#addc r1,r2,r3 0x7c 22 18 14
+:addc D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=10 & Rc=0
+{
+ xer_ca = carry(A,B);
+ D = A + B;
+}
+
+#addc. r1,r2,r3 0x7c 22 18 15
+:addc. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=10 & Rc=1
+{
+ xer_ca = carry(A,B);
+ D = A + B;
+ cr0flags(D);
+}
+
+#addco r1,r2,r3 0x7c 22 1c 14
+:addco D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=10 & Rc=0
+{
+ xer_ca = carry(A,B);
+ addOverflow( A, B );
+ D = A + B;
+}
+
+#addco. r1,r2,r3 0x7c 22 1c 15
+:addco. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=10 & Rc=1
+{
+ xer_ca = carry(A,B);
+ addOverflow( A, B );
+ D = A + B;
+ cr0flags(D);
+}
+
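+# adde adds with carry-in: D = A + B + CA. The carry out is accumulated in
+# two steps because either the B + CA addition or the final A + tmp addition
+# can produce it.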
+#adde r1,r2,r3 0x7c 22 19 14
+:adde D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=138 & Rc=0
+{
+ zextCarry:$(REGISTER_SIZE) = zext(xer_ca);
+ xer_ca = carry(B, zextCarry);
+ tmp:$(REGISTER_SIZE)=B + zextCarry;
+ xer_ca = xer_ca || carry(A, tmp);
+ D=A+tmp;
+}
+
+#adde. r1,r2,r3 0x7c 22 19 15
+:adde. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=138 & Rc=1
+{
+ zextCarry:$(REGISTER_SIZE) = zext(xer_ca);
+ xer_ca = carry(B, zextCarry);
+ tmp:$(REGISTER_SIZE)=B + zextCarry;
+ xer_ca = xer_ca || carry(A, tmp);
+ D=A+tmp;
+ cr0flags(D);
+}
+
+#addeo r1,r2,r3 0x7c 22 1d 14
+:addeo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=138 & Rc=0
+{
+ zextCarry:$(REGISTER_SIZE) = zext(xer_ca);
+ xer_ca = carry(B, zextCarry);
+ addOverflow(B, zextCarry);
+ tmp:$(REGISTER_SIZE)=B + zextCarry;
+ addOverflowAgain(A,tmp);
+ xer_ca = xer_ca || carry(A, tmp);
+ D=A+tmp;
+}
+
+#addeo. r1,r2,r3 0x7c 22 1d 15
+:addeo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=138 & Rc=1
+{
+ zextCarry:$(REGISTER_SIZE) = zext(xer_ca);
+ xer_ca = carry(B, zextCarry);
+ addOverflow(B, zextCarry);
+ tmp:$(REGISTER_SIZE)=B + zextCarry;
+ addOverflowAgain(A,tmp);
+ xer_ca = xer_ca || carry(A, tmp);
+ D=A+tmp;
+ cr0flags(D);
+}
+#addi r0,r1,0x7fff 0x38 01 7f ff
+#addi r0,r1,1 0x38 01 00 01
+:addi D,A,SIMM is $(NOTVLE) & OP=14 & D & A & SIMM_SIGN=0 & SIMM
+{
+ D = A + SIMM;
+}
+
+#li r0,-0x1 0x38 00 FF FF # addi simplified mnemonic
+:li D,SIMM is $(NOTVLE) & OP=14 & D & A=0 & SIMM_SIGN=1 & SIMM
+{
+ D = SIMM;
+}
+
+#li r0,1 0x38 00 00 01 # addi simplified mnemonic
+:li D,SIMM is $(NOTVLE) & OP=14 & D & A=0 & SIMM_SIGN=0 & SIMM
+{
+ D = SIMM;
+}
+
+#subi r0,r1,1 0x38 01 FF FF # addi simplified mnemonic
+:subi D,A,tmp is $(NOTVLE) & OP=14 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ]
+{
+ D = A + SIMM;
+}
+
+#addic r0,r0,2 0x30 00 00 02
+:addic D,A,SIMM is $(NOTVLE) & OP=12 & D & A & SIMM_SIGN=0 & SIMM
+{
+ xer_ca=carry(A,SIMM);
+ D = A + SIMM;
+}
+
+#subic r0,r0,2 0x30 00 FF FE # addic simplified mnemonic
+:subic D,A,tmp is $(NOTVLE) & OP=12 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ]
+{
+ xer_ca=carry(A,SIMM);
+ D = A + SIMM;
+}
+
+#addic. r0,r0,5 0x34 00 00 05
+:addic. D,A,SIMM is $(NOTVLE) & OP=13 & D & A & SIMM_SIGN=0 & SIMM
+{
+ xer_ca = carry(A,SIMM);
+ D = A + SIMM;
+ cr0flags( D );
+}
+
+#subic. r0,r0,1 0x34 00 FF FF # addic. simplified mnemonic
+:subic. D,A,tmp is $(NOTVLE) & OP=13 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ]
+{
+ xer_ca=carry(A,SIMM);
+ D = A + SIMM;
+ cr0flags( D );
+}
+
+#addis r0,r1,1 0x3c 01 00 01
+:addis D,A,SIMM is $(NOTVLE) & OP=15 & D & A & SIMM_SIGN=0 & SIMM
+{
+ D = A + (SIMM:$(REGISTER_SIZE) << 16);
+}
+
+#lis r0,-1 0x3c 00 FF FF # addis simplified mnemonic
+:lis D,SIMM is $(NOTVLE) & OP=15 & D & A=0 & SIMM_SIGN=1 & SIMM
+{
+ D = SIMM:$(REGISTER_SIZE) << 16;
+}
+
+#lis r0,1 0x3c 00 00 01 # addis simplified mnemonic
+:lis D,SIMM is $(NOTVLE) & OP=15 & D & A=0 & SIMM_SIGN=0 & SIMM
+{
+ D = SIMM:$(REGISTER_SIZE) << 16;
+}
+
+#subis r0,r1,1 0x3c 01 FF FF # addis simplified mnemonic
+:subis D,A,tmp is $(NOTVLE) & OP=15 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ]
+{
+ D = A + (SIMM:$(REGISTER_SIZE) << 16);
+}
+
+#addme r0,r0 0x7c 00 01 D4
+:addme D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=234 & Rc=0
+{
+ tmp:$(REGISTER_SIZE) = zext(xer_ca) - 1;
+ xer_ca = carry(A, tmp);
+ D = A + tmp;
+}
+
+#addme. r0,r0 0x7c 00 01 D5
+:addme. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=234 & Rc=1
+{
+ tmp:$(REGISTER_SIZE) = zext(xer_ca) - 1;
+ xer_ca = carry(A, tmp);
+ D = A + tmp;
+ cr0flags(D);
+}
+
+#addmeo r0,r0 0x7C 00 05 D4
+:addmeo D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=234 & Rc=0
+{
+ tmp:$(REGISTER_SIZE) = zext(xer_ca) - 1;
+ xer_ca = carry(A, tmp);
+ addOverflow(A, tmp);
+ D = A + tmp;
+}
+
+#addmeo. r0,r0 0x7C 00 05 D5
+:addmeo. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=234 & Rc=1
+{
+ tmp:$(REGISTER_SIZE) = zext(xer_ca) - 1;
+ xer_ca = carry(A, tmp);
+ addOverflow(A, tmp);
+ D = A + tmp;
+ cr0flags(D);
+}
+
+#addze r0,r0 0x7C 00 01 94
+:addze D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=202 & Rc=0
+{
+ zextedCarry:$(REGISTER_SIZE) = zext( xer_ca );
+ xer_ca = carry(A,zextedCarry);
+ D = A + zextedCarry;
+}
+
+#addze. r0,r0 0x7C 00 01 95
+:addze. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=202 & Rc=1
+{
+ zextedCarry:$(REGISTER_SIZE) = zext( xer_ca );
+ xer_ca=carry(A,zextedCarry);
+ D = A + zextedCarry;
+ cr0flags( D );
+}
+
+#addzeo r0,r0 0x7C 00 05 94
+:addzeo D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=202 & Rc=0
+{
+ zextedCarry:$(REGISTER_SIZE) = zext( xer_ca );
+ xer_ca=carry(A,zextedCarry);
+ addOverflow(A,zextedCarry);
+ D = A + zextedCarry;
+}
+
+#addzeo. r0,r0 0x7C 00 05 95
+:addzeo. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=202 & Rc=1
+{
+ zextedCarry:$(REGISTER_SIZE) = zext( xer_ca );
+ xer_ca=carry(A,zextedCarry);
+ addOverflow(A,zextedCarry);
+ D = A + zextedCarry;
+ cr0flags( D );
+}
+
+#===========================================================
+# AND
+#===========================================================
+
+#and r0,r0,r0 0x7C 00 00 38
+:and A,S,B is OP=31 & S & A & B & XOP_1_10=28 & Rc=0
+{
+ A = S & B;
+}
+
+#and. r0,r0,r0 0x7C 00 00 39
+:and. A,S,B is OP=31 & S & A & B & XOP_1_10=28 & Rc=1
+{
+ A = S & B;
+ cr0flags( A );
+}
+
+#andc r0,r0,r0 0x7C 00 00 78
+:andc A,S,B is OP=31 & S & A & B & XOP_1_10=60 & Rc=0
+{
+ A = S & ~B;
+}
+
+#andc. r0,r0,r0 0x7C 00 00 79
+:andc. A,S,B is OP=31 & S & A & B & XOP_1_10=60 & Rc=1
+{
+ A = S & ~B;
+ cr0flags( A );
+}
+
+#andi. r0,r0,0xffff 0x70 00 ff ff
+:andi. A,S,UIMM is $(NOTVLE) & OP=28 & S & A & UIMM
+{
+ A = S & UIMM:$(REGISTER_SIZE);
+ cr0flags( A );
+}
+
+#andis. r0,r0,1 0x74 00 00 01
+:andis. A,S,UIMM is $(NOTVLE) & OP=29 & A & S & UIMM
+{
+ A = S & (UIMM:$(REGISTER_SIZE) << 16);
+ cr0flags( A );
+}
+
+#===========================================================
+# Branch (op=18)
+#===========================================================
+
+#b 1008 0x48 00 00 08 (assuming a starting address of 1000)
+#ba LAB_00000158 0x48 00 01 5a
+:b^REL_ABS addressLI is $(NOTVLE) & OP=18 & REL_ABS & addressLI & LK=0
+{
+ goto addressLI;
+}
+
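+# linkreg is a context flag set elsewhere in this spec when a preceding
+# instruction loaded LR; the linkreg=1 variants then model the branch as a
+# plain goto, and each disassembly action clears the flag again.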
+:b^REL_ABS addressLI is linkreg=1 & OP=18 & REL_ABS & addressLI & LK=0
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ # don't do this anymore, detect another way
+ # call addressLI;
+ # return [LR];
+ goto addressLI;
+}
+
+#bl 0x48 00 00 09
+#bla 0x48 00 10 0f
+:bl^REL_ABS addressLI is $(NOTVLE) & OP=18 & REL_ABS & addressLI & LK=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ r2Save = r2; # Save r2 (needed for branch to ppc64 call stub)
+ LR = inst_next;
+ call addressLI;
+}
+
+# special case when branch is to fall-through instruction, just loading the link register
+#bl 0x48 00 00 05
+:bl addressLI is $(NOTVLE) & OP=18 & REL_ABS & AA=0 & addressLI & LK=1 & LI=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ LR = inst_next;
+ goto addressLI;
+}
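+
+# (LI=1 encodes a displacement of one instruction, i.e. the "bl +4" get-PC
+# idiom: LR is loaded and control simply falls through.)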
+
+#===========================================================
+# Branch Conditional (op=16)
+#===========================================================
+
+#b sameAddr 0x42 80 00 00
+#ba LAB_0000 0x42 80 00 02
+:b^REL_ABS addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=0
+{
+ goto addressBD;
+}
+
+:b^REL_ABS addressBD is linkreg=1 & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=0
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ # don't do this anymore, detect another way
+ # call addressBD;
+ # return [LR];
+ goto addressBD;
+}
+
+#bl LAB_0000 0x42 80 00 01
+#bla LAB_0000 0x42 80 00 03
+:bl^REL_ABS addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ LR = inst_next;
+ call addressBD;
+}
+
+# special case when branch is to fall-through instruction, just loading the link register
+#bl (Load LR)
+:bl addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & BD=1 & LK=1
+{
+ LR = inst_next;
+ goto addressBD;
+}
+
+
+
+#blt LAB_0000 0x41 80 00 00
+:b^CC^REL_ABS addressBD is $(NOTVLE) & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 &
+ REL_ABS & LK=0
+ [ linkreg=0; globalset(inst_start,linkreg); ] # affects both flows, but not at this instruction
+{
+ if (CC) goto addressBD;
+}
+## do a special linkreg setting only if linkreg is set, since this happens all over the code
+:b^CC^REL_ABS addressBD is linkreg=1 & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 &
+ REL_ABS & LK=0
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (CC) goto addressBD;
+}
+
+#bltl LAB_0000 0x41 80 00 01
+:b^CC^"l"^REL_ABS addressBD is $(NOTVLE) & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 &
+ REL_ABS & LK=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ LR = inst_next;
+ call addressBD;
+}
+
+#bne cr2,LAB_xxxx 0x40 8a 00 00
+:b^CC^REL_ABS BI_CR,addressBD is $(NOTVLE) & OP=16 & CC & BI_CR & addressBD & BO_0=0 & BO_2=1 &
+ REL_ABS & LK=0
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (CC) goto addressBD;
+}
+
+#bnel cr2,LAB_xxxx 0x40 8a 00 01
+:b^CC^"l"^REL_ABS BI_CR,addressBD is $(NOTVLE) & OP=16 & CC & BI_CR & addressBD & BO_0=0 & BO_2=1 &
+ REL_ABS & LK=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ LR = inst_next;
+ call addressBD;
+}
+
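+# CTR_DEC (a subconstructor defined elsewhere in this spec) decrements CTR
+# and yields the BO-selected test, so one constructor covers both the bdnz
+# and bdz spellings.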
+#bdnz LAB_0000 0x42 00 00 00
+:bd^CTR_DEC^REL_ABS addressBD is $(NOTVLE) & OP=16 & CTR_DEC & REL_ABS & addressBD & BO_0=1 & BO_2=0 & LK=0
+{
+ if (CTR_DEC) goto addressBD;
+}
+
+#bdnzl FUN_0xxx 0x42 00 00 01
+#bdzla FUN_0000 0x42 40 00 03
+:bd^CTR_DEC^"l"^REL_ABS addressBD is $(NOTVLE) & OP=16 & CTR_DEC & REL_ABS & addressBD & BO_0=1 & BO_2=0 & LK=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CTR_DEC) goto inst_next;
+ LR = inst_next;
+ call addressBD;
+}
+
+#bdnzf lt,LAB_0000 0x40 00 00 00
+#bdnzf 4*cr2+eq,LAB_0000 0x40 0a 00 00
+:bd^CTR_DEC^CC_TF^REL_ABS CC_OP,addressBD is $(NOTVLE) & OP=16 & CC_TF & REL_ABS & CTR_DEC & CC_OP & addressBD & BO_0=0 & BO_2=0 & LK=0
+{
+ if (CTR_DEC && CC_OP) goto addressBD;
+}
+
+#bdzfl lt,FUN_0000 0x40 00 00 01
+#bdnzfl 4*cr2+eq,FUN_0000 0x40 0a 00 01
+:bd^CTR_DEC^CC_TF^"l"^REL_ABS CC_OP,addressBD is $(NOTVLE) & OP=16 & CC_TF & CTR_DEC & REL_ABS & CC_OP & addressBD & BO_0=0 & BO_2=0 & LK=1
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!(CTR_DEC && CC_OP)) goto inst_next;
+ LR = inst_next;
+ call addressBD;
+}
+
+
+#===========================================================
+# Branch Conditional CTR(op=19, xop=528)
+#===========================================================
+
+
+#bctr 0x4E 80 04 20
+:bctr is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=528
+{
+ goto [CTR];
+}
+
+:bctr is $(NOTVLE) & linkreg=1 & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ # don't do this anymore, detect another way
+ # call [CTR];
+ # return [LR];
+ goto [CTR];
+}
+
+:bctr BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=528
+{
+ goto [CTR];
+}
+
+#bctrl 0x4e 80 04 21
+:bctrl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ LR = inst_next;
+ call [CTR];
+}
+:bctrl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ LR = inst_next;
+ call [CTR];
+}
+
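+# BH is only a branch-usage hint: constructors with a BH operand keep the
+# hint visible in the disassembly without changing the semantics.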
+#bgectr 0x4c 80 04 20
+:b^CC^"ctr" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528
+{
+ if (!CC) goto inst_next;
+ goto [CTR];
+}
+:b^CC^"ctr" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528
+{
+ if (!CC) goto inst_next;
+ goto [CTR];
+}
+
+#bgectrl 0x4c 80 04 21
+:b^CC^"ctrl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ LR = inst_next;
+ call [CTR];
+}
+:b^CC^"ctrl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ LR = inst_next;
+ call [CTR];
+}
+
+#bgectr cr3 0x4c 8c 04 20
+:b^CC^"ctr" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528
+{
+ if (!CC) goto inst_next;
+ goto [CTR];
+}
+
+#bnectr cr2,#0x3 0x4c 8c 1c 20
+:b^CC^"ctr" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=528
+{
+ if (!CC) goto inst_next;
+ goto [CTR];
+}
+
+#bgectrl cr3 0x4c 8c 04 21
+:b^CC^"ctrl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ LR = inst_next;
+ call [CTR];
+}
+
+#bnectrl cr2,#0x3 0x4c 8c 1c 21
+:b^CC^"ctrl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=528
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ LR = inst_next;
+ call [CTR];
+}
+
+#===========================================================
+# Branch Conditional to Link Register (op=19, XOP=16)
+#===========================================================
+
+#bclr 0x4E 80 00 20
+:blr is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=16
+{
+ return [LR];
+}
+:blr BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=16
+{
+ goto [LR];
+}
+
+#blrl 0x4e 80 00 21
+:blrl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+:blrl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+#bgelr 0x4c 80 00 20
+:b^CC^"lr" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ return [LR];
+}
+:b^CC^"lr" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ goto [LR];
+}
+
+#bgelrl 0x4c 80 00 21
+:b^CC^"lrl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+:b^CC^"lrl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+#bgelr cr2 0x4c 88 00 20
+:b^CC^"lr" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ return [LR];
+}
+
+#bnelr cr2,#0x3 0x4c 8c 18 20
+:b^CC^"lr" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ goto [LR];
+}
+
+#bgelrl cr3 0x4c 8c 00 21
+:b^CC^"lrl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+#bnelrl cr2,#0x3 0x4c 8c 18 21
+:b^CC^"lrl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CC) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+######
+
+#bdnzlr 0x4e 00 00 20
+:bd^CTR_DEC^"lr" is $(NOTVLE) & OP=19 & BH=0 & CTR_DEC & BO_0=1 & BO_2=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CTR_DEC) goto inst_next;
+ goto [LR];
+}
+:bd^CTR_DEC^"lr" BH is $(NOTVLE) & OP=19 & BH & CTR_DEC & BO_0=1 & BO_2=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CTR_DEC) goto inst_next;
+ goto [LR];
+}
+
+#bdnzlrl 0x4e 00 00 21
+:bd^CTR_DEC^"lrl" is $(NOTVLE) & OP=19 & CTR_DEC & BH=0 & BO_0=1 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CTR_DEC) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+:bd^CTR_DEC^"lrl" BH is $(NOTVLE) & OP=19 & CTR_DEC & BH & BO_0=1 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!CTR_DEC) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+#bdnzflr lt 0x4c 00 00 20
+#bdnzflr 4*cr2+eq 0x4c 0a 00 20
+:bd^CTR_DEC^CC_TF^"lr" CC_OP is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BO_0=0 & BO_2=0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!(CTR_DEC && CC_OP)) goto inst_next;
+ goto [LR];
+}
+
+#bdnzflr ge 0x4c 00 18 20
+#bdnzflr 4*cr2+eq 0x4c 0a 18 20
+:bd^CTR_DEC^CC_TF^"lr" CC_OP,BH is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BO_0=0 & BO_2=0 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!(CTR_DEC && CC_OP)) goto inst_next;
+ goto [LR];
+}
+
+#bdzflrl lt 0x4c 00 00 21
+#bdnzflrl 4*cr2+eq 0x4c 0a 00 21
+:bd^CTR_DEC^CC_TF^"lrl" CC_OP is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BH=0 & BO_0=0 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!(CTR_DEC && CC_OP)) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+#bdzflrl lt 0x4c 00 18 21
+#bdnzflrl 4*cr2+eq 0x4c 0a 18 21
+:bd^CTR_DEC^CC_TF^"lrl" CC_OP,BH is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BH & BO_0=0 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16
+ [ linkreg=0; globalset(inst_start,linkreg); ]
+{
+ if (!(CTR_DEC && CC_OP)) goto inst_next;
+ tmp:$(REGISTER_SIZE) = LR;
+ LR = inst_next;
+ call [tmp];
+}
+
+
+#===========================================================
+# CMP
+#===========================================================
+
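+# Compares pack the target CR field as LT<<3 | GT<<2 | EQ<<1 | SO, matching
+# the architected bit order within each 4-bit cr register.
+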
+#cmpw r0,r1 0x7c 00 08 00
+#cmpd r0,r1 0x7c 20 08 00 (64 bit mode)
+:cmp^DSIZE A,B is $(NOTVLE) & OP=31 & CRFD=0 & BIT_22=0 & DSIZE & A & B & REG_A & REG_B & XOP_1_10=0 & BIT_0=0
+{
+ tmpA:$(REGISTER_SIZE) = REG_A;
+ tmpB:$(REGISTER_SIZE) = REG_B;
+ cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+#cmpw cr2,r0,r1 0x7d 00 08 00
+#cmpd cr2,r0,r1 0x7d 20 08 00 (64 bit mode)
+:cmp^DSIZE CRFD,A,B is $(NOTVLE) & OP=31 & CRFD & BIT_22=0 & DSIZE & A & B & REG_A & REG_B & XOP_1_10=0 & BIT_0=0
+{
+ tmpA:$(REGISTER_SIZE) = REG_A;
+ tmpB:$(REGISTER_SIZE) = REG_B;
+ CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+###############################
+#cmpwi r0,0x00 0x2c 00 00 00
+#cmpdi r0,0x00 0x2c 20 00 00 (64 bit mode)
+:cmp^DSIZE^"i" A,SIMM is $(NOTVLE) & OP=11 & CRFD=0 & BIT_22=0 & DSIZE & A & REG_A & SIMM
+{
+ tmpA:$(REGISTER_SIZE) = REG_A;
+ tmpB:$(REGISTER_SIZE) = SIMM;
+ cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+#cmpwi cr2,r0,0x00 0x2d 00 00 00
+#cmpdi cr2,r0,0x00 0x2d 20 00 00 (64 bit mode)
+:cmp^DSIZE^"i" CRFD,A,SIMM is $(NOTVLE) & OP=11 & CRFD & BIT_22=0 & DSIZE & A & REG_A & SIMM
+{
+ tmpA:$(REGISTER_SIZE) = REG_A;
+ tmpB:$(REGISTER_SIZE) = SIMM;
+ CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+############################
+#cmplw r0,r1 0x7c 00 08 40
+#cmpld r0,r1 0x7c 20 08 40 (64 bit mode)
+:cmpl^DSIZE A,B is $(NOTVLE) & OP=31 & CRFD=0 & BIT_22=0 & DSIZE & A & B & UREG_A & UREG_B & XOP_1_10=32 & BIT_0=0
+{
+ tmpA:$(REGISTER_SIZE) = UREG_A;
+ tmpB:$(REGISTER_SIZE) = UREG_B;
+ cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+#cmplw cr2,r0,r1 0x7d 00 08 40
+#cmpld cr2,r0,r1 0x7d 20 08 40 (64 bit mode)
+:cmpl^DSIZE CRFD,A,B is $(NOTVLE) & OP=31 & CRFD & BIT_22=0 & DSIZE & A & B & UREG_A & UREG_B & XOP_1_10=32 & BIT_0=0
+{
+ tmpA:$(REGISTER_SIZE) = UREG_A;
+ tmpB:$(REGISTER_SIZE) = UREG_B;
+ CRFD = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+###############################
+#cmplwi r0,0x00 0x28 00 00 00
+#cmpldi r0,0x00 0x28 20 00 00 (64 bit mode)
+:cmpl^DSIZE^"i" A,UIMM is $(NOTVLE) & OP=10 & CRFD=0 & BIT_22=0 & DSIZE & A & UREG_A & UIMM
+{
+ tmpA:$(REGISTER_SIZE) = UREG_A;
+ tmpB:$(REGISTER_SIZE) = UIMM;
+ cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+
+#cmplwi cr2,r0,0x00 0x29 00 00 00
+#cmpldi cr2,r0,0x00 0x29 20 00 00 (64 bit mode)
+:cmpl^DSIZE^"i" CRFD,A,UIMM is $(NOTVLE) & OP=10 & CRFD & BIT_22=0 & DSIZE & A & UREG_A & UIMM
+{
+ tmpA:$(REGISTER_SIZE) = UREG_A;
+ tmpB:$(REGISTER_SIZE) = UIMM;
+ CRFD = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1);
+}
+#===========================================================
+# CNTLZx
+#===========================================================
+
+@ifdef BIT_64
+#cntlzd r0,r0 0x7c 00 00 74
+:cntlzd A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=58 & Rc=0
+{
+ A = countLeadingZeros(S);
+}
+
+#cntlzd. r0,r0 0x7c 00 00 75
+:cntlzd. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=58 & Rc=1
+{
+ A = countLeadingZeros(S);
+ cr0flags(A);
+}
+@endif
+
+#cntlzw r0,r0 0x7c 00 00 34
+:cntlzw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=26 & Rc=0
+{
+ A = countLeadingZeros(S:4);
+}
+
+#cntlzw. r0,r0 0x7c 00 00 35
+:cntlzw. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=26 & Rc=1
+{
+ A = countLeadingZeros(S:4);
+ cr0flags(A);
+}
+#===========================================================
+# CRxxx
+#===========================================================
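+# setCrBit (a macro defined elsewhere in this spec) writes one bit of the
+# selected cr field; the CC_*_OP operands read and name the source bits.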
+#crand lt,lt,lt 0x4c 00 02 02
+#crand 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 72 02
+:crand CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=257 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,CC_OP & CC_B_OP);
+}
+
+#crandc lt,lt,lt 0x4c 00 01 02
+#crandc 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 02
+:crandc CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=129 & BIT_0=0
+{
+ tmp1:1 = !CC_B_OP;
+ setCrBit(CR_D,CR_D_CC,CC_OP & tmp1);
+}
+
+#creqv lt,lt,lt 0x4c 00 02 42
+#creqv 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 72 42
+:creqv CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=289 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,CC_B_OP == CC_OP);
+}
+
+#crnand lt,lt,lt 0x4c 00 01 c2
+#crnand 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 c2
+:crnand CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=225 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,!(CC_B_OP & CC_OP));
+}
+
+#crnor lt,lt,lt 0x4c 00 00 42
+#crnor 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 70 42
+:crnor CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=33 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,!(CC_B_OP | CC_OP));
+}
+
+#cror lt,lt,lt 0x4c 00 03 82
+#cror 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 73 82
+:cror CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=449 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,(CC_B_OP | CC_OP));
+}
+
+#crorc lt,lt,lt 0x4c 00 03 42
+#crorc 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 73 42
+:crorc CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=417 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,(CC_B_OP | (!CC_OP)));
+}
+
+#crxor lt,lt,lt 0x4c 00 01 82
+#crxor 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 82
+:crxor CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=193 & BIT_0=0
+{
+ setCrBit(CR_D,CR_D_CC,(CC_B_OP ^ CC_OP));
+}
+
+@ifndef IS_ISA
+# replace with dci command in ISA
+#dccci 0,r0 0x7c 00 03 8c
+:dccci RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=454 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ dataCacheCongruenceClassInvalidate(ea);
+}
+@endif
+
+#===========================================================
+# DIVxx
+#===========================================================
+
+@ifdef BIT_64
+#divd r0,r0,r0 0x7c 00 03 d2
+:divd D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=489 & Rc=0
+{
+ D = A s/ B;
+}
+
+#divd. r0,r0,r0 0x7c 00 03 d3
+:divd. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=489 & Rc=1
+{
+ D = A s/ B;
+ cr0flags(D);
+}
+
+#divdo r0,r0,r0 0x7c 00 07 d2
+:divdo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=489 & Rc=0
+{
+ D = A s/ B;
+ divOverflow(A,B);
+}
+
+#divdo. r0,r0,r0 0x7c 00 07 d3
+:divdo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=489 & Rc=1
+{
+ D = A s/ B;
+ divOverflow(A,B);
+ cr0flags(D);
+}
+
+######################
+#divdu r0,r0,r0 0x7c 00 03 92
+:divdu D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=457 & Rc=0
+{
+ D = A / B;
+}
+
+#divdu. r0,r0,r0 0x7c 00 03 93
+:divdu. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=457 & Rc=1
+{
+ D = A / B;
+ cr0flags(D);
+}
+
+#divduo r0,r0,r0 0x7c 00 07 92
+:divduo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=457 & Rc=0
+{
+ D = A / B;
+ divZero(A,B);
+}
+
+#divduo. r0,r0,r0 0x7c 00 07 93
+:divduo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=457 & Rc=1
+{
+ D = A / B;
+ divZero(A,B);
+ cr0flags(D);
+}
+@endif
+
+#########################
+#divw r0,r0,r0 0x7c 00 03 d6
+:divw D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=491 & Rc=0
+{
+@ifdef BIT_64
+ D = sext(A:4 s/ B:4);
+@else
+ D = A s/ B;
+@endif
+}
+
+#divw. r0,r0,r0 0x7c 00 03 d7
+:divw. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=491 & Rc=1
+{
+@ifdef BIT_64
+ D = sext(A:4 s/ B:4);
+ cr0flags(D:4);
+@else
+ D = A s/ B;
+ cr0flags(D);
+@endif
+}
+
+#divwo r0,r0,r0 0x7c 00 07 d6
+:divwo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=491 & Rc=0
+{
+@ifdef BIT_64
+ D = sext(A:4 s/ B:4);
+ divOverflow(A:4,B:4);
+@else
+ D = A s/ B;
+ divOverflow(A,B);
+@endif
+}
+
+#divwo. r0,r0,r0 0x7c 00 07 d7
+:divwo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=491 & Rc=1
+{
+@ifdef BIT_64
+ D = sext(A:4 s/ B:4);
+ divOverflow(A:4,B:4);
+ cr0flags(D:4);
+@else
+ D = A s/ B;
+ divOverflow(A,B);
+ cr0flags(D);
+@endif
+}
+
+#########################
+#divwu r0,r0,r0 0x7c 00 03 96
+:divwu D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=459 & Rc=0
+{
+@ifdef BIT_64
+ D = zext(A:4) / zext(B:4);
+@else
+ D = A / B;
+@endif
+}
+
+#divwu. r0,r0,r0 0x7c 00 03 97
+:divwu. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=459 & Rc=1
+{
+@ifdef BIT_64
+ D = zext(A:4) / zext(B:4);
+ cr0flags(D:4);
+@else
+ D = A / B;
+ cr0flags(D);
+@endif
+}
+
+#divwuo r0,r0,r0 0x7c 00 07 96
+:divwuo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=459 & Rc=0
+{
+@ifdef BIT_64
+ D = zext(A:4) / zext(B:4);
+ divZero(A:4,B:4);
+@else
+ D = A / B;
+ divZero(A,B);
+@endif
+}
+
+#divwuo. r0,r0,r0 0x7c 00 07 97
+:divwuo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=459 & Rc=1
+{
+@ifdef BIT_64
+ D = zext(A:4) / zext(B:4);
+ divZero(A:4,B:4);
+ cr0flags(D:4);
+@else
+ D = A / B;
+ divZero(A,B);
+ cr0flags(D);
+@endif
+}
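+
+# On 64-bit targets the w-suffixed divides operate on the low 32 bits
+# (A:4, B:4) and extend the 32-bit quotient into the full register.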
+
+#===========================================================
+# ECxxx,EIxxx
+#===========================================================
+#eciwx r0,r0,r0 0x7c 00 02 6c
+:eciwx D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & B & RA_OR_ZERO & XOP_1_10=310 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ D = externalControlIn(ea);
+}
+
+#ecowx r0,r0,r0 0x7c 00 03 6c
+:ecowx S,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & S & B & RA_OR_ZERO & XOP_1_10=438 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ externalControlOut(ea, S);
+}
+
+#===========================================================
+# EQVx
+#===========================================================
+#eqv r0,r0,r0 0x7c 00 02 38
+:eqv A,S,B is OP=31 & S & A & B & XOP_1_10=284 & Rc=0
+{
+ A = ~(S ^ B);
+}
+
+#eqv. r0,r0,r0 0x7c 00 02 39
+:eqv. A,S,B is OP=31 & S & A & B & XOP_1_10=284 & Rc=1
+{
+ A = ~(S ^ B);
+ cr0flags(A);
+}
+
+#===========================================================
+# EXTSBx
+#===========================================================
+#extsb r0,r0 0x7c 00 07 74
+:extsb A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=954 & Rc=0
+{
+ A = sext(S:1);
+}
+
+#extsb. r0,r0 0x7c 00 07 75
+:extsb. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=954 & Rc=1
+{
+ A = sext(S:1);
+ cr0flags(A);
+}
+
+#===========================================================
+# EXTSHx
+#===========================================================
+#extsh r0,r0 0x7c 00 07 34
+:extsh A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=922 & Rc=0
+{
+ A = sext(S:2);
+}
+
+#extsh. r0,r0 0x7c 00 07 35
+:extsh. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=922 & Rc=1
+{
+ A = sext(S:2);
+ cr0flags(A);
+}
+
+@ifdef BIT_64
+#extsw r0,r0 0x7c 00 07 b4
+:extsw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=986 & Rc=0
+{
+ A = sext(S:4);
+}
+
+#extsw. r0,r0 0x7c 00 07 b5
+:extsw. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=986 & Rc=1
+{
+ A = sext(S:4);
+ cr0flags(A);
+}
+@endif
+
+#===========================================================
+# FABSx
+#===========================================================
+#fabs fr0,fr1 0xfc 00 02 10
+:fabs fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=0
+{
+ fD = abs(fB);
+}
+
+#fabs. fr0,fr1 0xfc 00 02 11
+:fabs. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=1
+{
+ fD = abs(fB);
+ cr1flags();
+}
+#fadd fr0,fr0,fr0 0xfc 00 00 2a
+:fadd fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0
+{
+ fD = fA f+ fB;
+ setFPAddFlags(fA,fB,fD);
+}
+
+#fadd. fr0,fr0,fr0 0xfc 00 00 2b
+:fadd. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1
+{
+ fD = fA f+ fB;
+ setFPAddFlags(fA,fB,fD);
+ cr1flags();
+}
+
+#fadds fr0,fr0,fr0 0xec 00 00 2a
+:fadds fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0 & ps1T
+{
+ tmp:4 = float2float(fA f+ fB);
+ fD = float2float(tmp);
+ setFPAddFlags(fA,fB,fD);
+ ps1T = fD;
+}
+
+#fadds. fr0,fr0,fr0 0xec 00 00 2b
+:fadds. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1 & ps1T
+{
+ tmp:4 = float2float(fA f+ fB);
+ fD = float2float(tmp);
+ setFPAddFlags(fA,fB,fD);
+ ps1T = fD;
+ cr1flags();
+}
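+
+# The s-suffixed forms round through a 4-byte temporary (float2float) and
+# widen the result back into the 8-byte register, modeling the extra
+# single-precision rounding step; ps1T mirrors the result into the second
+# paired-single slot per the Gekko/Broadway model.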
+
+#===========================================================
+# FCFIDx
+#===========================================================
+#fcfid fr0,fr0 0xfc 00 06 9c
+:fcfid fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=846 & Rc=0
+{
+ fD = int2float(fB);
+}
+
+#fcfid. fr0,fr0 0xfc 00 06 9d
+:fcfid. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=846 & Rc=1
+{
+ fD = int2float(fB);
+ setFPRF(fD);
+# fp_fr = intToFloatRoundedUp(fB);
+# fp_fi = intToFloatInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#===========================================================
+# FCMPO
+#===========================================================
+#fcmpo fr0,fr0,fr0 0xfc 00 00 40
+:fcmpo CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=32 & BIT_0=0
+{
+ tmp:1 = nan(fA) | nan(fB);
+ fp_cc0 = (fA f< fB);
+ fp_cc1 = (fA f> fB);
+ fp_cc2 = (fA f== fB);
+ CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp;
+}
+#fcmpu fr0,fr0,fr0 0xfc 00 00 00
+:fcmpu CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=0 & BIT_0=0
+{
+ tmp:1 = nan(fA) | nan(fB);
+ fp_cc0 = (fA f< fB);
+ fp_cc1 = (fA f> fB);
+ fp_cc2 = (fA f== fB);
+ CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp;
+}
+
+#fctid fr0,fr0 0xfc 00 06 5c
+:fctid fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=814 & Rc=0
+{
+# fp_fr = floatToIntRoundedUp(fB);
+# fp_fi = floatToIntInexact(fB);
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+# fp_xx = fp_xx | fp_fi;
+ fD = trunc(fB);
+}
+#fctid. fr0,fr0 0xfc 00 06 5d
+:fctid. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=814 & Rc=1
+{
+# fp_fr = floatToIntRoundedUp(fB);
+# fp_fi = floatToIntInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+# fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ setSummaryFPSCR();
+ cr1flags();
+ fD = trunc(fB);
+}
+#fctidz fr0,fr0 0xfc 00 06 5e
+:fctidz fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=815 & Rc=0
+{
+ fp_fr = 0;
+# fp_fi = floatToIntInexact(fB);
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ fp_xx = fp_xx | fp_fi;
+ fD = trunc(fB);
+}
+#fctidz. fr0,fr0 0xfc 00 06 5f
+:fctidz. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=815 & Rc=1
+{
+ fp_fr = 0;
+# fp_fi = floatToIntInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ setSummaryFPSCR();
+ cr1flags();
+ fD = trunc(fB);
+}
+
+#fctiw fr0,fr0 0xfc 00 00 1c
+:fctiw fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=14 & Rc=0
+{
+# fp_fr = floatToIntRoundedUp(fB);
+# fp_fi = floatToIntInexact(fB);
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ fp_xx = fp_xx | fp_fi;
+ local intres:4;
+ intres = trunc(fB);
+ fD = sext(intres);
+}
+#fctiw. fr0,fr0 0xfc 00 00 1d
+:fctiw. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=14 & Rc=1
+{
+# fp_fr = floatToIntRoundedUp(fB);
+# fp_fi = floatToIntInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ setSummaryFPSCR();
+ cr1flags();
+ local intres:4;
+ intres = trunc(fB);
+ fD = sext(intres);
+}
+#fctiwz fr0,fr0 0xfc 00 00 1e
+:fctiwz fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=15 & Rc=0
+{
+ fp_fr = 0;
+# fp_fi = floatToIntInexact(fB);
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ fp_xx = fp_xx | fp_fi;
+ local intres:4;
+ intres = trunc(fB);
+ fD = sext(intres);
+}
+#fctiwz. fr0,fr0 0xfc 00 00 1f
+:fctiwz. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=15 & Rc=1
+{
+ fp_fr = 0;
+# fp_fi = floatToIntInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB);
+ setSummaryFPSCR();
+ cr1flags();
+ local intres:4;
+ intres = trunc(fB);
+ fD = sext(intres);
+}
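+
+# The fctiw* forms truncate to a 32-bit integer (intres:4) and sign-extend
+# it into the FPR image; the ISA leaves the upper word undefined, so the
+# sign extension is a modeling choice.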
+
+#fdiv fr0,fr0,fr0 0xfc 00 00 24
+:fdiv fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0
+{
+ fD = fA f/ fB;
+ setFPDivFlags(fA,fB,fD);
+}
+#fdiv. fr0,fr0,fr0 0xfc 00 00 25
+:fdiv. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1
+{
+ fD = fA f/ fB;
+ setFPDivFlags(fA,fB,fD);
+ cr1flags();
+}
+
+#fdivs fr0,fr0,fr0 0xec 00 00 24
+:fdivs fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0 & ps1T
+{
+ tmp:4 = float2float(fA f/ fB);
+ fD = float2float(tmp);
+ setFPDivFlags(fA,fB,fD);
+ ps1T = fD;
+}
+#fdivs. fr0,fr0,fr0 0xec 00 00 25
+:fdivs. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1 & ps1T
+{
+ tmp:4 = float2float(fA f/ fB);
+ fD = float2float(tmp);
+ setFPDivFlags(fA,fB,fD);
+ ps1T = fD;
+ cr1flags();
+}
+
+#fmadd fr0,fr0,fr0,fr0 0xfc 00 00 3a
+:fmadd fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=29 & Rc=0
+{
+ tmp:8 = fA f* fC;
+ fD = tmp f+ fB;
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+}
+
+#fmadd. fr0,fr0,fr0,fr0 0xfc 00 00 3b
+:fmadd. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=29 & Rc=1
+{
+ tmp:8 = fA f* fC;
+ fD = tmp f+ fB;
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fmadds fr0,fr0,fr0,fr0 0xec 00 00 3a
+:fmadds fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=29 & Rc=0 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f+ fB);
+ fD = float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+}
+
+#fmadds. fr0,fr0,fr0,fr0 0xec 00 00 3b
+:fmadds. fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=29 & Rc=1 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f+ fB);
+ fD = float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+ cr1flags();
+}
+
+#fmr fr0,fr0 0xfc 00 00 90
+:fmr fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=0
+{
+ fD = fB;
+}
+#fmr. fr0,fr0 0xfc 00 00 91
+:fmr. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=1
+{
+ fD = fB;
+ cr1flags();
+}
+#fmsub fr0,fr0,fr0,fr0 0xfc 00 00 38
+:fmsub fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=28 & Rc=0
+{
+ tmp:8 = fA f* fC;
+ fD = tmp f- fB;
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+}
+
+#fmsub. fr0,fr0,fr0,fr0 0xfc 00 00 39
+:fmsub. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=28 & Rc=1
+{
+ tmp:8 = fA f* fC;
+ fD = tmp f- fB;
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fmsubs fr0,fr0,fr0,fr0 0xec 00 00 38
+:fmsubs fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=28 & Rc=0 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f- fB);
+ fD = float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+}
+
+#fmsubs. fr0,fr0,fr0,fr0 0xec 00 00 39
+:fmsubs. fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=28 & Rc=1 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f- fB);
+ fD = float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+ cr1flags();
+}
+
+#fmul fr0,fr0,fr0 0xfc 00 00 32
+:fmul fD,fA,fC is $(NOTVLE) & OP=63 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=0
+{
+ fD = fA f* fC;
+ setFPMulFlags(fA,fC,fD);
+}
+#fmul. fr0,fr0,fr0 0xfc 00 00 33
+:fmul. fD,fA,fC is $(NOTVLE) & OP=63 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=1
+{
+ fD = fA f* fC;
+ setFPMulFlags(fA,fC,fD);
+ cr1flags();
+}
+
+#fmuls fr0,fr0,fr0 0xec 00 00 32
+:fmuls fD,fA,fC is $(NOTVLE) & OP=59 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=0 & ps1T
+{
+ tmp:4 = float2float(fA f* fC);
+ fD = float2float(tmp);
+ setFPMulFlags(fA,fC,fD);
+ ps1T = fD;
+}
+
+#fmuls. fr0,fr0,fr0 0xec 00 00 33
+:fmuls. fD,fA,fC is $(NOTVLE) & OP=59 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=1 & ps1T
+{
+ tmp:4 = float2float(fA f* fC);
+ fD = float2float(tmp);
+ setFPMulFlags(fA,fC,fD);
+ ps1T = fD;
+ cr1flags();
+}
+
+#fnabs fr0,fr0 0xfc 00 01 10
+:fnabs fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=136 & Rc=0
+{
+ fD = fB | 0x8000000000000000;
+}
+
+#fnabs. fr0,fr0 0xfc 00 01 11
+:fnabs. fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=136 & Rc=1
+{
+ fD = fB | 0x8000000000000000;
+ cr1flags();
+}
+
+#fneg fr0,fr0 0xfc 00 00 50
+:fneg fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=40 & Rc=0
+{
+ fD = f- fB;
+}
+
+#fneg. fr0,fr0 0xfc 00 00 51
+:fneg. fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=40 & Rc=1
+{
+ fD = f- fB;
+ cr1flags();
+}
+
+#fnmadd fr0,fr0,fr0,fr0 0xfc 00 00 3e
+:fnmadd fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=31 & Rc=0
+{
+ tmp:8 = fA f* fC;
+ fD = f- (tmp f+ fB);
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+}
+
+#fnmadd. fr0,fr0,fr0,fr0 0xfc 00 00 3f
+:fnmadd. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=31 & Rc=1
+{
+ tmp:8 = fA f* fC;
+ fD = f- (tmp f+ fB);
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fnmadds fr0,fr0,fr0,fr0 0xec 00 00 3e
+:fnmadds fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=31 & Rc=0 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f+ fB);
+ fD = f- float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+}
+
+#fnmadds. fr0,fr0,fr0,fr0 0xec 00 00 3f
+:fnmadds. fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=31 & Rc=1 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f+ fB);
+ fD = f- float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMaddRoundedUp(fA, fC, fB);
+# fp_fi = floatMaddInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMaddOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMaddUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+ cr1flags();
+}
+
+#fnmsub fr0,fr0,fr0,fr0 0xfc 00 00 3c
+:fnmsub fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=30 & Rc=0
+{
+ tmp:8 = fA f* fC;
+ fD = f- (tmp f- fB);
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+}
+
+#fnmsub. fr0,fr0,fr0,fr0 0xfc 00 00 3d
+:fnmsub. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=30 & Rc=1
+{
+ tmp:8 = fA f* fC;
+ fD = f- (tmp f- fB);
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fnmsubs fr0,fr0,fr0,fr0 0xec 00 00 3c
+:fnmsubs fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=30 & Rc=0 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f- fB);
+ fD = f- float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ ps1T = fD;
+}
+
+#fnmsubs. fr0,fr0,fr0,fr0 0xec 00 00 3d
+:fnmsubs. fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=30 & Rc=1 & ps1T
+{
+ tmp:8 = fA f* fC;
+ tmp2:4 = float2float(tmp f- fB);
+ fD = f- float2float(tmp2);
+ setFPRF(fD);
+# fp_fr = floatMsubRoundedUp(fA, fC, fB);
+# fp_fi = floatMsubInexact(fA,fC,fB);
+# fp_ox = fp_ox | floatMsubOverflow(fA,fC,fB);
+# fp_ux = fp_ux | floatMsubUnderflow(fA,fC,fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fA) | nan(fC) | nan(fB);
+# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, fB);
+# fp_vximz = fp_vximz | floatInfinityMulZero(fA,fC);
+ setSummaryFPSCR();
+ cr1flags();
+ ps1T = fD;
+}
+
+#fres fr0,fr0 0xec 00 00 30
+:fres fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=0 & ps1T
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmp:4 = float2float(floatOne f/ fB);
+ fD = float2float(tmp);
+ setFPRF(fD);
+# fp_fr = floatDivRoundedUp(floatOne, fB);
+# fp_fi = floatDivInexact(floatOne, fB);
+# fp_ox = fp_ox | floatDivOverflow(floatOne, fB);
+# fp_ux = fp_ux | floatDivUnderflow(floatOne, fB);
+ fp_zx = fp_zx | (fB f== 0);
+ fp_vxsnan = fp_vxsnan | nan(fB);
+ setSummaryFPSCR();
+ ps1T = fD;
+}
+
+#fres. fr0,fr0 0xec 00 00 31
+:fres. fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=1 & ps1T
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmp:4 = float2float(floatOne f/ fB);
+ fD = float2float(tmp);
+ setFPRF(fD);
+# fp_fr = floatDivRoundedUp(floatOne, fB);
+# fp_fi = floatDivInexact(floatOne, fB);
+# fp_ox = fp_ox | floatDivOverflow(floatOne, fB);
+# fp_ux = fp_ux | floatDivUnderflow(floatOne, fB);
+ fp_zx = fp_zx | (fB f== 0);
+ fp_vxsnan = fp_vxsnan | nan(fB);
+ setSummaryFPSCR();
+ ps1T = fD;
+ cr1flags();
+}
+
+#frsp fr0,fr0 0xfc 00 00 18
+:frsp fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=12 & Rc=0
+{
+ #zero:8 = 0;
+ #floatZero:8 = int2float(zero);
+ tmp:4 = float2float(fB);
+ fD = float2float(tmp);
+ setFPRF(fD);
+# fp_fr = floatAddRoundedUp(floatZero, fB);
+# fp_fi = floatAddInexact(floatZero, fB);
+# fp_ox = fp_ox | floatAddOverflow(floatZero, fB);
+# fp_ux = fp_ux | floatAddUnderflow(floatZero, fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+ setSummaryFPSCR();
+}
+
+#frsp. fr0,fr0 0xfc 00 00 19
+:frsp. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=12 & Rc=1
+{
+ #zero:8 = 0;
+ #floatZero:8 = int2float(zero);
+ tmp:4 = float2float(fB);
+ fD = float2float(tmp);
+ setFPRF(fD);
+# fp_fr = floatAddRoundedUp(floatZero, fB);
+# fp_fi = floatAddInexact(floatZero, fB);
+# fp_ox = fp_ox | floatAddOverflow(floatZero, fB);
+# fp_ux = fp_ux | floatAddUnderflow(floatZero, fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#frsqrte fr0,fr0 0xfc 00 00 34
+:frsqrte fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=0
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmpSqrt:8 = sqrt(fB);
+ fD = (floatOne f/ tmpSqrt);
+ setFPRF(fD);
+# fp_fr = floatDivRoundedUp(floatOne, tmpSqrt);
+# fp_fi = floatDivInexact(floatOne, tmpSqrt);
+# fp_ox = fp_ox | floatDivOverflow(floatOne, tmpSqrt);
+# fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpSqrt);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+ setSummaryFPSCR();
+}
+
+#frsqrte. fr0,fr0 0xfc 00 00 35
+:frsqrte. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=1
+{
+ one:8 = 1;
+ floatOne:8 = int2float(one);
+ tmpSqrt:8 = sqrt(fB);
+ fD = (floatOne f/ tmpSqrt);
+ setFPRF(fD);
+# fp_fr = floatDivRoundedUp(floatOne, tmpSqrt);
+# fp_fi = floatDivInexact(floatOne, tmpSqrt);
+# fp_ox = fp_ox | floatDivOverflow(floatOne, tmpSqrt);
+# fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpSqrt);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+ fp_vxsqrt = fp_vxsqrt | sqrtInvalid(fB);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fsel fr0,fr0,fr0,fr0 0xfc 00 00 2e
+:fsel fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fB & fC & XOP_1_5=23 & Rc=0
+{
+ local tmpfA = fA;
+ local tmpfB = fB;
+ zero:4=0;
+ fD=fC;
+ if (tmpfA f>= int2float(zero)) goto inst_next;
+ fD=tmpfB;
+}
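+
+# fsel implements fD = (fA f>= 0.0) ? fC : fB with a conditional skip, since
+# SLEIGH semantics have no ternary operator.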
+
+#fsel. fr0,fr0,fr0,fr0 0xfc 00 00 2f
+:fsel. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fB & fC & XOP_1_5=23 & Rc=1
+{
+ local tmpfA = fA;
+ local tmpfB = fB;
+ zero:4=0;
+ fD=fC;
+ if (tmpfA f>= int2float(zero)) goto <done>;
+ fD=tmpfB;
+ <done>
+
+ cr1flags();
+}
+
+#fsqrt fr0,fr0 0xfc 00 00 2c
+:fsqrt fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=0
+{
+ fD = sqrt(fB);
+ setFPRF(fD);
+# fp_fr = floatSqrtRoundedUp(fB);
+# fp_fi = floatSqrtInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxsqrt = fp_vxsqrt | sqrtInvalid(fB);
+ setSummaryFPSCR();
+}
+
+#fsqrt. fr0,fr0 0xfc 00 00 2d
+:fsqrt. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=1
+{
+ fD = sqrt(fB);
+ setFPRF(fD);
+# fp_fr = floatSqrtRoundedUp(fB);
+# fp_fi = floatSqrtInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxsqrt = fp_vxsqrt | sqrtInvalid(fB);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fsqrts fr0,fr0 0xec 00 00 2c
+:fsqrts fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=0
+{
+ tmp:4 = float2float(sqrt(fB));
+ fD = float2float(tmp);
+ setFPRF(fD);
+# fp_fr = floatSqrtRoundedUp(fB);
+# fp_fi = floatSqrtInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxsqrt = fp_vxsqrt | sqrtInvalid(fB);
+ setSummaryFPSCR();
+}
+
+#fsqrts. fr0,fr0 0xec 00 00 2d
+:fsqrts. fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=1
+{
+ tmp:4 = float2float(sqrt(fB));
+ fD = float2float(tmp);
+ setFPRF(fD);
+# fp_fr = floatSqrtRoundedUp(fB);
+# fp_fi = floatSqrtInexact(fB);
+ fp_xx = fp_xx | fp_fi;
+ fp_vxsnan = fp_vxsnan | nan(fB);
+# fp_vxsqrt = fp_vxsqrt | sqrtInvalid(fB);
+ setSummaryFPSCR();
+ cr1flags();
+}
+
+#fsub fr0,fr0,fr0 0xfc 00 00 28
+:fsub fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0
+{
+ fD = fA f- fB;
+ setFPSubFlags(fA,fB,fD);
+}
+
+#fsub. fr0,fr0,fr0 0xfc 00 00 29
+:fsub. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1
+{
+ fD = fA f- fB;
+ setFPSubFlags(fA,fB,fD);
+ cr1flags();
+}
+
+#fsubs fr0,fr0,fr0 0xec 00 00 28
+:fsubs fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0 & ps1T
+{
+ tmp:4 = float2float(fA f- fB);
+ fD = float2float(tmp);
+ setFPSubFlags(fA,fB,fD);
+ ps1T = fD;
+}
+
+#fsubs. fr0,fr0,fr0 0xec 00 00 29
+:fsubs. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1 & ps1T
+{
+ tmp:4 = float2float(fA f- fB);
+ fD = float2float(tmp);
+ setFPSubFlags(fA,fB,fD);
+ ps1T = fD;
+ cr1flags();
+}
+
+@ifndef IS_ISA
+# iccci is just a special form of ici
+#iccci 0,r0 0x7c 00 07 8c
+:iccci RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=966 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ instructionCacheCongruenceClassInvalidate(ea);
+}
+@endif
+
+#icread 0,r0 0x7c 00 07 cc
+:icread RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=998 & BIT_0=0 & RA_OR_ZERO
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ instructionCacheRead(ea);
+}
+
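+# In address operands rA=0 denotes the literal value 0 rather than r0;
+# RA_OR_ZERO and the dPlusRaOrZeroAddress subconstructors encode that rule.
+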
+#lbz r0,3(0) 0x88 00 00 03
+#lbz r0,3(r2) 0x88 02 00 03
+:lbz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=34 & D & dPlusRaOrZeroAddress
+{
+ D = zext(*:1(dPlusRaOrZeroAddress));
+}
+
+#lbzu r0,3(r2) 0x8c 02 00 03
+:lbzu D,dPlusRaAddress is $(NOTVLE) & OP=35 & D & dPlusRaAddress & A
+{
+ A = dPlusRaAddress;
+ D = zext(*:1(A));
+}
+
+#lbzux r0,r2,r0 0x7c 02 00 ee
+:lbzux D,A,B is OP=31 & D & A & B & XOP_1_10=119 & BIT_0=0
+{
+ A = A+B;
+ D = zext(*:1(A));
+}
+
+#lbzx r0,r2,r0 0x7c 02 00 ae
+:lbzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=87 & BIT_0=0
+{
+ tmp:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ D = zext(*:1(tmp));
+}
+
+@ifdef BIT_64
+#ld r0,8(r2) 0xe8 02 00 08
+:ld D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=58 & D & dPlusRaOrZeroAddress & BITS_0_1=0
+{
+ D = *:8(dPlusRaOrZeroAddress);
+}
+
+##ldarx r0,r0,r0 0x7c 00 00 a8
+#:ldarx T,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & T & RA_OR_ZERO & B & XOP_1_10=84 & TX
+#{
+# ea = RA_OR_ZERO+B;
+# RESERVE = 1;
+# RESERVE_ADDRSS = ea;
+# T = *:8(ea);
+#}
+
+#ldu r0,8(r2) 0xe8 02 00 09
+:ldu D,dsPlusRaAddress is $(NOTVLE) & OP=58 & D & dsPlusRaAddress & A & BITS_0_1=1
+{
+ A = dsPlusRaAddress;
+ D = *:8(A);
+}
+
+#ldux r0,r2,r0 0x7c 02 00 6a
+:ldux D,A,B is OP=31 & D & A & B & XOP_1_10=53 & BIT_0=0
+{
+ A = A+B;
+ D = *:8(A);
+}
+
+@ifndef IS_ISA
+#ldarx r0,r2,r0 0x7c 02 00 2a
+:ldarx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=21 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ D = *:8(ea);
+}
+@endif
+@endif
+
+#lfd fr0,8(r2) 0xc8 02 00 08
+:lfd fD,dPlusRaOrZeroAddress is $(NOTVLE) & OP=50 & fD & dPlusRaOrZeroAddress
+{
+ fD = *:8(dPlusRaOrZeroAddress);
+}
+
+#lfdu fr0,8(r2) 0xcc 02 00 08
+:lfdu fD,dPlusRaAddress is $(NOTVLE) & OP=51 & fD & dPlusRaAddress & A
+{
+ A = dPlusRaAddress;
+ fD = *:8(A);
+}
+#lfdux fr0,r2,r0 0x7c 02 04 ee
+:lfdux fD,A,B is $(NOTVLE) & OP=31 & fD & A & B & XOP_1_10=631 & BIT_0=0
+{
+ A = A+B;
+ fD = *:8(A);
+}
+#lfdx fr0,r2,r0 0x7c 02 04 ae
+:lfdx fD,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fD & RA_OR_ZERO & B & XOP_1_10=599 & BIT_0=0
+{
+ fD = *:8(RA_OR_ZERO+B);
+}
+
+#lfs fr0,8(r2) 0xc0 02 00 08
+:lfs fD,dPlusRaOrZeroAddress is $(NOTVLE) & OP=48 & fD & dPlusRaOrZeroAddress & ps1T
+{
+ fD = float2float(*:4(dPlusRaOrZeroAddress));
+ ps1T = fD;
+}
+#lfsu fr0,8(r2) 0xc4 02 00 08
+:lfsu fD,dPlusRaAddress is $(NOTVLE) & OP=49 & fD & dPlusRaAddress & A & ps1T
+{
+ A = dPlusRaAddress;
+ fD = float2float(*:4(A));
+ ps1T = fD;
+}
+
+#lfsux fr0,r2,r0 0x7c 02 04 6e
+:lfsux fD,A,B is $(NOTVLE) & OP=31 & fD & A & B & XOP_1_10=567 & BIT_0=0 & ps1T
+{
+ A = A+B;
+ fD = float2float(*:4(A));
+ ps1T = fD;
+}
+#lfsx fr0,r2,r0 0x7c 02 04 2e
+:lfsx fD,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fD & RA_OR_ZERO & B & XOP_1_10=535 & BIT_0=0 & ps1T
+{
+ fD = float2float(*:4(RA_OR_ZERO+B));
+ ps1T = fD;
+}
+#lha r0,4(0) 0xa8 00 00 04
+#lha r0,4(r2) 0xa8 02 00 04
+:lha D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=42 & D & dPlusRaOrZeroAddress
+{
+ D = sext(*:2(dPlusRaOrZeroAddress));
+}
+#lhau r0,8(r2) 0xac 02 00 08
+:lhau D,dPlusRaAddress is $(NOTVLE) & OP=43 & D & dPlusRaAddress & A
+{
+ A = dPlusRaAddress;
+ D = sext(*:2(A));
+}
+#lhaux r0,r2,r0 0x7c 02 02 ee
+:lhaux D,A,B is OP=31 & D & A & B & XOP_1_10=375 & BIT_0=0
+{
+ A = A+B;
+ D = sext(*:2(A));
+}
+#lhax r0,r2,r0 0x7c 02 02 ae
+:lhax D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=343 & BIT_0=0
+{
+ D = sext(*:2(RA_OR_ZERO+B));
+}
+
+#lhbrx r0,r2,r0 0x7c 02 06 2c
+:lhbrx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=790 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ tmp:$(REGISTER_SIZE) = zext(*:1(ea+1)) << 8;
+ D = tmp | zext(*:1(ea));
+}
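+
+# lhbrx assembles the halfword byte-reversed: the byte at ea+1 becomes the
+# high byte and the byte at ea the low byte; lwbrx below does the same for
+# a full word.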
+
+#lhz r0,4(0) 0xa0 00 00 04
+#lhz r0,4(r2) 0xa0 02 00 04
+:lhz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=40 & D & dPlusRaOrZeroAddress
+{
+ D = zext(*:2(dPlusRaOrZeroAddress));
+}
+
+#lhzu r0,4(r2) 0xa4 02 00 04
+:lhzu D,dPlusRaAddress is $(NOTVLE) & OP=41 & D & dPlusRaAddress & A
+{
+ A = dPlusRaAddress;
+ D = zext(*:2(A));
+}
+
+#lhzux r0,r2,r0 0x7c 02 02 6e
+:lhzux D,A,B is OP=31 & D & A & B & XOP_1_10=311 & BIT_0=0
+{
+ A = A+B;
+ D = zext(*:2(A));
+}
+#lhzx r0,r2,r0 0x7c 02 02 2e
+:lhzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=279 & BIT_0=0
+{
+ D = zext(*:2(RA_OR_ZERO+B));
+}
+
+# load/store multiple and string word instructions (large, included separately)
+@include "lmwInstructions.sinc"
+
+@include "lswInstructions.sinc"
+
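+# lswx moves a byte count taken from XER at runtime, which cannot be
+# expressed as fixed-size p-code, so it is modeled as an opaque pcodeop.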
+#lswx r0,0,r0 0x7c 00 3c 2a
+#lswx r0,r2,40 0x7c 02 3c 2a
+define pcodeop lswxOp;
+:lswx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & NB & BITS_21_25 & B & XOP_1_10=533 & BIT_0=0
+{
+ D = lswxOp(D,RA_OR_ZERO,B);
+}
+@ifdef BIT_64
+#lwa r0,8(r2) 0xe8 02 00 0a
+:lwa D,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=58 & D & dsPlusRaOrZeroAddress & BITS_0_1=2
+{
+ D = sext(*:4(dsPlusRaOrZeroAddress));
+}
+@endif
+
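+# The reservation bookkeeping for lwarx (RESERVE/RESERVE_ADDRSS) is left
+# commented out below, presumably to keep the decompiled output simple.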
+#lwarx r0,r0,r0 0x7c 00 00 28
+:lwarx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=20 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ #RESERVE = 1;
+ #RESERVE_ADDRSS:$(REGISTER_SIZE) = ea;
+@ifdef BIT_64
+ D = zext(*:4(ea));
+@else
+ D = *:4(ea);
+@endif
+}
+
+@ifdef BIT_64
+#lwaux r0,r2,r0 0x7c 02 02 ea
+:lwaux D,A,B is OP=31 & D & A & B & XOP_1_10=373 & BIT_0=0
+{
+ A = A+B;
+ D = sext(*:4(A));
+}
+#lwax r0,r2,r0 0x7c 02 02 aa
+:lwax D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=341 & BIT_0=0
+{
+ D = sext(*:4(RA_OR_ZERO+B));
+}
+@endif
+
+#lwbrx r0,r2,r0 0x7c 02 04 2c
+:lwbrx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=534 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ tmp1:$(REGISTER_SIZE) = zext(*:1(ea+3)) << 24;
+ tmp2:$(REGISTER_SIZE) = zext(*:1(ea+2)) << 16;
+ tmp3:$(REGISTER_SIZE) = zext(*:1(ea+1)) << 8;
+ D = tmp1 | tmp2 | tmp3 | zext(*:1(ea));
+}
+#lwz r0,4(0) 0x80 00 00 04
+#lwz r0,4(r2) 0x80 02 00 04
+:lwz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=32 & D & dPlusRaOrZeroAddress
+{
+@ifdef BIT_64
+ D = zext(*:4(dPlusRaOrZeroAddress));
+@else
+ D = *:4(dPlusRaOrZeroAddress);
+@endif
+}
+
+#lwzu r0,4(r2) 0x84 02 00 04
+:lwzu D,dPlusRaAddress is $(NOTVLE) & OP=33 & D & dPlusRaAddress & A
+{
+ A = dPlusRaAddress;
+@ifdef BIT_64
+ D = zext(*:4(A));
+@else
+ D = *:4(A);
+@endif
+}
+
+#lwzux r0,r2,r0 0x7c 02 00 6e
+:lwzux D,A,B is OP=31 & D & A & B & XOP_1_10=55 & BIT_0=0
+{
+ A = A+B;
+@ifdef BIT_64
+ D = zext(*:4(A));
+@else
+ D = *:4(A);
+@endif
+
+}
+#lwzx r0,r2,r0 0x7c 02 00 2e
+:lwzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=23 & BIT_0=0
+{
+@ifdef BIT_64
+ D = zext(*:4(RA_OR_ZERO+B));
+@else
+ D = *:4(RA_OR_ZERO+B);
+@endif
+}
+
+#mcrf cr0,cr0 0x4c 00 00 00
+:mcrf CRFD,CRFS is $(NOTVLE) & OP=19 & CRFD & BITS_21_22=0 & CRFS & BITS_0_17=0
+{
+ CRFD = CRFS;
+}
+
+#mcrfs cr0,cr0 0xfc 00 00 80
+:mcrfs CRFD,CRFS is $(NOTVLE) & OP=63 & CRFD & FPSCR_CRFS & BITS_21_22=0 & CRFS & BITS_11_17=0 & XOP_1_10=64 & BIT_0=0
+{
+ CRFD = FPSCR_CRFS;
+}
+
+#mcrxr cr0 0x7c 00 04 00
+:mcrxr CRFD is OP=31 & CRFD & BITS_11_22=0 & XOP_1_10=512 & BIT_0=0
+{
+ CRFD = (xer_so & 1) << 3 | (xer_ov & 1) << 2 | (xer_ca & 1) << 1;
+}
+
+#mfcr r0 0x7c 00 00 26
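+# The eight 4-bit cr fields are modeled as separate registers, so mfcr has to
+# pack them back into a single 32-bit image with cr0 in the topmost nibble.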
+:mfcr D is OP=31 & D & BITS_11_20=0 & XOP_1_10=19 & BIT_0=0
+{
+ tmp:4 = zext(cr0 & 0xf) << 28 |
+ zext(cr1 & 0xf) << 24 |
+ zext(cr2 & 0xf) << 20 |
+ zext(cr3 & 0xf) << 16 |
+ zext(cr4 & 0xf) << 12 |
+ zext(cr5 & 0xf) << 8 |
+ zext(cr6 & 0xf) << 4 |
+ zext(cr7 & 0xf);
+@ifdef BIT_64
+ D = zext(tmp);
+@else
+ D = tmp;
+@endif
+}
+
+#mfocrf D,cr1 0x7c 31 00 26
+:mfocrf D,CRM_CR is OP=31 & D & BIT_20=1 & CRM_CR & BIT_11=0 & XOP_1_10=19 & BIT_0=0
+{
+@ifdef BIT_64
+ D = zext(CRM_CR);
+@else
+ D = CRM_CR;
+@endif
+}
+
+#mffs fD 0xfc 00 04 8e
+:mffs fD is $(NOTVLE) & OP=63 & fD & BITS_11_20=0 & XOP_1_10=583 & Rc=0
+{
+ tmp:4 = 0;
+ packFPSCR(tmp);
+ fD = zext(tmp);
+}
+
+#mffs. fD 0xfc 00 04 8f
+:mffs. fD is $(NOTVLE) & OP=63 & fD & BITS_11_20=0 & XOP_1_10=583 & Rc=1
+{
+ tmp:4 = 0;
+ packFPSCR(tmp);
+ fD = zext(tmp);
+ cr1flags();
+}
+
+### is this pcode correct on 64-bit bridge?
+#mfsr r0,r0 0x7c 00 04 a6
+:mfsr D,SR is $(NOTVLE) & OP=31 & D & SR & BIT_20=0 & BITS_11_15=0 & XOP_1_10=595 & BIT_0=0
+{
+@ifdef BIT_64
+ D = zext(SR);
+@else
+ D = SR;
+@endif
+}
+#mfsrin r0,r0 0x7c 00 05 26
+:mfsrin D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=659 & BIT_0=0
+{
+@ifdef BIT_64
+ tmp:4 = (B:4 >> 28);
+ D = zext(*[register]:4 ($(SEG_REGISTER_BASE)+tmp));
+@else
+ tmp:$(REGISTER_SIZE) = (B >> 28);
+ D = *[register]:4 ($(SEG_REGISTER_BASE)+tmp);
+@endif
+}
+
+#mtcrf 10,r0 0x7c 01 01 20
+:mtcrf CRM,S is OP=31 & S & BIT_20=0 & CRM & CRM0 & CRM1 & CRM2 & CRM3 & CRM4 & CRM5 & CRM6 & CRM7 & BIT_11=0 & XOP_1_10=144 & BIT_0=0
+{
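+ # SLEIGH has no conditional select, so each cr field is chosen by boolean
+ # multiply: the old nibble is kept when the CRM bit is 0 and the matching
+ # nibble of S is taken when it is 1.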
+ tmp:$(REGISTER_SIZE) = (S >> 28) & 0xf;
+ cr0 = (cr0 * (CRM0:1 == 0)) | (tmp:1 * (CRM0:1 == 1));
+
+ tmp = (S >> 24) & 0xf;
+ cr1 = (cr1 * (CRM1:1 == 0)) | (tmp:1 * (CRM1:1 == 1));
+
+ tmp = (S >> 20) & 0xf;
+ cr2 = (cr2 * (CRM2:1 == 0)) | (tmp:1 * (CRM2:1 == 1));
+
+ tmp = (S >> 16) & 0xf;
+ cr3 = (cr3 * (CRM3:1 == 0)) | (tmp:1 * (CRM3:1 == 1));
+
+ tmp = (S >> 12) & 0xf;
+ cr4 = (cr4 * (CRM4:1 == 0)) | (tmp:1 * (CRM4:1 == 1));
+
+ tmp = (S >> 8) & 0xf;
+ cr5 = (cr5 * (CRM5:1 == 0)) | (tmp:1 * (CRM5:1 == 1));
+
+ tmp = (S >> 4) & 0xf;
+ cr6 = (cr6 * (CRM6:1 == 0)) | (tmp:1 * (CRM6:1 == 1));
+
+ tmp = S & 0xf;
+ cr7 = (cr7 * (CRM7:1 == 0)) | (tmp:1 * (CRM7:1 == 1));
+}
+
+#mtfsb0 fp_ux 0xfc 80 00 8c
+:mtfsb0 CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=70 & Rc=0
+{
+ CRBD = 0;
+}
+#mtfsb0. fp_ux 0xfc 80 00 8d
+:mtfsb0. CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=70 & Rc=1
+{
+ CRBD = 0;
+ cr1flags();
+}
+#mtfsb1 fp_ux 0xfc 80 00 4c
+:mtfsb1 CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=38 & Rc=0
+{
+ CRBD = 1;
+}
+#mtfsb1. fp_ux 0xfc 80 00 4d
+:mtfsb1. CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=38 & Rc=1
+{
+ CRBD = 1;
+ cr1flags();
+}
+
+#mtfsf 10,fr0 0xfc 00 05 8e
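+# Each FM bit guards one 4-bit field of FPSCR: a 32-bit mask is built from the
+# eight FM bits, then the selected fields of fB are spliced into the packed FPSCR.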
+:mtfsf FM,fB is $(NOTVLE) & OP=63 & BIT_25=0 & FM & FM0 & FM1 & FM2 & FM3 & FM4 & FM5 & FM6 & FM7 & BIT_16=0 & fB & XOP_1_10=711 & Rc=0
+{
+ tmp:4 = 0;
+ packFPSCR(tmp);
+
+ mask0:4 = zext((FM0:1 == 1)* 0xf) << 28;
+ mask1:4 = zext((FM1:1 == 1)* 0xf) << 24;
+ mask2:4 = zext((FM2:1 == 1)* 0xf) << 20;
+ mask3:4 = zext((FM3:1 == 1)* 0xf) << 16;
+ mask4:4 = zext((FM4:1 == 1)* 0xf) << 12;
+ mask5:4 = zext((FM5:1 == 1)* 0xf) << 8;
+ mask6:4 = zext((FM6:1 == 1)* 0xf) << 4;
+ mask7:4 = zext((FM7:1 == 1)* 0xf);
+
+ mask:4 = mask0 | mask1 | mask2 | mask3 | mask4 | mask5 | mask6 | mask7;
+
+ tmp1:4 = fB:4;
+ tmp2:4 = (tmp & ~mask) | (tmp1 & mask);
+ unpackFPSCR(tmp2);
+}
+
+#mtfsf. 10,fr0 0xfc 00 05 8f
+:mtfsf. FM,fB is $(NOTVLE) & OP=63 & BIT_25=0 & FM & FM0 & FM1 & FM2 & FM3 & FM4 & FM5 & FM6 & FM7 & BIT_16=0 & fB & XOP_1_10=711 & Rc=1
+{
+ tmp:4 = 0;
+ packFPSCR(tmp);
+
+ mask0:4 = zext((FM0:1 == 1)* 0xf) << 28;
+ mask1:4 = zext((FM1:1 == 1)* 0xf) << 24;
+ mask2:4 = zext((FM2:1 == 1)* 0xf) << 20;
+ mask3:4 = zext((FM3:1 == 1)* 0xf) << 16;
+ mask4:4 = zext((FM4:1 == 1)* 0xf) << 12;
+ mask5:4 = zext((FM5:1 == 1)* 0xf) << 8;
+ mask6:4 = zext((FM6:1 == 1)* 0xf) << 4;
+ mask7:4 = zext((FM7:1 == 1)* 0xf);
+
+ mask:4 = mask0 | mask1 | mask2 | mask3 | mask4 | mask5 | mask6 | mask7;
+
+ tmp1:4 = fB:4;
+ tmp2:4 = (tmp & ~mask) | (tmp1 & mask);
+ unpackFPSCR(tmp2);
+ cr1flags();
+}
+
+#mtfsfi 10,3 0xfc 00 01 0c
+:mtfsfi crfD,IMM is $(NOTVLE) & OP=63 & crfD & BITS_16_22=0 & IMM & BIT_11=0 & XOP_1_10=134 & Rc=0
+{
+ tmp:4 = 0;
+ packFPSCR(tmp);
+ shift:1 = 28-(crfD*4);
+ mask:4 = 0xf << shift;
+ tmp1:4 = IMM << shift;
+ tmp2:4 = (tmp & ~mask) | tmp1;
+ unpackFPSCR(tmp2);
+}
+
+#mtfsfi. 10,3 0xfc 00 01 0d
+:mtfsfi. crfD,IMM is $(NOTVLE) & OP=63 & crfD & BITS_16_22=0 & IMM & BIT_11=0 & XOP_1_10=134 & Rc=1
+{
+ tmp:4 = 0;
+ packFPSCR(tmp);
+ shift:1 = 28-(crfD*4);
+ mask:4 = 0xf << shift;
+ tmp1:4 = IMM << shift;
+ tmp2:4 = (tmp & ~mask) | tmp1;
+ unpackFPSCR(tmp2);
+ cr1flags();
+}
+
+# This instruction is not exclusive to 64-bit processors, per page 1259 of the PowerISA manual.
+# However, it does seem to require 64-bit registers, so it is currently restricted to 64-bit machines.
+@ifdef BIT_64
+#mtmsrd r0,0 0x7c 00 01 64
+:mtmsrd S,0 is $(NOTVLE) & OP=31 & S & BITS_17_20=0 & MSR_L=0 & BITS_11_15=0 & XOP_1_10=178 & BIT_0=0
+{
+ bit0:8 = (S >> 63) & 1;
+ bit1:8 = (S >> 62) & 1;
+ bit49:8 = (S >> 14) & 1;
+ bit59:8 = (S >> 4) & 1;
+ tmp:8 = S & 0x6fffffffffff6fcf;
+ tmp = tmp | ((bit0 | bit1) << 63);
+ tmp = tmp | ((bit59 | bit49) << 5);
+ MSR = MSR & 0xefffffff00009020 | tmp;
+}
+
+#mtmsrd r0,1 0x7c 01 01 64
+:mtmsrd S,1 is $(NOTVLE) & OP=31 & S & BITS_17_20=0 & MSR_L=1 & BITS_11_15=0 & XOP_1_10=178 & BIT_0=0
+{
+ mask:8 = 0x000000000000fffe & S;
+ MSR = (MSR & ~mask) | (S & mask);
+}
+@endif
+#mtocrf 10,r0 0x7c 21 01 20
+:mtocrf CRM,S is OP=31 & S & BIT_20=1 & CRM & CRM0 & CRM1 & CRM2 & CRM3 & CRM4 & CRM5 & CRM6 & CRM7 & BIT_11=0 & XOP_1_10=144 & BIT_0=0
+{
+ tmp:$(REGISTER_SIZE) = (S >> 28) & 0xf;
+ cr0 = (cr0 * (CRM0:1 == 0)) | (tmp:1 * (CRM0:1 == 1));
+
+ tmp = (S >> 24) & 0xf;
+ cr1 = (cr1 * (CRM1:1 == 0)) | (tmp:1 * (CRM1:1 == 1));
+
+ tmp = (S >> 20) & 0xf;
+ cr2 = (cr2 * (CRM2:1 == 0)) | (tmp:1 * (CRM2:1 == 1));
+
+ tmp = (S >> 16) & 0xf;
+ cr3 = (cr3 * (CRM3:1 == 0)) | (tmp:1 * (CRM3:1 == 1));
+
+ tmp = (S >> 12) & 0xf;
+ cr4 = (cr4 * (CRM4:1 == 0)) | (tmp:1 * (CRM4:1 == 1));
+
+ tmp = (S >> 8) & 0xf;
+ cr5 = (cr5 * (CRM5:1 == 0)) | (tmp:1 * (CRM5:1 == 1));
+
+ tmp = (S >> 4) & 0xf;
+ cr6 = (cr6 * (CRM6:1 == 0)) | (tmp:1 * (CRM6:1 == 1));
+
+ tmp = S & 0xf;
+ cr7 = (cr7 * (CRM7:1 == 0)) | (tmp:1 * (CRM7:1 == 1));
+}
+
+### is this pcode correct on 64-bit bridge?
+#mtsr sr0,r0 0x7c 00 01 a4
+:mtsr SR,S is $(NOTVLE) & OP=31 & S & SR & BIT_20=0 & BITS_11_15=0 & XOP_1_10=210 & BIT_0=0
+{
+@ifdef BIT_64
+ SR = S:4;
+@else
+ SR = S;
+@endif
+}
+
+#mtsrd sr0,r0 0x7c 00 00 a4
+:mtsrd SR,S is $(NOTVLE) & OP=31 & S & BIT_20=0 & SR & BITS_11_15=0 & XOP_1_10=82 & BIT_0=0
+{
+ SR = S:4;
+}
+
+#mtsrdin r0,r0 0x7c 00 00 e4
+:mtsrdin S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=114 & BIT_0=0
+{
+ local tmp = (B >> 28) & 0xf;
+ *[register]:4 ($(SEG_REGISTER_BASE)+tmp:4) = S:4;
+}
+
+### is this pcode correct on 64-bit bridge?
+#mtsrin r0,r0 0x7c 00 01 e4
+:mtsrin S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=242 & BIT_0=0
+{
+@ifdef BIT_64
+ tmp:4 = (B:4 >> 28);
+@else
+ tmp:$(REGISTER_SIZE) = (B >> 28);
+@endif
+ *[register]:4 ($(SEG_REGISTER_BASE)+tmp) = S;
+}
+
+@ifdef BIT_64
+#mulhd r0,r0,r0 0x7c 00 00 92
+:mulhd D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=73 & Rc=0
+{
+ tmp:16 = sext(A) * sext(B);
+ D = tmp(8);
+}
+#mulhd. r0,r0,r0 0x7c 00 00 93
+:mulhd. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=73 & Rc=1
+{
+ tmp:16 = sext(A) * sext(B);
+ D = tmp(8);
+ cr0flags(D);
+}
+
+#mulhdu r0,r0,r0 0x7c 00 00 12
+:mulhdu D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=9 & Rc=0
+{
+ tmp:16 = zext(A) * zext(B);
+ D = tmp(8);
+}
+#mulhdu. r0,r0,r0 0x7c 00 00 13
+:mulhdu. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=9 & Rc=1
+{
+ tmp:16 = zext(A) * zext(B);
+ D = tmp(8);
+ cr0flags(D);
+}
+
+@endif
+
+#mulhw r0,r0,r0 0x7c 00 00 96
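+# mulhw keeps the high word of the 64-bit product; tmp(4) is SLEIGH's
+# truncate-at-byte-offset operator, extracting bytes 4..7 (the upper half).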
+:mulhw D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=75 & Rc=0
+{
+@ifdef BIT_64
+ tmp:8 = sext(A:4) * sext(B:4);
+ tmp2:4 = tmp(4);
+ D = sext(tmp2);
+@else
+ tmp:8 = sext(A) * sext(B);
+ D = tmp(4);
+@endif
+}
+
+#mulhw. r0,r0,r0 0x7c 00 00 97
+:mulhw. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=75 & Rc=1
+{
+@ifdef BIT_64
+ tmp:8 = sext(A:4) * sext(B:4);
+ tmp2:4 = tmp(4);
+ D = sext(tmp2);
+@else
+ tmp:8 = sext(A) * sext(B);
+ D = tmp(4);
+@endif
+ cr0flags(D);
+}
+
+#mulhwu r0,r0,r0 0x7c 00 00 16
+:mulhwu D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=11 & Rc=0
+{
+@ifdef BIT_64
+ tmp:8 = zext(A:4) * zext(B:4);
+ tmp2:4 = tmp(4);
+ D=zext(tmp2);
+@else
+ tmp:8 = zext(A) * zext(B);
+ D = tmp(4);
+@endif
+}
+#mulhwu. r0,r0,r0 0x7c 00 00 17
+:mulhwu. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=11 & Rc=1
+{
+@ifdef BIT_64
+ tmp:8 = zext(A:4) * zext(B:4);
+ tmp2:4 = tmp(4);
+ D=zext(tmp2);
+@else
+ tmp:8 = zext(A) * zext(B);
+ D = tmp(4);
+@endif
+ cr0flags(D);
+}
+
+@ifdef BIT_64
+#mulld r0, r0, r0 0x7C 00 01 D2
+:mulld D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=233 & Rc=0
+{
+ tmp:16 = sext(A) * sext(B);
+ D = tmp:8;
+}
+
+#mulld. r0, r0, r0 0x7C 00 01 D3
+:mulld. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=233 & Rc=1
+{
+ tmp:16 = sext(A) * sext(B);
+ D = tmp:8;
+ cr0flags(D);
+}
+
+#mulldo r0, r0, r0 0x7C 00 05 D2
+:mulldo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=233 & Rc=0
+{
+ tmp:16 = sext(A) * sext(B);
+ D = tmp:8;
+ mulOverflow128(tmp);
+}
+
+#mulldo. r0, r0, r0 0x7C 00 05 D3
+:mulldo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=233 & Rc=1
+{
+ tmp:16 = sext(A) * sext(B);
+ D = tmp:8;
+ mulOverflow128(tmp);
+ cr0flags(D);
+}
+
+@endif
+
+#mulli r0,r0,r0 0x1C 00 00 00
+:mulli D,A,SIMM is $(NOTVLE) & OP=7 & D & A & SIMM
+{
+ D = A * SIMM;
+}
+
+#mullw r0,r0,r0 0x7C 00 01 D6
+:mullw D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=235 & Rc=0
+{
+@ifdef BIT_64
+ D = sext(A:4) * sext(B:4);
+@else
+ D = A*B;
+@endif
+}
+
+#mullw. r0,r0,r0 0x7C 00 01 D7
+:mullw. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=235 & Rc=1
+{
+@ifdef BIT_64
+ D = sext(A:4) * sext(B:4);
+@else
+ D = A*B;
+@endif
+ cr0flags(D);
+}
+
+#mullwo r0,r0,r0 0x7C 00 05 D6
+:mullwo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=235 & Rc=0
+{
+@ifdef BIT_64
+ D = sext(A:4) * sext(B:4);
+ mulOverflow64(D);
+@else
+ tmp:8 = sext(A) * sext(B);
+ mulOverflow64(tmp);
+ D = tmp:4;
+@endif
+}
+
+#mullwo. r0,r0,r0 0x7C 00 05 D7
+:mullwo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=235 & Rc=1
+{
+@ifdef BIT_64
+ D = sext(A:4) * sext(B:4);
+ mulOverflow64(D);
+@else
+ tmp:8 = sext(A) * sext(B);
+ mulOverflow64(tmp);
+ D = tmp:4;
+@endif
+ cr0flags(D);
+}
+
+#nand r0,r0,r0 0x7C 00 03 B8
+:nand A,S,B is OP=31 & S & A & B & XOP_1_10=476 & Rc=0
+{
+ A = ~(S & B);
+}
+
+#nand. r0,r0,r0 0x7C 00 03 B9
+:nand. A,S,B is OP=31 & S & A & B & XOP_1_10=476 & Rc=1
+{
+ A = ~(S & B);
+ cr0flags( A );
+}
+
+#neg r0,r0 0x7C 00 00 D0
+:neg D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=104 & Rc=0
+{
+ D = -A;
+}
+
+#neg. r0,r0 0x7C 00 00 D1
+:neg. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=104 & Rc=1
+{
+ D = -A;
+ cr0flags( D );
+}
+
+#nego r0,r0 0x7C 00 04 D0
+:nego D,A is $(NOTVLE) & OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=104 & Rc=0
+{
+ subOverflow(A,1);
+ D = -A;
+}
+
+#nego. r0,r0 0x7C 00 04 D1
+:nego. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=104 & Rc=1
+{
+ subOverflow(A,1);
+ D = -A;
+ cr0flags( D );
+}
+
+#nor r0,r0,r0 0x7C 00 00 F8
+:nor A,S,B is OP=31 & A & S & B & XOP_1_10=124 & Rc=0
+{
+ A = ~(S | B);
+}
+
+#nor. r0,r0,r0 0x7C 00 00 F9
+:nor. A,S,B is OP=31 & A & S & B & XOP_1_10=124 & Rc=1
+{
+ A = ~(S | B);
+ cr0flags(A);
+}
+
+#or r0,r0,r0 0x7C 00 03 78
+:or A,S,B is OP=31 & A & S & B & XOP_1_10=444 & Rc=0
+{
+ A = (S | B);
+}
+
+#or. r0,r0,r0 0x7C 00 03 79
+:or. A,S,B is OP=31 & A & S & B & XOP_1_10=444 & Rc=1
+{
+ A = (S | B);
+ cr0flags(A);
+}
+
+#orc r0,r0,r0 0x7C 00 03 38
+:orc A,S,B is OP=31 & A & S & B & XOP_1_10=412 & Rc=0
+{
+ A = S | ~B;
+}
+
+#orc. r0,r0,r0 0x7C 00 03 39
+:orc. A,S,B is OP=31 & A & S & B & XOP_1_10=412 & Rc=1
+{
+ A = S | ~B;
+ cr0flags(A);
+}
+
+#ori r0,r0,r0 0x60 00 00 00
+:ori A,S,UIMM is $(NOTVLE) & OP=24 & A & S & UIMM
+{
+ A = S | UIMM;
+}
+
+#oris r0,r0,r0 0x64 00 00 00
+:oris A,S,UIMM is $(NOTVLE) & OP=25 & A & S & UIMM
+{
+ A = S | (UIMM << 16);
+}
+
+#rfid 0x4c 00 00 24
+:rfid is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=18 & BIT_0=0
+{
+ returnFromInterrupt();
+ return[SRR0];
+}
+
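+# SLEIGH has no rotate operator, so "rotate left by n" is written below as
+# (x << n) | (x >> (width - n)); pcode defines an over-wide shift as 0, so the
+# n == 0 case still yields x.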
+@ifdef BIT_64
+#rldcl r0,r0,r0,0 0x78 00 00 10
+:rldcl A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=8 & Rc=0
+{
+ shift:$(REGISTER_SIZE) = B & 0x3f;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff >> MB);
+}
+#rldcl. r0,r0,r0,0 0x78 00 00 11
+:rldcl. A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=8 & Rc=1
+{
+ shift:$(REGISTER_SIZE) = B & 0x3f;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff >> MB);
+ cr0flags(A);
+}
+#rldcr r0,r0,r0,0 0x78 00 00 12
+:rldcr A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=9 & Rc=0
+{
+ shift:$(REGISTER_SIZE) = B & 0x3f;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff << (63 - MB));
+}
+#rldcr. r0,r0,r0,0 0x78 00 00 13
+:rldcr. A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=9 & Rc=1
+{
+ shift:$(REGISTER_SIZE) = B & 0x3f;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff << (63 - MB));
+ cr0flags(A);
+}
+
+#rldic r0,r0,r0,0 0x78 00 00 08
+:rldic A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=2 & Rc=0
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ mask:$(REGISTER_SIZE) = (0xffffffffffffffff >> MB) & (0xffffffffffffffff << shift);
+ A = tmp & mask;
+}
+#rldic. r0,r0,r0,0 0x78 00 00 09
+:rldic. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=2 & Rc=1
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ mask:$(REGISTER_SIZE) = (0xffffffffffffffff >> MB) & (0xffffffffffffffff << shift);
+ A = tmp & mask;
+ cr0flags(A);
+}
+#rldicl r0,r0,r0,0 0x78 00 00 00
+:rldicl A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=0 & Rc=0
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff >> MB);
+}
+#rldicl. r0,r0,r0,0 0x78 00 00 01
+:rldicl. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=0 & Rc=1
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff >> MB);
+ cr0flags(A);
+}
+#rldicr r0,r0,r0,0 0x78 00 00 04
+:rldicr A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=1 & Rc=0
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff << (63-MB));
+}
+#rldicr. r0,r0,r0,0 0x78 00 00 05
+:rldicr. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=1 & Rc=1
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ A = tmp & (0xffffffffffffffff << (63-MB));
+ cr0flags(A);
+}
+#rldimi r0,r0,r0,0 0x78 00 00 0c
+:rldimi A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=3 & Rc=0
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ mask:$(REGISTER_SIZE) = (0xffffffffffffffff >> MB) & (0xffffffffffffffff << shift);
+ A = (tmp & mask) | (A & ~mask);
+}
+#rldimi. r0,r0,r0,0 0x78 00 00 0d
+:rldimi. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=3 & Rc=1
+{
+ shift:4 = SH;
+ tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64 - shift));
+ mask:$(REGISTER_SIZE) = (0xffffffffffffffff >> MB) & (0xffffffffffffffff << shift);
+ A = (tmp & mask) | (A & ~mask);
+ cr0flags(A);
+}
+@endif
+
+#rlwimi r0,r0,0,0,0 0x50 00 00 00
+:rlwimi A,S,SHL,MBL,ME is $(NOTVLE) & OP=20 & S & A & SHL & MBL & ME & Rc=0 & rotmask
+{
+ shift:1 = SHL;
+@ifdef BIT_64
+ tmp:4 = S:4;
+ tmp2:4 = (tmp << shift) | (tmp >> (32 - shift));
+ A = zext(tmp2 & rotmask) | (A & ~zext(rotmask));
+@else
+ tmp = (S << shift) | (S >> (32 - shift));
+ A = (tmp & rotmask) | (A & ~rotmask);
+@endif
+}
+
+#rlwimi. r0,r0,0,0,0 0x50 00 00 01
+:rlwimi. A,S,SHL,MBL,ME is $(NOTVLE) & OP=20 & S & A & SHL & MBL & ME & Rc=1 & rotmask
+{
+ shift:1 = SHL;
+@ifdef BIT_64
+ tmp:4 = S:4;
+ tmp2:4 = (tmp << shift) | (tmp >> (32 - shift));
+ A = zext(tmp2 & rotmask) | (A & ~zext(rotmask));
+@else
+ tmp = (S << shift) | (S >> (32 - shift));
+ A = (tmp & rotmask) | (A & ~rotmask);
+@endif
+ cr0flags(A);
+}
+
+#rlwinm r0,r0,0,0,0 0x54 00 00 00
+:rlwinm A,S,SHL,MBL,ME is $(NOTVLE) & OP=21 & S & A & SHL & MBL & ME & Rc=0 & rotmask
+{
+ shift:1 = SHL;
+@ifdef BIT_64
+ tmp:4 = S:4;
+ tmp2:4 = (tmp << shift) | (tmp >> (32 - shift));
+ A = zext(tmp2 & rotmask);
+@else
+ tmp = (S << shift) | (S >> (32 - shift));
+ A = (tmp & rotmask);
+@endif
+}
+
+#rlwinm. r0,r0,0,0,0 0x54 00 00 01
+:rlwinm. A,S,SHL,MBL,ME is $(NOTVLE) & OP=21 & S & A & SHL & MBL & ME & Rc=1 & rotmask
+{
+ shift:1 = SHL;
+@ifdef BIT_64
+ tmp:4 = S:4;
+ tmp2:4 = (tmp << shift) | (tmp >> (32 - shift));
+ A = zext(tmp2 & rotmask);
+@else
+ tmp = (S << shift) | (S >> (32 - shift));
+ A = (tmp & rotmask);
+@endif
+ cr0flags(A);
+}
+
+#rlwnm r0,r0,0,0,0 0x5C 00 00 00
+:rlwnm A,S,B,MBL,ME is $(NOTVLE) & OP=23 & S & A & B & MBL & ME & Rc=0 & rotmask
+{
+ shift:$(REGISTER_SIZE) = B & 0x1f;
+@ifdef BIT_64
+ tmp:4 = S:4;
+ tmp2:4 = (tmp << shift) | (tmp >> (32 - shift));
+ A = zext(tmp2 & rotmask);
+@else
+ tmp = (S << shift) | (S >> (32 - shift));
+ A = (tmp & rotmask);
+@endif
+}
+
+#rlwnm. r0,r0,0,0,0 0x5C 00 00 01
+:rlwnm. A,S,B,MBL,ME is $(NOTVLE) & OP=23 & S & A & B & MBL & ME & Rc=1 & rotmask
+{
+ shift:$(REGISTER_SIZE) = B & 0x1f;
+@ifdef BIT_64
+ tmp:4 = S:4;
+ tmp2:4 = (tmp << shift) | (tmp >> (32 - shift));
+ A = zext(tmp2 & rotmask);
+@else
+ tmp = (S << shift) | (S >> (32 - shift));
+ A = (tmp & rotmask);
+@endif
+ cr0flags(A);
+}
+
+#sc 0x44 00 00 02
+:sc LEV is $(NOTVLE) & OP=17 & BITS_12_25=0 & LEV & BITS_2_4=0 & BIT_1=1 & BIT_0=0
+{
+ syscall();
+}
+
+#slbia 0x7C 00 03 E4
+:slbia is $(NOTVLE) & OP=31 & BITS_11_25=0 & XOP_1_10=498 & BIT_0=0
+{
+ slbInvalidateAll();
+}
+
+#slbie r0 0x7C 00 03 64
+:slbie B is $(NOTVLE) & OP=31 & BITS_16_20=0 & B & XOP_1_10=434 & BIT_0=0
+{
+ slbInvalidateEntry();
+}
+
+#slbmfee r0,r0 0x7C 00 07 26
+:slbmfee D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=915 & BIT_0=0
+{
+ slbMoveFromEntryESID();
+}
+
+#slbmfev r0,r0 0x7C 00 06 A6
+:slbmfev D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=851 & BIT_0=0
+{
+ slbMoveFromEntryVSID();
+}
+
+#slbmte r0,r0 0x7C 00 03 24
+:slbmte S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=402 & BIT_0=0
+{
+ slbMoveToEntry();
+}
+
+@ifdef BIT_64
+#sld r0,r0,r0 0x7C 00 00 36
+:sld A,S,B is $(NOTVLE) & OP=31 & S & A & B & XOP_1_10=27 & Rc=0
+{
+ A = S << (B & 0x7f);
+}
+
+#sld. r0,r0,r0 0x7C 00 00 37
+:sld. A,S,B is $(NOTVLE) & OP=31 & S & A & B & XOP_1_10=27 & Rc=1
+{
+ A = S << (B & 0x7f);
+ cr0flags(A);
+}
+@endif
+
+#slw r0,r0,r0 0x7C 00 00 30
+:slw A,S,B is OP=31 & S & A & B & XOP_1_10=24 & Rc=0
+{
+@ifdef BIT_64
+ tmp:4 = S:4 << B;
+ A = (A & 0xffffffff00000000) | zext(tmp);
+@else
+ A = S << B;
+@endif
+}
+
+
+#slw. r0,r0,r0 0x7C 00 00 31
+:slw. A,S,B is OP=31 & S & A & B & XOP_1_10=24 & Rc=1
+{
+@ifdef BIT_64
+ tmp:4 = S:4 << B;
+ A = (A & 0xffffffff00000000) | zext(tmp);
+@else
+ A = S << B;
+@endif
+ cr0flags(A);
+}
+
+@ifdef BIT_64
+#srad r0,r0,r0 0x7C 00 06 34
+:srad A,S,B is OP=31 & A & S & B & XOP_1_10=794 & Rc=0
+{
+ tmp:$(REGISTER_SIZE) = B & 0x7f;
+ shiftCarry(S,tmp);
+ A = S s>> tmp;
+}
+
+#srad. r0,r0,r0 0x7C 00 06 35
+:srad. A,S,B is OP=31 & A & S & B & XOP_1_10=794 & Rc=1
+{
+ tmp:$(REGISTER_SIZE) = B & 0x7f;
+ shiftCarry(S,tmp);
+ A = S s>> tmp;
+ cr0flags(A);
+}
+
+#sradi r0,r0,r0 0x7C 00 06 74
+:sradi A,S,SH is OP=31 & A & S & SH & XOP_2_10=413 & Rc=0
+{
+ shiftCarry(S,SH);
+ A = S s>> SH;
+}
+
+#sradi. r0,r0,r0 0x7C 00 06 75
+:sradi. A,S,SH is OP=31 & A & S & SH & XOP_2_10=413 & Rc=1
+{
+ shiftCarry(S,SH);
+ A = S s>> SH;
+ cr0flags(A);
+}
+
+@endif
+
+
+#sraw r0,r0,r0 0x7C 00 06 30
+:sraw A,S,B is OP=31 & A & S & B & XOP_1_10=792 & Rc=0
+{
+ shift:$(REGISTER_SIZE) = B & 0x3f;
+@ifdef BIT_64
+ shiftCarry(S:4,shift);
+ tmp2:4 = S:4 s>> shift;
+ A = (A & 0xffffffff00000000) | zext(tmp2);
+@else
+ shiftCarry(S,shift);
+ A = S s>> shift;
+@endif
+}
+#sraw. r0,r0,r0 0x7C 00 06 31
+:sraw. A,S,B is OP=31 & A & S & B & XOP_1_10=792 & Rc=1
+{
+ shift:$(REGISTER_SIZE) = B & 0x3f;
+@ifdef BIT_64
+ shiftCarry(S:4,shift);
+ tmp2:4 = S:4 s>> shift;
+ A = (A & 0xffffffff00000000) | zext(tmp2);
+@else
+ shiftCarry(S,shift);
+ A = S s>> shift;
+@endif
+ cr0flags(A);
+}
+
+#srawi r0,r0,r0 0x7C 00 06 70
+:srawi A,S,SHL is OP=31 & A & S & SHL & XOP_1_10=824 & Rc=0
+{
+@ifdef BIT_64
+ shift:4 = SHL;
+ shiftCarry(S:4,shift);
+ tmp2:4 = S:4 s>> shift;
+ A = (A & 0xffffffff00000000) | zext(tmp2);
+@else
+ shiftCarry(S,SHL);
+ A = S s>> SHL;
+@endif
+}
+#srawi. r0,r0,r0 0x7C 00 06 71
+:srawi. A,S,SHL is OP=31 & A & S & SHL & XOP_1_10=824 & Rc=1
+{
+@ifdef BIT_64
+ shift:4 = SHL;
+ shiftCarry(S:4,shift);
+ tmp2:4 = S:4 s>> shift;
+ A = (A & 0xffffffff00000000) | zext(tmp2);
+@else
+ shiftCarry(S,SHL);
+ A = S s>> SHL;
+@endif
+ cr0flags(A);
+}
+
+@ifdef BIT_64
+#srd r0,r0,r0 0x7C 00 04 36
+:srd A,S,B is OP=31 & S & A & B & XOP_1_10=539 & Rc=0
+{
+ A = S >> (B & 0x7f);
+}
+
+#srd. r0,r0,r0 0x7C 00 04 37
+:srd. A,S,B is OP=31 & S & A & B & XOP_1_10=539 & Rc=1
+{
+ A = S >> (B & 0x7f);
+ cr0flags(A);
+}
+@endif
+
+#srw r0,r0,r0 0x7C 00 04 30
+:srw A,S,B is OP=31 & S & A & B & XOP_1_10=536 & Rc=0
+{
+@ifdef BIT_64
+ tmp:4 = S:4 >> B;
+ A = (A & 0xffffffff00000000) | zext(tmp);
+@else
+ A = S >> B;
+@endif
+}
+
+
+#srw. r0,r0,r0 0x7C 00 04 31
+:srw. A,S,B is OP=31 & S & A & B & XOP_1_10=536 & Rc=1
+{
+@ifdef BIT_64
+ tmp:4 = S:4 >> B;
+ A = (A & 0xffffffff00000000) | zext(tmp);
+@else
+ A = S >> B;
+@endif
+ cr0flags(A);
+}
+
+#stb r0,3(0) 0x98 00 00 00
+#stb r0,3(r2) 0x98 02 00 00
+:stb S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=38 & S & dPlusRaOrZeroAddress
+{
+ *:1(dPlusRaOrZeroAddress) = S:1;
+}
+
+#stbu r0,3(0) 0x9c 00 00 00
+#stbu r0,3(r2) 0x9c 02 00 00
+:stbu S,dPlusRaAddress is $(NOTVLE) & OP=39 & S & dPlusRaAddress & A
+{
+ *:1(dPlusRaAddress) = S:1;
+ A = dPlusRaAddress;
+}
+
+#stbux r0,r2,r0 0x7c 00 01 ee ### WARNING: the B in this definition differs from the manual - I think the manual is wrong
+:stbux S,A,B is OP=31 & S & A & B & XOP_1_10=247 & BIT_0=0
+{
+ tmp:$(REGISTER_SIZE) = A+B; # S may be same register as A
+ *tmp = S:1; # So do store before updating A
+ A = tmp;
+}
+
+#stbx r0,r2,r0 0x7c 00 01 ae ### WARNING: the B in this definition differs from the manual - I think the manual is wrong
+:stbx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=215 & BIT_0=0
+{
+ *(RA_OR_ZERO+B) = S:1;
+}
+
+@ifdef BIT_64
+#std r0,8(0) 0xf8 00 00 08
+#std r0,8(r2) 0xf8 02 00 08
+:std S,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=62 & S & dsPlusRaOrZeroAddress & BITS_0_1=0
+{
+ *:8(dsPlusRaOrZeroAddress) = S;
+}
+
+#Special case when saving r2 to stack prior to function call (for inline call stub case)
+#std r2,0x28(r1)
+:std r2,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=62 & S=2 & r2 & A=1 & SIMM_DS=0xa & dsPlusRaOrZeroAddress & BITS_0_1=0
+{
+ r2Save = r2;
+ *:8(dsPlusRaOrZeroAddress) = r2;
+}
+
+#stdcx. r0,8(0) 0x7c 00 01 AD
+:stdcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=214 & BIT_0=1
+{
+ EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ if (RESERVE == 0) goto inst_next;
+ *[ram]:8 EA = storeDoubleWordConditionalIndexed(S,RA_OR_ZERO,B);
+}
+
+#stdu r0,8(0) 0xf8 00 00 01
+#stdu r0,8(r2) 0xf8 02 00 01
+:stdu S,dsPlusRaAddress is $(NOTVLE) & OP=62 & S & A & dsPlusRaAddress & BITS_0_1=1
+{
+ *:8(dsPlusRaAddress) = S;
+ A = dsPlusRaAddress;
+}
+
+#stdux r0,r2,r0 0x7c 00 01 6a
+:stdux S,A,B is OP=31 & S & A & B & XOP_1_10=181 & BIT_0=0
+{
+ A = A+B;
+ *:8(A) = S;
+}
+
+#stdx r0,r2,r0 0x7c 00 01 2a
+:stdx S,RA_OR_ZERO,B is OP=31 & S & B & RA_OR_ZERO & XOP_1_10=149 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ *:8(ea) = S;
+}
+
+@endif
+
+#stfd fr0,8(0) 0xD8 00 00 08
+#stfd fr0,8(r2) 0xD8 02 00 08
+:stfd fS,dPlusRaOrZeroAddress is $(NOTVLE) & OP=54 & fS & dPlusRaOrZeroAddress
+{
+ *:8(dPlusRaOrZeroAddress) = fS;
+}
+
+#stfdu fr0,8(0) 0xDC 00 00 08
+#stfdu fr0,8(r2) 0xDC 02 00 08
+:stfdu fS,dPlusRaAddress is $(NOTVLE) & OP=55 & fS & dPlusRaAddress & A
+{
+ A = dPlusRaAddress;
+ *:8(dPlusRaAddress) = fS;
+}
+
+#stfdux fr0,r2,r0 0x7C 00 05 EE
+:stfdux fS,A,B is $(NOTVLE) & OP=31 & fS & A & B & XOP_1_10=759 & BIT_0=0
+{
+ A = A+B;
+ *:8(A) = fS;
+}
+
+#stfdx fr0,r0,r0 0x7C 00 05 AE
+:stfdx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=727 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ *:8(ea) = fS;
+}
+
+#stfiwx fr0,r0,r0 0x7C 00 07 AE
+:stfiwx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=983 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+ *:4(ea) = fS:4;
+}
+
+#stfs fr0,8(0) 0xD0 00 00 08
+#stfs fr0,8(r2) 0xD0 02 00 08
+:stfs fS,dPlusRaOrZeroAddress is $(NOTVLE) & OP=52 & fS & dPlusRaOrZeroAddress
+{
+ tmp:4 = float2float(fS);
+ *:4(dPlusRaOrZeroAddress) = tmp;
+}
+
+#stfsu fr0,8(0) 0xD4 00 00 08
+#stfsu fr0,8(r2) 0xD4 02 00 08
+:stfsu fS,dPlusRaAddress is $(NOTVLE) & OP=53 & fS & dPlusRaAddress & A
+{
+ tmp:4 = float2float(fS);
+ *:4(dPlusRaAddress) = tmp;
+ A = dPlusRaAddress;
+}
+
+#stfsux fr0,r0,r0 0x7C 00 05 6E
+:stfsux fS,A,B is $(NOTVLE) & OP=31 & fS & B & A & XOP_1_10=695 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = A + B;
+ tmp:4 = float2float(fS);
+ *:4(ea) = tmp;
+ A = ea;
+}
+
+#stfsx fr0,r0,r0 0x7C 00 05 2E
+:stfsx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=663 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ tmp:4 = float2float(fS);
+ *:4(ea) = tmp;
+}
+
+#sth r0,r0 0xB0 00 00 00
+:sth S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=44 & S & dPlusRaOrZeroAddress
+{
+ *:2(dPlusRaOrZeroAddress) = S:2;
+}
+
+#sthbrx r0,r0,r0 0x7C 00 07 2C
+:sthbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=918 & BIT_0=0
+{
+ tmp:2 = zext(S:1) << 8;
+ tmp2:2 = S:2 >> 8;
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ *:2(ea) = tmp2 | tmp;
+}
+
+#sthu r0,r0 0xB4 00 00 00
+:sthu S,dPlusRaAddress is $(NOTVLE) & OP=45 & S & A & dPlusRaAddress
+{
+ *:2(dPlusRaAddress) = S:2;
+ A = dPlusRaAddress;
+}
+
+#sthux r0,r0,r0 0x7C 00 03 6E
+:sthux S,A,B is OP=31 & S & A & B & XOP_1_10=439 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = A + B;
+ *:2(ea) = S:2;
+ A = ea;
+}
+
+#sthx r0,r0,r0 0x7C 00 03 2E
+:sthx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=407 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ *:2(ea) = S:2;
+}
+
+####
+#stmw instructions
+@include "stmwInstructions.sinc"
+
+@include "stswiInstructions.sinc"
+#stswi r0,r0,0 0x7c 00 05 aa
+#:stswi S,A,NB is $(NOTVLE) & OP=31 & S & A & NB & XOP_1_10=725 & BIT_0=0
+#{
+# tmp:1 = NB;
+# storeString(S,A,tmp);
+#}
+
+#stswx r0,r0,0 0x7c 00 05 2a
+define pcodeop stswxOp;
+:stswx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=661 & BIT_0=0
+{
+ EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ *[ram]:1 EA = stswxOp(S,RA_OR_ZERO,B);
+}
+
+#stw r0,r0,0 0x90 00 00 00
+:stw S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=36 & S & dPlusRaOrZeroAddress
+{
+@ifdef BIT_64
+ *:4(dPlusRaOrZeroAddress) = S:4;
+@else
+ *:4(dPlusRaOrZeroAddress) = S;
+@endif
+}
+
+#stwbrx r0,r0,0 0x7c 00 05 2c
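+# stwbrx stores the four bytes of the low word of S to EA in reversed order,
+# i.e. a little-endian word store.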
+:stwbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=662 & BIT_0=0
+{
+@ifdef BIT_64
+ value:4 = S:4;
+@else
+ value:$(REGISTER_SIZE) = S;
+@endif
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ tmp1:4 = value << 24;
+ tmp2:4 = (value << 8) & 0xff0000;
+ tmp3:4 = (value >> 8) & 0x00ff00;
+ tmp4:4 = value >> 24;
+ *:4(ea) = tmp1 | tmp2 | tmp3 | tmp4;
+}
+
+#stwcx. r0,8(0) 0x7c 00 01 2D
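+# The store only takes place while the reservation from an earlier lwarx is
+# held; the conditional store itself is left to the pcodeop.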
+:stwcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=150 & BIT_0=1
+{
+ EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ if (RESERVE == 0) goto inst_next;
+ *[ram]:4 EA = storeWordConditionalIndexed(S,RA_OR_ZERO,B);
+}
+
+#stwu r0,r0 0x94 00 00 00
+:stwu S,dPlusRaAddress is $(NOTVLE) & OP=37 & S & A & dPlusRaAddress
+{
+@ifdef BIT_64
+ *:4(dPlusRaAddress) = S:4;
+@else
+ *:4(dPlusRaAddress) = S;
+@endif
+ A = dPlusRaAddress;
+}
+
+#stwux r0,r0,r0 0x7C 00 01 6E
+:stwux S,A,B is OP=31 & S & A & B & XOP_1_10=183 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = A + B;
+@ifdef BIT_64
+ *:4(ea) = S:4;
+@else
+ *:4(ea) = S;
+@endif
+ A = ea;
+}
+
+#stwx r0,r0,r0 0x7C 00 01 2E
+:stwx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=151 & BIT_0=0
+{
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+@ifdef BIT_64
+ *:4(ea) = S:4;
+@else
+ *:4(ea) = S;
+@endif
+}
+
+
+#subf r0,r0,r0 0x7c 00 00 50
+:subf D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=40 & Rc=0
+{
+ D = B - A;
+}
+
+#subf. r0,r0,r0 0x7c 00 00 51
+:subf. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=40 & Rc=1
+{
+ D = B - A;
+ cr0flags(D);
+}
+
+#subfo r1,r2,r3 0x7c 00 04 50
+:subfo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=40 & Rc=0
+{
+ D = B - A;
+ subOverflow(B,A);
+}
+
+#subfo. r1,r2,r3 0x7c 00 04 51
+:subfo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=40 & Rc=1
+{
+ D = B - A;
+ subOverflow( B, A );
+ cr0flags(D);
+}
+
+#subfc r0,r0,r0 0x7c 00 00 10
+:subfc D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=8 & Rc=0
+{
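+ # unsigned B - A produces a carry (no borrow) exactly when A <= B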
+ xer_ca = (A <= B);
+ D = B - A;
+}
+
+#subfc. r0,r0,r0 0x7c 00 00 11
+:subfc. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=8 & Rc=1
+{
+ xer_ca = (A <= B);
+ D = B - A;
+ cr0flags(D);
+}
+
+#subfco r0,r0,r0 0x7c 00 04 10
+:subfco D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=8 & Rc=0
+{
+ xer_ca = (A <= B);
+ D = B - A;
+ subOverflow( B, A );
+}
+
+#subfco. r0,r0,r0 0x7c 00 04 11
+:subfco. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=8 & Rc=1
+{
+ xer_ca = (A <= B);
+ D = B - A;
+ subOverflow( B, A );
+ cr0flags(D);
+}
+
+#subfe r0,r0,r0 0x7c 00 01 10
+:subfe D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=136 & Rc=0
+{
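+ # subfe computes B + ~A + CA = B - A - 1 + CA, folded here into the single
+ # subtraction B - (A + !CA)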
+ tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A;
+ xer_ca= (tmp<=B);
+ D = B - tmp;
+}
+
+#subfe. r0,r0,r0 0x7c 00 01 11
+:subfe. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=136 & Rc=1
+{
+ tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A;
+ xer_ca= (tmp<=B);
+ D = B - tmp;
+ cr0flags(D);
+}
+
+#subfeo r0,r0,r0 0x7c 00 05 10
+:subfeo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=136 & Rc=0
+{
+ tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A;
+ xer_ca= (tmp<=B);
+ D = B - tmp;
+ subOverflow( B, tmp );
+}
+
+#subfeo. r0,r0,r0 0x7c 00 05 11
+:subfeo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=136 & Rc=1
+{
+ tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A;
+ xer_ca= (tmp<=B);
+ D = B - tmp;
+ subOverflow( B, tmp );
+ cr0flags(D);
+}
+
+#subfic r0,r0,2 0x20 00 00 02
+:subfic D,A,SIMM is $(NOTVLE) & OP=8 & D & A & SIMM
+{
+ xer_ca = !(SIMM