[coreboot-gerrit] Patch set updated for coreboot: buildgcc: Update GCC, Binutils, GMP, MPFR, GDB and IASL

Martin Roth (martinroth@google.com) gerrit at coreboot.org
Fri Feb 24 17:31:20 CET 2017


Martin Roth (martinroth at google.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/17189

-gerrit

commit fe20937de34b409b6be8317091410444b481abd3
Author: Iru Cai <mytbk920423 at gmail.com>
Date:   Sat Oct 29 23:37:42 2016 +0800

    buildgcc: Update GCC, Binutils, GMP, MPFR, GDB and IASL
    
    - GCC gets updated from 5.3.0 to 6.3.0:
    For RISC-V, I rebased riscv-gnu-toolchain onto gcc-6_2_0-release and regenerated
    the diff (see the sketch after this list):
    git diff --src-prefix=original-gcc/ --dst-prefix=gcc-6.2.0/ gcc-6_2_0-release
    
    - Binutils goes from 2.26.1 to 2.27:
    There is a build error when building gold for MIPS, so a patch is added for it.
    
    The RISC-V patches are based on riscv-gnu-toolchain revision 910ea19, and the
    riscv-binutils-gdb tree that generated binutils-2.27_riscv.patch has been lost.
    For GCC, the gcc-6.2.0_riscv.patch can still be applied to GCC 6.3.0.
    
    - GMP gets a bump from 6.1.0 to 6.1.2
    - MPFR is updated from 3.1.4 to 3.1.5
    - GDB is upgraded from 7.11 to 7.12
    - IASL is changed from 20160831 to 20161222
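    
    A minimal sketch of how the RISC-V GCC patch described above could be regenerated.
    The upstream remote URL and the starting checkout are assumptions for illustration,
    not details recorded in this change:
    
    # Start from a checkout of the RISC-V GCC port (the riscv-gcc tree used by
    # riscv-gnu-toolchain at revision 910ea19), then:
    git remote add upstream https://gcc.gnu.org/git/gcc.git  # upstream GCC mirror (assumed URL)
    git fetch upstream tag gcc-6_2_0-release                  # fetch the 6.2.0 release tag
    git rebase gcc-6_2_0-release                               # rebase the RISC-V port onto it
    # Write the patch with the prefixes buildgcc expects:
    git diff --src-prefix=original-gcc/ --dst-prefix=gcc-6.2.0/ gcc-6_2_0-release \
        > gcc-6.2.0_riscv.patch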
    
    Change-Id: I20fea838d798c430d8c4d2cc6b07614d967c60c5
    Signed-off-by: Iru Cai <mytbk920423 at gmail.com>
    Signed-off-by: Martin Roth <martinroth at google.com>
---
 util/crossgcc/buildgcc                             |    16 +-
 .../patches/acpica-unix2-20160831_iasl.patch       |    27 -
 .../patches/acpica-unix2-20161222_iasl.patch       |    27 +
 util/crossgcc/patches/binutils-2.26.1_aarch.patch  |    92 -
 .../patches/binutils-2.26.1_no-bfd-doc.patch       |    12 -
 util/crossgcc/patches/binutils-2.26.1_riscv.patch  |  9780 -----------------
 util/crossgcc/patches/binutils-2.27_aarch.patch    |    92 +
 .../crossgcc/patches/binutils-2.27_mips-gold.patch |    11 +
 .../patches/binutils-2.27_no-bfd-doc.patch         |    12 +
 util/crossgcc/patches/binutils-2.27_riscv.patch    | 10251 ++++++++++++++++++
 util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch   |    87 -
 util/crossgcc/patches/gcc-5.3.0_gnat.patch         |    11 -
 util/crossgcc/patches/gcc-5.3.0_libc_name_p.patch  |    24 -
 util/crossgcc/patches/gcc-5.3.0_libgcc.patch       |    57 -
 util/crossgcc/patches/gcc-5.3.0_nds32.patch        |    17 -
 util/crossgcc/patches/gcc-5.3.0_riscv.patch        | 10122 ------------------
 util/crossgcc/patches/gcc-6.3.0_elf_biarch.patch   |    87 +
 util/crossgcc/patches/gcc-6.3.0_gnat.patch         |    11 +
 util/crossgcc/patches/gcc-6.3.0_libgcc.patch       |    57 +
 util/crossgcc/patches/gcc-6.3.0_nds32.patch        |    17 +
 util/crossgcc/patches/gcc-6.3.0_riscv.patch        | 10428 +++++++++++++++++++
 util/crossgcc/patches/gdb-7.11_amd64.patch         |    15 -
 util/crossgcc/patches/gdb-7.11_no-doc.patch        |    12 -
 util/crossgcc/patches/gdb-7.11_pythonhome.patch    |    19 -
 util/crossgcc/patches/gdb-7.12_amd64.patch         |    15 +
 util/crossgcc/patches/gdb-7.12_no-doc.patch        |    12 +
 util/crossgcc/patches/gdb-7.12_pythonhome.patch    |    19 +
 .../sum/acpica-unix2-20160831.tar.gz.cksum         |     1 -
 .../sum/acpica-unix2-20161222.tar.gz.cksum         |     1 +
 util/crossgcc/sum/binutils-2.26.1.tar.bz2.cksum    |     1 -
 util/crossgcc/sum/binutils-2.27.tar.bz2.cksum      |     1 +
 util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum          |     1 -
 util/crossgcc/sum/gcc-6.3.0.tar.bz2.cksum          |     1 +
 util/crossgcc/sum/gdb-7.11.tar.xz.cksum            |     1 -
 util/crossgcc/sum/gdb-7.12.tar.xz.cksum            |     1 +
 util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum           |     1 -
 util/crossgcc/sum/gmp-6.1.2.tar.xz.cksum           |     1 +
 util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum          |     1 -
 util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum          |     1 +
 39 files changed, 21053 insertions(+), 20289 deletions(-)
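
The diffstat above also renames the tarball checksum files under util/crossgcc/sum/.
A minimal sketch of how those entries could be regenerated for the new versions,
assuming the .cksum files hold plain sha1sum output over the downloaded tarballs
(check an existing .cksum file before relying on this):

  # Change into the crossgcc directory after buildgcc has downloaded the new tarballs,
  # then record a checksum for each updated package.
  cd util/crossgcc
  for f in tarballs/gcc-6.3.0.tar.bz2 tarballs/binutils-2.27.tar.bz2 \
           tarballs/gdb-7.12.tar.xz tarballs/gmp-6.1.2.tar.xz \
           tarballs/mpfr-3.1.5.tar.xz tarballs/acpica-unix2-20161222.tar.gz; do
      sha1sum "$f" > "sum/$(basename "$f").cksum"
  done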

diff --git a/util/crossgcc/buildgcc b/util/crossgcc/buildgcc
index 1cefcd5..f64d224 100755
--- a/util/crossgcc/buildgcc
+++ b/util/crossgcc/buildgcc
@@ -18,8 +18,8 @@
 
 cd $(dirname $0)
 
-CROSSGCC_DATE="August 31st, 2016"
-CROSSGCC_VERSION="1.43"
+CROSSGCC_DATE="December 28th, 2016"
+CROSSGCC_VERSION="1.44"
 CROSSGCC_COMMIT=$( git describe )
 
 # default settings
@@ -35,15 +35,15 @@ BOOTSTRAP=0
 THREADS=1
 
 # GCC toolchain version numbers
-GMP_VERSION=6.1.0
-MPFR_VERSION=3.1.4
+GMP_VERSION=6.1.2
+MPFR_VERSION=3.1.5
 MPC_VERSION=1.0.3
 LIBELF_VERSION=0.8.13
-GCC_VERSION=5.3.0
+GCC_VERSION=6.3.0
 GCC_AUTOCONF_VERSION=2.69
-BINUTILS_VERSION=2.26.1
-GDB_VERSION=7.11
-IASL_VERSION=20160831
+BINUTILS_VERSION=2.27
+GDB_VERSION=7.12
+IASL_VERSION=20161222
 PYTHON_VERSION=3.5.1
 EXPAT_VERSION=2.1.1
 # CLANG version number
diff --git a/util/crossgcc/patches/acpica-unix2-20160831_iasl.patch b/util/crossgcc/patches/acpica-unix2-20160831_iasl.patch
deleted file mode 100644
index f119f46..0000000
--- a/util/crossgcc/patches/acpica-unix2-20160831_iasl.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-diff -Naur acpica-unix2-20160831/source/compiler/asloptions.c acpica-unix2-20160831a/source/compiler/asloptions.c
---- acpica-unix2-20160831/source/compiler/asloptions.c
-+++ acpica-unix2-20160831a/source/compiler/asloptions.c
-@@ -100,6 +100,7 @@
-     if (argc < 2)
-     {
-         printf (ACPI_COMMON_SIGNON (ASL_COMPILER_NAME));
-+        printf ("%s\n", COREBOOT_TOOLCHAIN_VERSION);
-         Usage ();
-         exit (1);
-     }
-@@ -130,6 +131,7 @@
-     if (Gbl_DoSignon)
-     {
-         printf (ACPI_COMMON_SIGNON (ASL_COMPILER_NAME));
-+        printf ("%s\n", COREBOOT_TOOLCHAIN_VERSION);
-         if (Gbl_IgnoreErrors)
-         {
-             printf ("Ignoring all errors, forcing AML file generation\n\n");
-@@ -711,6 +713,7 @@
-         case '^':
- 
-             printf (ACPI_COMMON_SIGNON (ASL_COMPILER_NAME));
-+            printf ("%s\n", COREBOOT_TOOLCHAIN_VERSION);
-             exit (0);
- 
-         case 'a':
diff --git a/util/crossgcc/patches/acpica-unix2-20161222_iasl.patch b/util/crossgcc/patches/acpica-unix2-20161222_iasl.patch
new file mode 100644
index 0000000..24bde98
--- /dev/null
+++ b/util/crossgcc/patches/acpica-unix2-20161222_iasl.patch
@@ -0,0 +1,27 @@
+diff -Naur acpica-unix2-20161222/source/compiler/asloptions.c acpica-unix2-20161222/source/compiler/asloptions.c
+--- acpica-unix2-20161222/source/compiler/asloptions.c
++++ acpica-unix2-20161222/source/compiler/asloptions.c
+@@ -100,6 +100,7 @@
+     if (argc < 2)
+     {
+         printf (ACPI_COMMON_SIGNON (ASL_COMPILER_NAME));
++        printf ("%s\n", COREBOOT_TOOLCHAIN_VERSION);
+         Usage ();
+         exit (1);
+     }
+@@ -130,6 +131,7 @@
+     if (Gbl_DoSignon)
+     {
+         printf (ACPI_COMMON_SIGNON (ASL_COMPILER_NAME));
++        printf ("%s\n", COREBOOT_TOOLCHAIN_VERSION);
+         if (Gbl_IgnoreErrors)
+         {
+             printf ("Ignoring all errors, forcing AML file generation\n\n");
+@@ -711,6 +713,7 @@
+         case '^':
+
+             printf (ACPI_COMMON_SIGNON (ASL_COMPILER_NAME));
++            printf ("%s\n", COREBOOT_TOOLCHAIN_VERSION);
+             exit (0);
+
+         case 'a':
diff --git a/util/crossgcc/patches/binutils-2.26.1_aarch.patch b/util/crossgcc/patches/binutils-2.26.1_aarch.patch
deleted file mode 100644
index 4a04418..0000000
--- a/util/crossgcc/patches/binutils-2.26.1_aarch.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
-index 2d491f6..e221ef4 100644
---- a/gas/config/tc-aarch64.c
-+++ b/gas/config/tc-aarch64.c
-@@ -1736,13 +1736,13 @@ s_ltorg (int ignored ATTRIBUTE_UNUSED)
-       if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
- 	continue;
- 
--      mapping_state (MAP_DATA);
--
-       /* Align pool as you have word accesses.
-          Only make a frag if we have to.  */
-       if (!need_pass_2)
- 	frag_align (align, 0, 0);
- 
-+      mapping_state (MAP_DATA);
-+
-       record_alignment (now_seg, align);
- 
-       sprintf (sym_name, "$$lit_\002%x", pool->id);
-@@ -6373,11 +6373,15 @@ aarch64_init_frag (fragS * fragP, int max_chars)
- 
-   switch (fragP->fr_type)
-     {
--    case rs_align:
-     case rs_align_test:
-     case rs_fill:
-       mapping_state_2 (MAP_DATA, max_chars);
-       break;
-+    case rs_align:
-+      /* PR 20364: We can get alignment frags in code sections,
-+	 so do not just assume that we should use the MAP_DATA state.  */
-+      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
-+      break;
-     case rs_align_code:
-       mapping_state_2 (MAP_INSN, max_chars);
-       break;
-diff --git a/gas/testsuite/gas/aarch64/pr20364.d b/gas/testsuite/gas/aarch64/pr20364.d
-new file mode 100644
-index 0000000..babcff1
---- /dev/null
-+++ b/gas/testsuite/gas/aarch64/pr20364.d
-@@ -0,0 +1,13 @@
-+# Check that ".align <size>, <fill>" does not set the mapping state to DATA, causing unnecessary frag generation.
-+#name: PR20364 
-+#objdump: -d
-+
-+.*:     file format .*
-+
-+Disassembly of section \.vectors:
-+
-+0+000 <.*>:
-+   0:	d2800000 	mov	x0, #0x0                   	// #0
-+   4:	94000000 	bl	0 <plat_report_exception>
-+   8:	17fffffe 	b	0 <bl1_exceptions>
-+
-diff --git a/gas/testsuite/gas/aarch64/pr20364.s b/gas/testsuite/gas/aarch64/pr20364.s
-new file mode 100644
-index 0000000..594ad7c
---- /dev/null
-+++ b/gas/testsuite/gas/aarch64/pr20364.s
-@@ -0,0 +1,28 @@
-+ .macro vector_base label
-+ .section .vectors, "ax"
-+ .align 11, 0
-+ \label:
-+ .endm
-+
-+ .macro vector_entry label
-+ .section .vectors, "ax"
-+ .align 7, 0
-+ \label:
-+ .endm
-+
-+ .macro check_vector_size since
-+   .if (. - \since) > (32 * 4)
-+     .error "Vector exceeds 32 instructions"
-+   .endif
-+ .endm
-+
-+ .globl bl1_exceptions
-+
-+vector_base bl1_exceptions
-+
-+vector_entry SynchronousExceptionSP0
-+ mov x0, #0x0
-+ bl plat_report_exception
-+ b SynchronousExceptionSP0
-+ check_vector_size SynchronousExceptionSP0
-+
--- 
-1.7.1
diff --git a/util/crossgcc/patches/binutils-2.26.1_no-bfd-doc.patch b/util/crossgcc/patches/binutils-2.26.1_no-bfd-doc.patch
deleted file mode 100644
index 35c22ff..0000000
--- a/util/crossgcc/patches/binutils-2.26.1_no-bfd-doc.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -ur binutils-2.26.1/bfd/Makefile.in binutils-2.26.1.patched/bfd/Makefile.in
---- binutils-2.26.1/bfd/Makefile.in	2015-11-13 16:27:40.000000000 +0800
-+++ binutils-2.26.1.patched/bfd/Makefile.in	2016-04-02 11:05:43.398422394 +0800
-@@ -341,7 +341,7 @@
- ACLOCAL_AMFLAGS = -I . -I .. -I ../config
- INCDIR = $(srcdir)/../include
- CSEARCH = -I. -I$(srcdir) -I$(INCDIR)
--SUBDIRS = doc po
-+SUBDIRS = po
- bfddocdir = doc
- libbfd_la_LDFLAGS = $(am__append_1) -release `cat libtool-soversion` \
- 	@SHARED_LDFLAGS@ $(am__empty)
diff --git a/util/crossgcc/patches/binutils-2.26.1_riscv.patch b/util/crossgcc/patches/binutils-2.26.1_riscv.patch
deleted file mode 100644
index 248ee40..0000000
--- a/util/crossgcc/patches/binutils-2.26.1_riscv.patch
+++ /dev/null
@@ -1,9780 +0,0 @@
-diff -urN empty/bfd/cpu-riscv.c binutils-2.26.1/bfd/cpu-riscv.c
---- empty/bfd/cpu-riscv.c	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/cpu-riscv.c	2016-04-03 10:33:12.058793036 +0800
-@@ -0,0 +1,76 @@
-+/* BFD backend for RISC-V
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target.
-+
-+   This file is part of BFD, the Binary File Descriptor library.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3 of the License, or
-+   (at your option) any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#include "sysdep.h"
-+#include "bfd.h"
-+#include "libbfd.h"
-+
-+/* This routine is provided two arch_infos and returns an arch_info
-+   that is compatible with both, or NULL if none exists.  */
-+
-+static const bfd_arch_info_type *
-+riscv_compatible (const bfd_arch_info_type *a, const bfd_arch_info_type *b)
-+{
-+  if (a->arch != b->arch)
-+    return NULL;
-+
-+  /* Machine compatibility is checked in
-+     _bfd_riscv_elf_merge_private_bfd_data.  */
-+
-+  return a;
-+}
-+
-+#define N(BITS_WORD, BITS_ADDR, NUMBER, PRINT, DEFAULT, NEXT)		\
-+  {							\
-+    BITS_WORD, /*  bits in a word */			\
-+    BITS_ADDR, /* bits in an address */			\
-+    8,	/* 8 bits in a byte */				\
-+    bfd_arch_riscv,					\
-+    NUMBER,						\
-+    "riscv",						\
-+    PRINT,						\
-+    3,							\
-+    DEFAULT,						\
-+    riscv_compatible,					\
-+    bfd_default_scan,					\
-+    bfd_arch_default_fill,				\
-+    NEXT,						\
-+  }
-+
-+enum
-+{
-+  I_riscv64,
-+  I_riscv32
-+};
-+
-+#define NN(index) (&arch_info_struct[(index) + 1])
-+
-+static const bfd_arch_info_type arch_info_struct[] =
-+{
-+  N (64, 64, bfd_mach_riscv64, "riscv:rv64", FALSE, NN (I_riscv64)),
-+  N (32, 32, bfd_mach_riscv32, "riscv:rv32", FALSE, 0)
-+};
-+
-+/* The default architecture is riscv:rv64.  */
-+
-+const bfd_arch_info_type bfd_riscv_arch =
-+  N (64, 64, 0, "riscv", TRUE, &arch_info_struct[0]);
-diff -urN empty/bfd/elfnn-riscv.c binutils-2.26.1/bfd/elfnn-riscv.c
---- empty/bfd/elfnn-riscv.c	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/elfnn-riscv.c	2016-04-03 10:33:12.062126369 +0800
-@@ -0,0 +1,3022 @@
-+/* RISC-V-specific support for NN-bit ELF.
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on TILE-Gx and MIPS targets.
-+
-+   This file is part of BFD, the Binary File Descriptor library.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3 of the License, or
-+   (at your option) any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+/* This file handles RISC-V ELF targets.  */
-+
-+#include "sysdep.h"
-+#include "bfd.h"
-+#include "libbfd.h"
-+#include "bfdlink.h"
-+#include "genlink.h"
-+#include "elf-bfd.h"
-+#include "elfxx-riscv.h"
-+#include "elf/riscv.h"
-+#include "opcode/riscv.h"
-+
-+#define ARCH_SIZE NN
-+
-+#define MINUS_ONE ((bfd_vma)0 - 1)
-+
-+#define RISCV_ELF_LOG_WORD_BYTES (ARCH_SIZE == 32 ? 2 : 3)
-+
-+#define RISCV_ELF_WORD_BYTES (1 << RISCV_ELF_LOG_WORD_BYTES)
-+
-+/* The name of the dynamic interpreter.  This is put in the .interp
-+   section.  */
-+
-+#define ELF64_DYNAMIC_INTERPRETER "/lib/ld.so.1"
-+#define ELF32_DYNAMIC_INTERPRETER "/lib32/ld.so.1"
-+
-+#define ELF_ARCH			bfd_arch_riscv
-+#define ELF_TARGET_ID			RISCV_ELF_DATA
-+#define ELF_MACHINE_CODE		EM_RISCV
-+#define ELF_MAXPAGESIZE			0x1000
-+#define ELF_COMMONPAGESIZE		0x1000
-+
-+/* The RISC-V linker needs to keep track of the number of relocs that it
-+   decides to copy as dynamic relocs in check_relocs for each symbol.
-+   This is so that it can later discard them if they are found to be
-+   unnecessary.  We store the information in a field extending the
-+   regular ELF linker hash table.  */
-+
-+struct riscv_elf_dyn_relocs
-+{
-+  struct riscv_elf_dyn_relocs *next;
-+
-+  /* The input section of the reloc.  */
-+  asection *sec;
-+
-+  /* Total number of relocs copied for the input section.  */
-+  bfd_size_type count;
-+
-+  /* Number of pc-relative relocs copied for the input section.  */
-+  bfd_size_type pc_count;
-+};
-+
-+/* RISC-V ELF linker hash entry.  */
-+
-+struct riscv_elf_link_hash_entry
-+{
-+  struct elf_link_hash_entry elf;
-+
-+  /* Track dynamic relocs copied for this symbol.  */
-+  struct riscv_elf_dyn_relocs *dyn_relocs;
-+
-+#define GOT_UNKNOWN     0
-+#define GOT_NORMAL      1
-+#define GOT_TLS_GD      2
-+#define GOT_TLS_IE      4
-+#define GOT_TLS_LE      8
-+  char tls_type;
-+};
-+
-+#define riscv_elf_hash_entry(ent) \
-+  ((struct riscv_elf_link_hash_entry *)(ent))
-+
-+struct _bfd_riscv_elf_obj_tdata
-+{
-+  struct elf_obj_tdata root;
-+
-+  /* tls_type for each local got entry.  */
-+  char *local_got_tls_type;
-+};
-+
-+#define _bfd_riscv_elf_tdata(abfd) \
-+  ((struct _bfd_riscv_elf_obj_tdata *) (abfd)->tdata.any)
-+
-+#define _bfd_riscv_elf_local_got_tls_type(abfd) \
-+  (_bfd_riscv_elf_tdata (abfd)->local_got_tls_type)
-+
-+#define _bfd_riscv_elf_tls_type(abfd, h, symndx)		\
-+  (*((h) != NULL ? &riscv_elf_hash_entry (h)->tls_type		\
-+     : &_bfd_riscv_elf_local_got_tls_type (abfd) [symndx]))
-+
-+#define is_riscv_elf(bfd)				\
-+  (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
-+   && elf_tdata (bfd) != NULL				\
-+   && elf_object_id (bfd) == RISCV_ELF_DATA)
-+
-+#include "elf/common.h"
-+#include "elf/internal.h"
-+
-+struct riscv_elf_link_hash_table
-+{
-+  struct elf_link_hash_table elf;
-+
-+  /* Short-cuts to get to dynamic linker sections.  */
-+  asection *sdynbss;
-+  asection *srelbss;
-+  asection *sdyntdata;
-+
-+  /* Small local sym to section mapping cache.  */
-+  struct sym_cache sym_cache;
-+};
-+
-+
-+/* Get the RISC-V ELF linker hash table from a link_info structure.  */
-+#define riscv_elf_hash_table(p) \
-+  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
-+  == RISCV_ELF_DATA ? ((struct riscv_elf_link_hash_table *) ((p)->hash)) : NULL)
-+
-+static void
-+riscv_info_to_howto_rela (bfd *abfd ATTRIBUTE_UNUSED,
-+			  arelent *cache_ptr,
-+			  Elf_Internal_Rela *dst)
-+{
-+  cache_ptr->howto = riscv_elf_rtype_to_howto (ELFNN_R_TYPE (dst->r_info));
-+}
-+
-+static void
-+riscv_elf_append_rela (bfd *abfd, asection *s, Elf_Internal_Rela *rel)
-+{
-+  const struct elf_backend_data *bed;
-+  bfd_byte *loc;
-+
-+  bed = get_elf_backend_data (abfd);
-+  loc = s->contents + (s->reloc_count++ * bed->s->sizeof_rela);
-+  bed->s->swap_reloca_out (abfd, rel, loc);
-+}
-+
-+/* PLT/GOT stuff */
-+
-+#define PLT_HEADER_INSNS 8
-+#define PLT_ENTRY_INSNS 4
-+#define PLT_HEADER_SIZE (PLT_HEADER_INSNS * 4)
-+#define PLT_ENTRY_SIZE (PLT_ENTRY_INSNS * 4)
-+
-+#define GOT_ENTRY_SIZE RISCV_ELF_WORD_BYTES
-+
-+#define GOTPLT_HEADER_SIZE (2 * GOT_ENTRY_SIZE)
-+
-+#define sec_addr(sec) ((sec)->output_section->vma + (sec)->output_offset)
-+
-+static bfd_vma
-+riscv_elf_got_plt_val (bfd_vma plt_index, struct bfd_link_info *info)
-+{
-+  return sec_addr (riscv_elf_hash_table (info)->elf.sgotplt)
-+	 + GOTPLT_HEADER_SIZE + (plt_index * GOT_ENTRY_SIZE);
-+}
-+
-+#if ARCH_SIZE == 32
-+# define MATCH_LREG MATCH_LW
-+#else
-+# define MATCH_LREG MATCH_LD
-+#endif
-+
-+/* Generate a PLT header.  */
-+
-+static void
-+riscv_make_plt_header (bfd_vma gotplt_addr, bfd_vma addr, uint32_t *entry)
-+{
-+  bfd_vma gotplt_offset_high = RISCV_PCREL_HIGH_PART (gotplt_addr, addr);
-+  bfd_vma gotplt_offset_low = RISCV_PCREL_LOW_PART (gotplt_addr, addr);
-+
-+  /* auipc  t2, %hi(.got.plt)
-+     sub    t1, t1, t3               # shifted .got.plt offset + hdr size + 12
-+     l[w|d] t3, %lo(.got.plt)(t2)    # _dl_runtime_resolve
-+     addi   t1, t1, -(hdr size + 12) # shifted .got.plt offset
-+     addi   t0, t2, %lo(.got.plt)    # &.got.plt
-+     srli   t1, t1, log2(16/PTRSIZE) # .got.plt offset
-+     l[w|d] t0, PTRSIZE(t0)          # link map
-+     jr     t3 */
-+
-+  entry[0] = RISCV_UTYPE (AUIPC, X_T2, gotplt_offset_high);
-+  entry[1] = RISCV_RTYPE (SUB, X_T1, X_T1, X_T3);
-+  entry[2] = RISCV_ITYPE (LREG, X_T3, X_T2, gotplt_offset_low);
-+  entry[3] = RISCV_ITYPE (ADDI, X_T1, X_T1, -(PLT_HEADER_SIZE + 12));
-+  entry[4] = RISCV_ITYPE (ADDI, X_T0, X_T2, gotplt_offset_low);
-+  entry[5] = RISCV_ITYPE (SRLI, X_T1, X_T1, 4 - RISCV_ELF_LOG_WORD_BYTES);
-+  entry[6] = RISCV_ITYPE (LREG, X_T0, X_T0, RISCV_ELF_WORD_BYTES);
-+  entry[7] = RISCV_ITYPE (JALR, 0, X_T3, 0);
-+}
-+
-+/* Generate a PLT entry.  */
-+
-+static void
-+riscv_make_plt_entry (bfd_vma got, bfd_vma addr, uint32_t *entry)
-+{
-+  /* auipc  t3, %hi(.got.plt entry)
-+     l[w|d] t3, %lo(.got.plt entry)(t3)
-+     jalr   t1, t3
-+     nop */
-+
-+  entry[0] = RISCV_UTYPE (AUIPC, X_T3, RISCV_PCREL_HIGH_PART (got, addr));
-+  entry[1] = RISCV_ITYPE (LREG,  X_T3, X_T3, RISCV_PCREL_LOW_PART(got, addr));
-+  entry[2] = RISCV_ITYPE (JALR, X_T1, X_T3, 0);
-+  entry[3] = RISCV_NOP;
-+}
-+
-+/* Create an entry in an RISC-V ELF linker hash table.  */
-+
-+static struct bfd_hash_entry *
-+link_hash_newfunc (struct bfd_hash_entry *entry,
-+		   struct bfd_hash_table *table, const char *string)
-+{
-+  /* Allocate the structure if it has not already been allocated by a
-+     subclass.  */
-+  if (entry == NULL)
-+    {
-+      entry =
-+	bfd_hash_allocate (table,
-+			   sizeof (struct riscv_elf_link_hash_entry));
-+      if (entry == NULL)
-+	return entry;
-+    }
-+
-+  /* Call the allocation method of the superclass.  */
-+  entry = _bfd_elf_link_hash_newfunc (entry, table, string);
-+  if (entry != NULL)
-+    {
-+      struct riscv_elf_link_hash_entry *eh;
-+
-+      eh = (struct riscv_elf_link_hash_entry *) entry;
-+      eh->dyn_relocs = NULL;
-+      eh->tls_type = GOT_UNKNOWN;
-+    }
-+
-+  return entry;
-+}
-+
-+/* Create a RISC-V ELF linker hash table.  */
-+
-+static struct bfd_link_hash_table *
-+riscv_elf_link_hash_table_create (bfd *abfd)
-+{
-+  struct riscv_elf_link_hash_table *ret;
-+  bfd_size_type amt = sizeof (struct riscv_elf_link_hash_table);
-+
-+  ret = (struct riscv_elf_link_hash_table *) bfd_zmalloc (amt);
-+  if (ret == NULL)
-+    return NULL;
-+
-+  if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, link_hash_newfunc,
-+				      sizeof (struct riscv_elf_link_hash_entry),
-+				      RISCV_ELF_DATA))
-+    {
-+      free (ret);
-+      return NULL;
-+    }
-+
-+  return &ret->elf.root;
-+}
-+
-+/* Create the .got section.  */
-+
-+static bfd_boolean
-+riscv_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
-+{
-+  flagword flags;
-+  asection *s, *s_got;
-+  struct elf_link_hash_entry *h;
-+  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
-+  struct elf_link_hash_table *htab = elf_hash_table (info);
-+
-+  /* This function may be called more than once.  */
-+  s = bfd_get_linker_section (abfd, ".got");
-+  if (s != NULL)
-+    return TRUE;
-+
-+  flags = bed->dynamic_sec_flags;
-+
-+  s = bfd_make_section_anyway_with_flags (abfd,
-+					  (bed->rela_plts_and_copies_p
-+					   ? ".rela.got" : ".rel.got"),
-+					  (bed->dynamic_sec_flags
-+					   | SEC_READONLY));
-+  if (s == NULL
-+      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
-+    return FALSE;
-+  htab->srelgot = s;
-+
-+  s = s_got = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
-+  if (s == NULL
-+      || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
-+    return FALSE;
-+  htab->sgot = s;
-+
-+  /* The first bit of the global offset table is the header.  */
-+  s->size += bed->got_header_size;
-+
-+  if (bed->want_got_plt)
-+    {
-+      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
-+      if (s == NULL
-+	  || !bfd_set_section_alignment (abfd, s,
-+					 bed->s->log_file_align))
-+	return FALSE;
-+      htab->sgotplt = s;
-+
-+      /* Reserve room for the header.  */
-+      s->size += GOTPLT_HEADER_SIZE;
-+    }
-+
-+  if (bed->want_got_sym)
-+    {
-+      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
-+	 section.  We don't do this in the linker script because we don't want
-+	 to define the symbol if we are not creating a global offset
-+	 table.  */
-+      h = _bfd_elf_define_linkage_sym (abfd, info, s_got,
-+				       "_GLOBAL_OFFSET_TABLE_");
-+      elf_hash_table (info)->hgot = h;
-+      if (h == NULL)
-+	return FALSE;
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
-+   .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
-+   hash table.  */
-+
-+static bfd_boolean
-+riscv_elf_create_dynamic_sections (bfd *dynobj,
-+				   struct bfd_link_info *info)
-+{
-+  struct riscv_elf_link_hash_table *htab;
-+
-+  htab = riscv_elf_hash_table (info);
-+  BFD_ASSERT (htab != NULL);
-+
-+  if (!riscv_elf_create_got_section (dynobj, info))
-+    return FALSE;
-+
-+  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
-+    return FALSE;
-+
-+  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
-+  if (!bfd_link_pic (info))
-+    {
-+      htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
-+      htab->sdyntdata =
-+	bfd_make_section_anyway_with_flags (dynobj, ".tdata.dyn",
-+					    SEC_ALLOC | SEC_THREAD_LOCAL);
-+    }
-+
-+  if (!htab->elf.splt || !htab->elf.srelplt || !htab->sdynbss
-+      || (!bfd_link_pic (info) && (!htab->srelbss || !htab->sdyntdata)))
-+    abort ();
-+
-+  return TRUE;
-+}
-+
-+/* Copy the extra info we tack onto an elf_link_hash_entry.  */
-+
-+static void
-+riscv_elf_copy_indirect_symbol (struct bfd_link_info *info,
-+				struct elf_link_hash_entry *dir,
-+				struct elf_link_hash_entry *ind)
-+{
-+  struct riscv_elf_link_hash_entry *edir, *eind;
-+
-+  edir = (struct riscv_elf_link_hash_entry *) dir;
-+  eind = (struct riscv_elf_link_hash_entry *) ind;
-+
-+  if (eind->dyn_relocs != NULL)
-+    {
-+      if (edir->dyn_relocs != NULL)
-+	{
-+	  struct riscv_elf_dyn_relocs **pp;
-+	  struct riscv_elf_dyn_relocs *p;
-+
-+	  /* Add reloc counts against the indirect sym to the direct sym
-+	     list.  Merge any entries against the same section.  */
-+	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
-+	    {
-+	      struct riscv_elf_dyn_relocs *q;
-+
-+	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
-+		if (q->sec == p->sec)
-+		  {
-+		    q->pc_count += p->pc_count;
-+		    q->count += p->count;
-+		    *pp = p->next;
-+		    break;
-+		  }
-+	      if (q == NULL)
-+		pp = &p->next;
-+	    }
-+	  *pp = edir->dyn_relocs;
-+	}
-+
-+      edir->dyn_relocs = eind->dyn_relocs;
-+      eind->dyn_relocs = NULL;
-+    }
-+
-+  if (ind->root.type == bfd_link_hash_indirect
-+      && dir->got.refcount <= 0)
-+    {
-+      edir->tls_type = eind->tls_type;
-+      eind->tls_type = GOT_UNKNOWN;
-+    }
-+  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
-+}
-+
-+static bfd_boolean
-+riscv_elf_record_tls_type (bfd *abfd, struct elf_link_hash_entry *h,
-+			   unsigned long symndx, char tls_type)
-+{
-+  char *new_tls_type = &_bfd_riscv_elf_tls_type (abfd, h, symndx);
-+  *new_tls_type |= tls_type;
-+  if ((*new_tls_type & GOT_NORMAL) && (*new_tls_type & ~GOT_NORMAL))
-+    {
-+      (*_bfd_error_handler)
-+	(_("%B: `%s' accessed both as normal and thread local symbol"),
-+	 abfd, h ? h->root.root.string : "<local>");
-+      return FALSE;
-+    }
-+  return TRUE;
-+}
-+
-+static bfd_boolean
-+riscv_elf_record_got_reference (bfd *abfd, struct bfd_link_info *info,
-+				struct elf_link_hash_entry *h, long symndx)
-+{
-+  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
-+
-+  if (htab->elf.sgot == NULL)
-+    {
-+      if (!riscv_elf_create_got_section (htab->elf.dynobj, info))
-+	return FALSE;
-+    }
-+
-+  if (h != NULL)
-+    {
-+      h->got.refcount += 1;
-+      return TRUE;
-+    }
-+
-+  /* This is a global offset table entry for a local symbol.  */
-+  if (elf_local_got_refcounts (abfd) == NULL)
-+    {
-+      bfd_size_type size = symtab_hdr->sh_info * (sizeof (bfd_vma) + 1);
-+      if (!(elf_local_got_refcounts (abfd) = bfd_zalloc (abfd, size)))
-+	return FALSE;
-+      _bfd_riscv_elf_local_got_tls_type (abfd)
-+	= (char *) (elf_local_got_refcounts (abfd) + symtab_hdr->sh_info);
-+    }
-+  elf_local_got_refcounts (abfd) [symndx] += 1;
-+
-+  return TRUE;
-+}
-+
-+static bfd_boolean
-+bad_static_reloc (bfd *abfd, unsigned r_type, struct elf_link_hash_entry *h)
-+{
-+  (*_bfd_error_handler)
-+    (_("%B: relocation %s against `%s' can not be used when making a shared "
-+       "object; recompile with -fPIC"),
-+      abfd, riscv_elf_rtype_to_howto (r_type)->name,
-+      h != NULL ? h->root.root.string : "a local symbol");
-+  bfd_set_error (bfd_error_bad_value);
-+  return FALSE;
-+}
-+/* Look through the relocs for a section during the first phase, and
-+   allocate space in the global offset table or procedure linkage
-+   table.  */
-+
-+static bfd_boolean
-+riscv_elf_check_relocs (bfd *abfd, struct bfd_link_info *info,
-+			asection *sec, const Elf_Internal_Rela *relocs)
-+{
-+  struct riscv_elf_link_hash_table *htab;
-+  Elf_Internal_Shdr *symtab_hdr;
-+  struct elf_link_hash_entry **sym_hashes;
-+  const Elf_Internal_Rela *rel;
-+  asection *sreloc = NULL;
-+
-+  if (bfd_link_relocatable (info))
-+    return TRUE;
-+
-+  htab = riscv_elf_hash_table (info);
-+  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
-+  sym_hashes = elf_sym_hashes (abfd);
-+
-+  if (htab->elf.dynobj == NULL)
-+    htab->elf.dynobj = abfd;
-+
-+  for (rel = relocs; rel < relocs + sec->reloc_count; rel++)
-+    {
-+      unsigned int r_type;
-+      unsigned long r_symndx;
-+      struct elf_link_hash_entry *h;
-+
-+      r_symndx = ELFNN_R_SYM (rel->r_info);
-+      r_type = ELFNN_R_TYPE (rel->r_info);
-+
-+      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
-+	{
-+	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
-+				 abfd, r_symndx);
-+	  return FALSE;
-+	}
-+
-+      if (r_symndx < symtab_hdr->sh_info)
-+	h = NULL;
-+      else
-+	{
-+	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
-+	  while (h->root.type == bfd_link_hash_indirect
-+		 || h->root.type == bfd_link_hash_warning)
-+	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
-+
-+	  /* PR15323, ref flags aren't set for references in the same
-+	     object.  */
-+	  h->root.non_ir_ref = 1;
-+	}
-+
-+      switch (r_type)
-+	{
-+	case R_RISCV_TLS_GD_HI20:
-+	  if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
-+	      || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_GD))
-+	    return FALSE;
-+	  break;
-+
-+	case R_RISCV_TLS_GOT_HI20:
-+	  if (bfd_link_pic (info))
-+	    info->flags |= DF_STATIC_TLS;
-+	  if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
-+	      || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_IE))
-+	    return FALSE;
-+	  break;
-+
-+	case R_RISCV_GOT_HI20:
-+	  if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
-+	      || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_NORMAL))
-+	    return FALSE;
-+	  break;
-+
-+	case R_RISCV_CALL_PLT:
-+	  /* This symbol requires a procedure linkage table entry.  We
-+	     actually build the entry in adjust_dynamic_symbol,
-+	     because this might be a case of linking PIC code without
-+	     linking in any dynamic objects, in which case we don't
-+	     need to generate a procedure linkage table after all.  */
-+
-+	  if (h != NULL)
-+	    {
-+	      h->needs_plt = 1;
-+	      h->plt.refcount += 1;
-+	    }
-+	  break;
-+
-+	case R_RISCV_CALL:
-+	case R_RISCV_JAL:
-+	case R_RISCV_BRANCH:
-+	case R_RISCV_RVC_BRANCH:
-+	case R_RISCV_RVC_JUMP:
-+	case R_RISCV_PCREL_HI20:
-+	  /* In shared libraries, these relocs are known to bind locally.  */
-+	  if (bfd_link_pic (info))
-+	    break;
-+	  goto static_reloc;
-+
-+	case R_RISCV_TPREL_HI20:
-+	  if (!bfd_link_executable (info))
-+	    return bad_static_reloc (abfd, r_type, h);
-+	  if (h != NULL)
-+	    riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_LE);
-+	  goto static_reloc;
-+
-+	case R_RISCV_HI20:
-+	  if (bfd_link_pic (info))
-+	    return bad_static_reloc (abfd, r_type, h);
-+	  /* Fall through.  */
-+
-+	case R_RISCV_COPY:
-+	case R_RISCV_JUMP_SLOT:
-+	case R_RISCV_RELATIVE:
-+	case R_RISCV_64:
-+	case R_RISCV_32:
-+	  /* Fall through.  */
-+
-+	static_reloc:
-+	  /* This reloc might not bind locally.  */
-+	  if (h != NULL)
-+	    h->non_got_ref = 1;
-+
-+	  if (h != NULL && !bfd_link_pic (info))
-+	    {
-+	      /* We may need a .plt entry if the function this reloc
-+		 refers to is in a shared lib.  */
-+	      h->plt.refcount += 1;
-+	    }
-+
-+	  /* If we are creating a shared library, and this is a reloc
-+	     against a global symbol, or a non PC relative reloc
-+	     against a local symbol, then we need to copy the reloc
-+	     into the shared library.  However, if we are linking with
-+	     -Bsymbolic, we do not need to copy a reloc against a
-+	     global symbol which is defined in an object we are
-+	     including in the link (i.e., DEF_REGULAR is set).  At
-+	     this point we have not seen all the input files, so it is
-+	     possible that DEF_REGULAR is not set now but will be set
-+	     later (it is never cleared).  In case of a weak definition,
-+	     DEF_REGULAR may be cleared later by a strong definition in
-+	     a shared library.  We account for that possibility below by
-+	     storing information in the relocs_copied field of the hash
-+	     table entry.  A similar situation occurs when creating
-+	     shared libraries and symbol visibility changes render the
-+	     symbol local.
-+
-+	     If on the other hand, we are creating an executable, we
-+	     may need to keep relocations for symbols satisfied by a
-+	     dynamic library if we manage to avoid copy relocs for the
-+	     symbol.  */
-+	  if ((bfd_link_pic (info)
-+	       && (sec->flags & SEC_ALLOC) != 0
-+	       && (! riscv_elf_rtype_to_howto (r_type)->pc_relative
-+		   || (h != NULL
-+		       && (! info->symbolic
-+			   || h->root.type == bfd_link_hash_defweak
-+			   || !h->def_regular))))
-+	      || (!bfd_link_pic (info)
-+		  && (sec->flags & SEC_ALLOC) != 0
-+		  && h != NULL
-+		  && (h->root.type == bfd_link_hash_defweak
-+		      || !h->def_regular)))
-+	    {
-+	      struct riscv_elf_dyn_relocs *p;
-+	      struct riscv_elf_dyn_relocs **head;
-+
-+	      /* When creating a shared object, we must copy these
-+		 relocs into the output file.  We create a reloc
-+		 section in dynobj and make room for the reloc.  */
-+	      if (sreloc == NULL)
-+		{
-+		  sreloc = _bfd_elf_make_dynamic_reloc_section
-+		    (sec, htab->elf.dynobj, RISCV_ELF_LOG_WORD_BYTES,
-+		    abfd, /*rela?*/ TRUE);
-+
-+		  if (sreloc == NULL)
-+		    return FALSE;
-+		}
-+
-+	      /* If this is a global symbol, we count the number of
-+		 relocations we need for this symbol.  */
-+	      if (h != NULL)
-+		head = &((struct riscv_elf_link_hash_entry *) h)->dyn_relocs;
-+	      else
-+		{
-+		  /* Track dynamic relocs needed for local syms too.
-+		     We really need local syms available to do this
-+		     easily.  Oh well.  */
-+
-+		  asection *s;
-+		  void *vpp;
-+		  Elf_Internal_Sym *isym;
-+
-+		  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
-+						abfd, r_symndx);
-+		  if (isym == NULL)
-+		    return FALSE;
-+
-+		  s = bfd_section_from_elf_index (abfd, isym->st_shndx);
-+		  if (s == NULL)
-+		    s = sec;
-+
-+		  vpp = &elf_section_data (s)->local_dynrel;
-+		  head = (struct riscv_elf_dyn_relocs **) vpp;
-+		}
-+
-+	      p = *head;
-+	      if (p == NULL || p->sec != sec)
-+		{
-+		  bfd_size_type amt = sizeof *p;
-+		  p = ((struct riscv_elf_dyn_relocs *)
-+		       bfd_alloc (htab->elf.dynobj, amt));
-+		  if (p == NULL)
-+		    return FALSE;
-+		  p->next = *head;
-+		  *head = p;
-+		  p->sec = sec;
-+		  p->count = 0;
-+		  p->pc_count = 0;
-+		}
-+
-+	      p->count += 1;
-+	      p->pc_count += riscv_elf_rtype_to_howto (r_type)->pc_relative;
-+	    }
-+
-+	  break;
-+
-+	case R_RISCV_GNU_VTINHERIT:
-+	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
-+	    return FALSE;
-+	  break;
-+
-+	case R_RISCV_GNU_VTENTRY:
-+	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
-+	    return FALSE;
-+	  break;
-+
-+	default:
-+	  break;
-+	}
-+    }
-+
-+  return TRUE;
-+}
-+
-+static asection *
-+riscv_elf_gc_mark_hook (asection *sec,
-+			struct bfd_link_info *info,
-+			Elf_Internal_Rela *rel,
-+			struct elf_link_hash_entry *h,
-+			Elf_Internal_Sym *sym)
-+{
-+  if (h != NULL)
-+    switch (ELFNN_R_TYPE (rel->r_info))
-+      {
-+      case R_RISCV_GNU_VTINHERIT:
-+      case R_RISCV_GNU_VTENTRY:
-+	return NULL;
-+      }
-+
-+  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
-+}
-+
-+/* Update the got entry reference counts for the section being removed.  */
-+static bfd_boolean
-+riscv_elf_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
-+			 asection *sec, const Elf_Internal_Rela *relocs)
-+{
-+  const Elf_Internal_Rela *rel, *relend;
-+  Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
-+  struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
-+  bfd_signed_vma *local_got_refcounts = elf_local_got_refcounts (abfd);
-+
-+  if (bfd_link_relocatable (info))
-+    return TRUE;
-+
-+  elf_section_data (sec)->local_dynrel = NULL;
-+
-+  for (rel = relocs, relend = relocs + sec->reloc_count; rel < relend; rel++)
-+    {
-+      unsigned long r_symndx;
-+      struct elf_link_hash_entry *h = NULL;
-+
-+      r_symndx = ELFNN_R_SYM (rel->r_info);
-+      if (r_symndx >= symtab_hdr->sh_info)
-+	{
-+	  struct riscv_elf_link_hash_entry *eh;
-+	  struct riscv_elf_dyn_relocs **pp;
-+	  struct riscv_elf_dyn_relocs *p;
-+
-+	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
-+	  while (h->root.type == bfd_link_hash_indirect
-+		 || h->root.type == bfd_link_hash_warning)
-+	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
-+	  eh = (struct riscv_elf_link_hash_entry *) h;
-+	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
-+	    if (p->sec == sec)
-+	      {
-+		/* Everything must go for SEC.  */
-+		*pp = p->next;
-+		break;
-+	      }
-+	}
-+
-+      switch (ELFNN_R_TYPE (rel->r_info))
-+	{
-+	case R_RISCV_GOT_HI20:
-+	case R_RISCV_TLS_GOT_HI20:
-+	case R_RISCV_TLS_GD_HI20:
-+	  if (h != NULL)
-+	    {
-+	      if (h->got.refcount > 0)
-+		h->got.refcount--;
-+	    }
-+	  else
-+	    {
-+	      if (local_got_refcounts &&
-+		  local_got_refcounts[r_symndx] > 0)
-+		local_got_refcounts[r_symndx]--;
-+	    }
-+	  break;
-+
-+	case R_RISCV_HI20:
-+	case R_RISCV_PCREL_HI20:
-+	case R_RISCV_COPY:
-+	case R_RISCV_JUMP_SLOT:
-+	case R_RISCV_RELATIVE:
-+	case R_RISCV_64:
-+	case R_RISCV_32:
-+	case R_RISCV_BRANCH:
-+	case R_RISCV_CALL:
-+	case R_RISCV_JAL:
-+	case R_RISCV_RVC_BRANCH:
-+	case R_RISCV_RVC_JUMP:
-+	  if (bfd_link_pic (info))
-+	    break;
-+	  /* Fall through.  */
-+
-+	case R_RISCV_CALL_PLT:
-+	  if (h != NULL)
-+	    {
-+	      if (h->plt.refcount > 0)
-+		h->plt.refcount--;
-+	    }
-+	  break;
-+
-+	default:
-+	  break;
-+	}
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Adjust a symbol defined by a dynamic object and referenced by a
-+   regular object.  The current definition is in some section of the
-+   dynamic object, but we're not including those sections.  We have to
-+   change the definition to something the rest of the link can
-+   understand.  */
-+
-+static bfd_boolean
-+riscv_elf_adjust_dynamic_symbol (struct bfd_link_info *info,
-+				 struct elf_link_hash_entry *h)
-+{
-+  struct riscv_elf_link_hash_table *htab;
-+  struct riscv_elf_link_hash_entry * eh;
-+  struct riscv_elf_dyn_relocs *p;
-+  bfd *dynobj;
-+  asection *s;
-+
-+  htab = riscv_elf_hash_table (info);
-+  BFD_ASSERT (htab != NULL);
-+
-+  dynobj = htab->elf.dynobj;
-+
-+  /* Make sure we know what is going on here.  */
-+  BFD_ASSERT (dynobj != NULL
-+	      && (h->needs_plt
-+		  || h->type == STT_GNU_IFUNC
-+		  || h->u.weakdef != NULL
-+		  || (h->def_dynamic
-+		      && h->ref_regular
-+		      && !h->def_regular)));
-+
-+  /* If this is a function, put it in the procedure linkage table.  We
-+     will fill in the contents of the procedure linkage table later
-+     (although we could actually do it here).  */
-+  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
-+    {
-+      if (h->plt.refcount <= 0
-+	  || SYMBOL_CALLS_LOCAL (info, h)
-+	  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
-+	      && h->root.type == bfd_link_hash_undefweak))
-+	{
-+	  /* This case can occur if we saw a R_RISCV_CALL_PLT reloc in an
-+	     input file, but the symbol was never referred to by a dynamic
-+	     object, or if all references were garbage collected.  In such
-+	     a case, we don't actually need to build a PLT entry.  */
-+	  h->plt.offset = (bfd_vma) -1;
-+	  h->needs_plt = 0;
-+	}
-+
-+      return TRUE;
-+    }
-+  else
-+    h->plt.offset = (bfd_vma) -1;
-+
-+  /* If this is a weak symbol, and there is a real definition, the
-+     processor independent code will have arranged for us to see the
-+     real definition first, and we can just use the same value.  */
-+  if (h->u.weakdef != NULL)
-+    {
-+      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
-+		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
-+      h->root.u.def.section = h->u.weakdef->root.u.def.section;
-+      h->root.u.def.value = h->u.weakdef->root.u.def.value;
-+      return TRUE;
-+    }
-+
-+  /* This is a reference to a symbol defined by a dynamic object which
-+     is not a function.  */
-+
-+  /* If we are creating a shared library, we must presume that the
-+     only references to the symbol are via the global offset table.
-+     For such cases we need not do anything here; the relocations will
-+     be handled correctly by relocate_section.  */
-+  if (bfd_link_pic (info))
-+    return TRUE;
-+
-+  /* If there are no references to this symbol that do not use the
-+     GOT, we don't need to generate a copy reloc.  */
-+  if (!h->non_got_ref)
-+    return TRUE;
-+
-+  /* If -z nocopyreloc was given, we won't generate them either.  */
-+  if (info->nocopyreloc)
-+    {
-+      h->non_got_ref = 0;
-+      return TRUE;
-+    }
-+
-+  eh = (struct riscv_elf_link_hash_entry *) h;
-+  for (p = eh->dyn_relocs; p != NULL; p = p->next)
-+    {
-+      s = p->sec->output_section;
-+      if (s != NULL && (s->flags & SEC_READONLY) != 0)
-+	break;
-+    }
-+
-+  /* If we didn't find any dynamic relocs in read-only sections, then
-+     we'll be keeping the dynamic relocs and avoiding the copy reloc.  */
-+  if (p == NULL)
-+    {
-+      h->non_got_ref = 0;
-+      return TRUE;
-+    }
-+
-+  /* We must allocate the symbol in our .dynbss section, which will
-+     become part of the .bss section of the executable.  There will be
-+     an entry for this symbol in the .dynsym section.  The dynamic
-+     object will contain position independent code, so all references
-+     from the dynamic object to this symbol will go through the global
-+     offset table.  The dynamic linker will use the .dynsym entry to
-+     determine the address it must put in the global offset table, so
-+     both the dynamic object and the regular object will refer to the
-+     same memory location for the variable.  */
-+
-+  /* We must generate a R_RISCV_COPY reloc to tell the dynamic linker
-+     to copy the initial value out of the dynamic object and into the
-+     runtime process image.  We need to remember the offset into the
-+     .rel.bss section we are going to use.  */
-+  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
-+    {
-+      htab->srelbss->size += sizeof (ElfNN_External_Rela);
-+      h->needs_copy = 1;
-+    }
-+
-+  if (eh->tls_type & ~GOT_NORMAL)
-+    return _bfd_elf_adjust_dynamic_copy (info, h, htab->sdyntdata);
-+
-+  return _bfd_elf_adjust_dynamic_copy (info, h, htab->sdynbss);
-+}
-+
-+/* Allocate space in .plt, .got and associated reloc sections for
-+   dynamic relocs.  */
-+
-+static bfd_boolean
-+allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
-+{
-+  struct bfd_link_info *info;
-+  struct riscv_elf_link_hash_table *htab;
-+  struct riscv_elf_link_hash_entry *eh;
-+  struct riscv_elf_dyn_relocs *p;
-+
-+  if (h->root.type == bfd_link_hash_indirect)
-+    return TRUE;
-+
-+  info = (struct bfd_link_info *) inf;
-+  htab = riscv_elf_hash_table (info);
-+  BFD_ASSERT (htab != NULL);
-+
-+  if (htab->elf.dynamic_sections_created
-+      && h->plt.refcount > 0)
-+    {
-+      /* Make sure this symbol is output as a dynamic symbol.
-+	 Undefined weak syms won't yet be marked as dynamic.  */
-+      if (h->dynindx == -1
-+	  && !h->forced_local)
-+	{
-+	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+	    return FALSE;
-+	}
-+
-+      if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, bfd_link_pic (info), h))
-+	{
-+	  asection *s = htab->elf.splt;
-+
-+	  if (s->size == 0)
-+	    s->size = PLT_HEADER_SIZE;
-+
-+	  h->plt.offset = s->size;
-+
-+	  /* Make room for this entry.  */
-+	  s->size += PLT_ENTRY_SIZE;
-+
-+	  /* We also need to make an entry in the .got.plt section.  */
-+	  htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
-+
-+	  /* We also need to make an entry in the .rela.plt section.  */
-+	  htab->elf.srelplt->size += sizeof (ElfNN_External_Rela);
-+
-+	  /* If this symbol is not defined in a regular file, and we are
-+	     not generating a shared library, then set the symbol to this
-+	     location in the .plt.  This is required to make function
-+	     pointers compare as equal between the normal executable and
-+	     the shared library.  */
-+	  if (! bfd_link_pic (info)
-+	      && !h->def_regular)
-+	    {
-+	      h->root.u.def.section = s;
-+	      h->root.u.def.value = h->plt.offset;
-+	    }
-+	}
-+      else
-+	{
-+	  h->plt.offset = (bfd_vma) -1;
-+	  h->needs_plt = 0;
-+	}
-+    }
-+  else
-+    {
-+      h->plt.offset = (bfd_vma) -1;
-+      h->needs_plt = 0;
-+    }
-+
-+  if (h->got.refcount > 0)
-+    {
-+      asection *s;
-+      bfd_boolean dyn;
-+      int tls_type = riscv_elf_hash_entry (h)->tls_type;
-+
-+      /* Make sure this symbol is output as a dynamic symbol.
-+	 Undefined weak syms won't yet be marked as dynamic.  */
-+      if (h->dynindx == -1
-+	  && !h->forced_local)
-+	{
-+	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+	    return FALSE;
-+	}
-+
-+      s = htab->elf.sgot;
-+      h->got.offset = s->size;
-+      dyn = htab->elf.dynamic_sections_created;
-+      if (tls_type & (GOT_TLS_GD | GOT_TLS_IE))
-+	{
-+	  /* TLS_GD needs two dynamic relocs and two GOT slots.  */
-+	  if (tls_type & GOT_TLS_GD)
-+	    {
-+	      s->size += 2 * RISCV_ELF_WORD_BYTES;
-+	      htab->elf.srelgot->size += 2 * sizeof (ElfNN_External_Rela);
-+	    }
-+
-+	  /* TLS_IE needs one dynamic reloc and one GOT slot.  */
-+	  if (tls_type & GOT_TLS_IE)
-+	    {
-+	      s->size += RISCV_ELF_WORD_BYTES;
-+	      htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
-+	    }
-+	}
-+      else
-+	{
-+	  s->size += RISCV_ELF_WORD_BYTES;
-+	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h))
-+	    htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
-+	}
-+    }
-+  else
-+    h->got.offset = (bfd_vma) -1;
-+
-+  eh = (struct riscv_elf_link_hash_entry *) h;
-+  if (eh->dyn_relocs == NULL)
-+    return TRUE;
-+
-+  /* In the shared -Bsymbolic case, discard space allocated for
-+     dynamic pc-relative relocs against symbols which turn out to be
-+     defined in regular objects.  For the normal shared case, discard
-+     space for pc-relative relocs that have become local due to symbol
-+     visibility changes.  */
-+
-+  if (bfd_link_pic (info))
-+    {
-+      if (SYMBOL_CALLS_LOCAL (info, h))
-+	{
-+	  struct riscv_elf_dyn_relocs **pp;
-+
-+	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
-+	    {
-+	      p->count -= p->pc_count;
-+	      p->pc_count = 0;
-+	      if (p->count == 0)
-+		*pp = p->next;
-+	      else
-+		pp = &p->next;
-+	    }
-+	}
-+
-+      /* Also discard relocs on undefined weak syms with non-default
-+	 visibility.  */
-+      if (eh->dyn_relocs != NULL
-+	  && h->root.type == bfd_link_hash_undefweak)
-+	{
-+	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
-+	    eh->dyn_relocs = NULL;
-+
-+	  /* Make sure undefined weak symbols are output as a dynamic
-+	     symbol in PIEs.  */
-+	  else if (h->dynindx == -1
-+		   && !h->forced_local)
-+	    {
-+	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+		return FALSE;
-+	    }
-+	}
-+    }
-+  else
-+    {
-+      /* For the non-shared case, discard space for relocs against
-+	 symbols which turn out to need copy relocs or are not
-+	 dynamic.  */
-+
-+      if (!h->non_got_ref
-+	  && ((h->def_dynamic
-+	       && !h->def_regular)
-+	      || (htab->elf.dynamic_sections_created
-+		  && (h->root.type == bfd_link_hash_undefweak
-+		      || h->root.type == bfd_link_hash_undefined))))
-+	{
-+	  /* Make sure this symbol is output as a dynamic symbol.
-+	     Undefined weak syms won't yet be marked as dynamic.  */
-+	  if (h->dynindx == -1
-+	      && !h->forced_local)
-+	    {
-+	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+		return FALSE;
-+	    }
-+
-+	  /* If that succeeded, we know we'll be keeping all the
-+	     relocs.  */
-+	  if (h->dynindx != -1)
-+	    goto keep;
-+	}
-+
-+      eh->dyn_relocs = NULL;
-+
-+    keep: ;
-+    }
-+
-+  /* Finally, allocate space.  */
-+  for (p = eh->dyn_relocs; p != NULL; p = p->next)
-+    {
-+      asection *sreloc = elf_section_data (p->sec)->sreloc;
-+      sreloc->size += p->count * sizeof (ElfNN_External_Rela);
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Find any dynamic relocs that apply to read-only sections.  */
-+
-+static bfd_boolean
-+readonly_dynrelocs (struct elf_link_hash_entry *h, void *inf)
-+{
-+  struct riscv_elf_link_hash_entry *eh;
-+  struct riscv_elf_dyn_relocs *p;
-+
-+  eh = (struct riscv_elf_link_hash_entry *) h;
-+  for (p = eh->dyn_relocs; p != NULL; p = p->next)
-+    {
-+      asection *s = p->sec->output_section;
-+
-+      if (s != NULL && (s->flags & SEC_READONLY) != 0)
-+	{
-+	  ((struct bfd_link_info *) inf)->flags |= DF_TEXTREL;
-+	  return FALSE;
-+	}
-+    }
-+  return TRUE;
-+}
-+
-+static bfd_boolean
-+riscv_elf_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info)
-+{
-+  struct riscv_elf_link_hash_table *htab;
-+  bfd *dynobj;
-+  asection *s;
-+  bfd *ibfd;
-+
-+  htab = riscv_elf_hash_table (info);
-+  BFD_ASSERT (htab != NULL);
-+  dynobj = htab->elf.dynobj;
-+  BFD_ASSERT (dynobj != NULL);
-+
-+  if (elf_hash_table (info)->dynamic_sections_created)
-+    {
-+      /* Set the contents of the .interp section to the interpreter.  */
-+      if (bfd_link_executable (info) && !info->nointerp)
-+	{
-+	  s = bfd_get_linker_section (dynobj, ".interp");
-+	  BFD_ASSERT (s != NULL);
-+	  s->size = strlen (ELFNN_DYNAMIC_INTERPRETER) + 1;
-+	  s->contents = (unsigned char *) ELFNN_DYNAMIC_INTERPRETER;
-+	}
-+    }
-+
-+  /* Set up .got offsets for local syms, and space for local dynamic
-+     relocs.  */
-+  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
-+    {
-+      bfd_signed_vma *local_got;
-+      bfd_signed_vma *end_local_got;
-+      char *local_tls_type;
-+      bfd_size_type locsymcount;
-+      Elf_Internal_Shdr *symtab_hdr;
-+      asection *srel;
-+
-+      if (! is_riscv_elf (ibfd))
-+	continue;
-+
-+      for (s = ibfd->sections; s != NULL; s = s->next)
-+	{
-+	  struct riscv_elf_dyn_relocs *p;
-+
-+	  for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
-+	    {
-+	      if (!bfd_is_abs_section (p->sec)
-+		  && bfd_is_abs_section (p->sec->output_section))
-+		{
-+		  /* Input section has been discarded, either because
-+		     it is a copy of a linkonce section or due to
-+		     linker script /DISCARD/, so we'll be discarding
-+		     the relocs too.  */
-+		}
-+	      else if (p->count != 0)
-+		{
-+		  srel = elf_section_data (p->sec)->sreloc;
-+		  srel->size += p->count * sizeof (ElfNN_External_Rela);
-+		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
-+		    info->flags |= DF_TEXTREL;
-+		}
-+	    }
-+	}
-+
-+      local_got = elf_local_got_refcounts (ibfd);
-+      if (!local_got)
-+	continue;
-+
-+      symtab_hdr = &elf_symtab_hdr (ibfd);
-+      locsymcount = symtab_hdr->sh_info;
-+      end_local_got = local_got + locsymcount;
-+      local_tls_type = _bfd_riscv_elf_local_got_tls_type (ibfd);
-+      s = htab->elf.sgot;
-+      srel = htab->elf.srelgot;
-+      for (; local_got < end_local_got; ++local_got, ++local_tls_type)
-+	{
-+	  if (*local_got > 0)
-+	    {
-+	      *local_got = s->size;
-+	      s->size += RISCV_ELF_WORD_BYTES;
-+	      if (*local_tls_type & GOT_TLS_GD)
-+		s->size += RISCV_ELF_WORD_BYTES;
-+	      if (bfd_link_pic (info)
-+		  || (*local_tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
-+		srel->size += sizeof (ElfNN_External_Rela);
-+	    }
-+	  else
-+	    *local_got = (bfd_vma) -1;
-+	}
-+    }
-+
-+  /* Allocate global sym .plt and .got entries, and space for global
-+     sym dynamic relocs.  */
-+  elf_link_hash_traverse (&htab->elf, allocate_dynrelocs, info);
-+
-+  if (htab->elf.sgotplt)
-+    {
-+      struct elf_link_hash_entry *got;
-+      got = elf_link_hash_lookup (elf_hash_table (info),
-+				  "_GLOBAL_OFFSET_TABLE_",
-+				  FALSE, FALSE, FALSE);
-+
-+      /* Don't allocate .got.plt section if there are no GOT nor PLT
-+	 entries and there is no refeence to _GLOBAL_OFFSET_TABLE_.  */
-+      if ((got == NULL
-+	   || !got->ref_regular_nonweak)
-+	  && (htab->elf.sgotplt->size == GOTPLT_HEADER_SIZE)
-+	  && (htab->elf.splt == NULL
-+	      || htab->elf.splt->size == 0)
-+	  && (htab->elf.sgot == NULL
-+	      || (htab->elf.sgot->size
-+		  == get_elf_backend_data (output_bfd)->got_header_size)))
-+	htab->elf.sgotplt->size = 0;
-+    }
-+
-+  /* The check_relocs and adjust_dynamic_symbol entry points have
-+     determined the sizes of the various dynamic sections.  Allocate
-+     memory for them.  */
-+  for (s = dynobj->sections; s != NULL; s = s->next)
-+    {
-+      if ((s->flags & SEC_LINKER_CREATED) == 0)
-+	continue;
-+
-+      if (s == htab->elf.splt
-+	  || s == htab->elf.sgot
-+	  || s == htab->elf.sgotplt
-+	  || s == htab->sdynbss)
-+	{
-+	  /* Strip this section if we don't need it; see the
-+	     comment below.  */
-+	}
-+      else if (strncmp (s->name, ".rela", 5) == 0)
-+	{
-+	  if (s->size != 0)
-+	    {
-+	      /* We use the reloc_count field as a counter if we need
-+		 to copy relocs into the output file.  */
-+	      s->reloc_count = 0;
-+	    }
-+	}
-+      else
-+	{
-+	  /* It's not one of our sections.  */
-+	  continue;
-+	}
-+
-+      if (s->size == 0)
-+	{
-+	  /* If we don't need this section, strip it from the
-+	     output file.  This is mostly to handle .rela.bss and
-+	     .rela.plt.  We must create both sections in
-+	     create_dynamic_sections, because they must be created
-+	     before the linker maps input sections to output
-+	     sections.  The linker does that before
-+	     adjust_dynamic_symbol is called, and it is that
-+	     function which decides whether anything needs to go
-+	     into these sections.  */
-+	  s->flags |= SEC_EXCLUDE;
-+	  continue;
-+	}
-+
-+      if ((s->flags & SEC_HAS_CONTENTS) == 0)
-+	continue;
-+
-+      /* Allocate memory for the section contents.  Zero the memory
-+	 for the benefit of .rela.plt, which has 4 unused entries
-+	 at the beginning, and we don't want garbage.  */
-+      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
-+      if (s->contents == NULL)
-+	return FALSE;
-+    }
-+
-+  if (elf_hash_table (info)->dynamic_sections_created)
-+    {
-+      /* Add some entries to the .dynamic section.  We fill in the
-+	 values later, in riscv_elf_finish_dynamic_sections, but we
-+	 must add the entries now so that we get the correct size for
-+	 the .dynamic section.  The DT_DEBUG entry is filled in by the
-+	 dynamic linker and used by the debugger.  */
-+#define add_dynamic_entry(TAG, VAL) \
-+  _bfd_elf_add_dynamic_entry (info, TAG, VAL)
-+
-+      if (bfd_link_executable (info))
-+	{
-+	  if (!add_dynamic_entry (DT_DEBUG, 0))
-+	    return FALSE;
-+	}
-+
-+      if (htab->elf.srelplt->size != 0)
-+	{
-+	  if (!add_dynamic_entry (DT_PLTGOT, 0)
-+	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
-+	      || !add_dynamic_entry (DT_PLTREL, DT_RELA)
-+	      || !add_dynamic_entry (DT_JMPREL, 0))
-+	    return FALSE;
-+	}
-+
-+      if (!add_dynamic_entry (DT_RELA, 0)
-+	  || !add_dynamic_entry (DT_RELASZ, 0)
-+	  || !add_dynamic_entry (DT_RELAENT, sizeof (ElfNN_External_Rela)))
-+	return FALSE;
-+
-+      /* If any dynamic relocs apply to a read-only section,
-+	 then we need a DT_TEXTREL entry.  */
-+      if ((info->flags & DF_TEXTREL) == 0)
-+	elf_link_hash_traverse (&htab->elf, readonly_dynrelocs, info);
-+
-+      if (info->flags & DF_TEXTREL)
-+	{
-+	  if (!add_dynamic_entry (DT_TEXTREL, 0))
-+	    return FALSE;
-+	}
-+    }
-+#undef add_dynamic_entry
-+
-+  return TRUE;
-+}
-+
-+#define TP_OFFSET 0
-+#define DTP_OFFSET 0x800
-+
-+/* Return the relocation value for a TLS dtp-relative reloc.  */
-+
-+static bfd_vma
-+dtpoff (struct bfd_link_info *info, bfd_vma address)
-+{
-+  /* If tls_sec is NULL, we should have signalled an error already.  */
-+  if (elf_hash_table (info)->tls_sec == NULL)
-+    return 0;
-+  return address - elf_hash_table (info)->tls_sec->vma - DTP_OFFSET;
-+}
-+
-+/* Return the relocation value for a static TLS tp-relative relocation.  */
-+
-+static bfd_vma
-+tpoff (struct bfd_link_info *info, bfd_vma address)
-+{
-+  /* If tls_sec is NULL, we should have signalled an error already.  */
-+  if (elf_hash_table (info)->tls_sec == NULL)
-+    return 0;
-+  return address - elf_hash_table (info)->tls_sec->vma - TP_OFFSET;
-+}
-+
-+/* Return the global pointer's value, or 0 if it is not in use.  */
-+
-+static bfd_vma
-+riscv_global_pointer_value (struct bfd_link_info *info)
-+{
-+  struct bfd_link_hash_entry *h;
-+
-+  h = bfd_link_hash_lookup (info->hash, "_gp", FALSE, FALSE, TRUE);
-+  if (h == NULL || h->type != bfd_link_hash_defined)
-+    return 0;
-+
-+  return h->u.def.value + sec_addr (h->u.def.section);
-+}
-+
-+/* Emplace a static relocation.  */
-+
-+static bfd_reloc_status_type
-+perform_relocation (const reloc_howto_type *howto,
-+		    const Elf_Internal_Rela *rel,
-+		    bfd_vma value,
-+		    asection *input_section,
-+		    bfd *input_bfd,
-+		    bfd_byte *contents)
-+{
-+  if (howto->pc_relative)
-+    value -= sec_addr (input_section) + rel->r_offset;
-+  value += rel->r_addend;
-+
-+  switch (ELFNN_R_TYPE (rel->r_info))
-+    {
-+    case R_RISCV_HI20:
-+    case R_RISCV_TPREL_HI20:
-+    case R_RISCV_PCREL_HI20:
-+    case R_RISCV_GOT_HI20:
-+    case R_RISCV_TLS_GOT_HI20:
-+    case R_RISCV_TLS_GD_HI20:
-+      if (ARCH_SIZE > 32 && !VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value));
-+      break;
-+
-+    case R_RISCV_LO12_I:
-+    case R_RISCV_GPREL_I:
-+    case R_RISCV_TPREL_LO12_I:
-+    case R_RISCV_PCREL_LO12_I:
-+      value = ENCODE_ITYPE_IMM (value);
-+      break;
-+
-+    case R_RISCV_LO12_S:
-+    case R_RISCV_GPREL_S:
-+    case R_RISCV_TPREL_LO12_S:
-+    case R_RISCV_PCREL_LO12_S:
-+      value = ENCODE_STYPE_IMM (value);
-+      break;
-+
-+    case R_RISCV_CALL:
-+    case R_RISCV_CALL_PLT:
-+      if (ARCH_SIZE > 32 && !VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value))
-+	      | (ENCODE_ITYPE_IMM (value) << 32);
-+      break;
-+
-+    case R_RISCV_JAL:
-+      if (!VALID_UJTYPE_IMM (value))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_UJTYPE_IMM (value);
-+      break;
-+
-+    case R_RISCV_BRANCH:
-+      if (!VALID_SBTYPE_IMM (value))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_SBTYPE_IMM (value);
-+      break;
-+
-+    case R_RISCV_RVC_BRANCH:
-+      if (!VALID_RVC_B_IMM (value))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_RVC_B_IMM (value);
-+      break;
-+
-+    case R_RISCV_RVC_JUMP:
-+      if (!VALID_RVC_J_IMM (value))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_RVC_J_IMM (value);
-+      break;
-+
-+    case R_RISCV_RVC_LUI:
-+      if (!VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (value)))
-+	return bfd_reloc_overflow;
-+      value = ENCODE_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (value));
-+      break;
-+
-+    case R_RISCV_32:
-+    case R_RISCV_64:
-+    case R_RISCV_ADD8:
-+    case R_RISCV_ADD16:
-+    case R_RISCV_ADD32:
-+    case R_RISCV_ADD64:
-+    case R_RISCV_SUB8:
-+    case R_RISCV_SUB16:
-+    case R_RISCV_SUB32:
-+    case R_RISCV_SUB64:
-+    case R_RISCV_TLS_DTPREL32:
-+    case R_RISCV_TLS_DTPREL64:
-+      break;
-+
-+    default:
-+      return bfd_reloc_notsupported;
-+    }
-+
-+  bfd_vma word = bfd_get (howto->bitsize, input_bfd, contents + rel->r_offset);
-+  word = (word & ~howto->dst_mask) | (value & howto->dst_mask);
-+  bfd_put (howto->bitsize, input_bfd, word, contents + rel->r_offset);
-+
-+  return bfd_reloc_ok;
-+}
-+
-+/* Remember all PC-relative high-part relocs we've encountered to help us
-+   later resolve the corresponding low-part relocs.  */
-+
-+typedef struct {
-+  bfd_vma address;
-+  bfd_vma value;
-+} riscv_pcrel_hi_reloc;
-+
-+typedef struct riscv_pcrel_lo_reloc {
-+  asection *input_section;
-+  struct bfd_link_info *info;
-+  reloc_howto_type *howto;
-+  const Elf_Internal_Rela *reloc;
-+  bfd_vma addr;
-+  const char *name;
-+  bfd_byte *contents;
-+  struct riscv_pcrel_lo_reloc *next;
-+} riscv_pcrel_lo_reloc;
-+
-+typedef struct {
-+  htab_t hi_relocs;
-+  riscv_pcrel_lo_reloc *lo_relocs;
-+} riscv_pcrel_relocs;
-+
-+static hashval_t
-+riscv_pcrel_reloc_hash (const void *entry)
-+{
-+  const riscv_pcrel_hi_reloc *e = entry;
-+  return (hashval_t)(e->address >> 2);
-+}
-+
-+static bfd_boolean
-+riscv_pcrel_reloc_eq (const void *entry1, const void *entry2)
-+{
-+  const riscv_pcrel_hi_reloc *e1 = entry1, *e2 = entry2;
-+  return e1->address == e2->address;
-+}
-+
-+static bfd_boolean
-+riscv_init_pcrel_relocs (riscv_pcrel_relocs *p)
-+{
-+
-+  p->lo_relocs = NULL;
-+  p->hi_relocs = htab_create (1024, riscv_pcrel_reloc_hash,
-+			      riscv_pcrel_reloc_eq, free);
-+  return p->hi_relocs != NULL;
-+}
-+
-+static void
-+riscv_free_pcrel_relocs (riscv_pcrel_relocs *p)
-+{
-+  riscv_pcrel_lo_reloc *cur = p->lo_relocs;
-+  while (cur != NULL)
-+    {
-+      riscv_pcrel_lo_reloc *next = cur->next;
-+      free (cur);
-+      cur = next;
-+    }
-+
-+  htab_delete (p->hi_relocs);
-+}
-+
-+static bfd_boolean
-+riscv_record_pcrel_hi_reloc (riscv_pcrel_relocs *p, bfd_vma addr, bfd_vma value)
-+{
-+  riscv_pcrel_hi_reloc entry = {addr, value - addr};
-+  riscv_pcrel_hi_reloc **slot =
-+    (riscv_pcrel_hi_reloc **) htab_find_slot (p->hi_relocs, &entry, INSERT);
-+  BFD_ASSERT (*slot == NULL);
-+  *slot = (riscv_pcrel_hi_reloc *) bfd_malloc (sizeof (riscv_pcrel_hi_reloc));
-+  if (*slot == NULL)
-+    return FALSE;
-+  **slot = entry;
-+  return TRUE;
-+}
-+
-+static bfd_boolean
-+riscv_record_pcrel_lo_reloc (riscv_pcrel_relocs *p,
-+			     asection *input_section,
-+			     struct bfd_link_info *info,
-+			     reloc_howto_type *howto,
-+			     const Elf_Internal_Rela *reloc,
-+			     bfd_vma addr,
-+			     const char *name,
-+			     bfd_byte *contents)
-+{
-+  riscv_pcrel_lo_reloc *entry;
-+  entry = (riscv_pcrel_lo_reloc *) bfd_malloc (sizeof (riscv_pcrel_lo_reloc));
-+  if (entry == NULL)
-+    return FALSE;
-+  *entry = (riscv_pcrel_lo_reloc) {input_section, info, howto, reloc, addr,
-+				   name, contents, p->lo_relocs};
-+  p->lo_relocs = entry;
-+  return TRUE;
-+}
-+
-+static bfd_boolean
-+riscv_resolve_pcrel_lo_relocs (riscv_pcrel_relocs *p)
-+{
-+  riscv_pcrel_lo_reloc *r;
-+  for (r = p->lo_relocs; r != NULL; r = r->next)
-+    {
-+      bfd *input_bfd = r->input_section->owner;
-+      riscv_pcrel_hi_reloc search = {r->addr, 0};
-+      riscv_pcrel_hi_reloc *entry = htab_find (p->hi_relocs, &search);
-+      if (entry == NULL)
-+	return ((*r->info->callbacks->reloc_overflow)
-+		 (r->info, NULL, r->name, r->howto->name, (bfd_vma) 0,
-+		  input_bfd, r->input_section, r->reloc->r_offset));
-+
-+      perform_relocation (r->howto, r->reloc, entry->value, r->input_section,
-+			  input_bfd, r->contents);
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Relocate a RISC-V ELF section.
-+
-+   The RELOCATE_SECTION function is called by the new ELF backend linker
-+   to handle the relocations for a section.
-+
-+   The relocs are always passed as Rela structures.
-+
-+   This function is responsible for adjusting the section contents as
-+   necessary, and (if generating a relocatable output file) adjusting
-+   the reloc addend as necessary.
-+
-+   This function does not have to worry about setting the reloc
-+   address or the reloc symbol index.
-+
-+   LOCAL_SYMS is a pointer to the swapped in local symbols.
-+
-+   LOCAL_SECTIONS is an array giving the section in the input file
-+   corresponding to the st_shndx field of each local symbol.
-+
-+   The global hash table entry for the global symbols can be found
-+   via elf_sym_hashes (input_bfd).
-+
-+   When generating relocatable output, this function must handle
-+   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
-+   going to be the section symbol corresponding to the output
-+   section, which means that the addend must be adjusted
-+   accordingly.  */
-+
-+static bfd_boolean
-+riscv_elf_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
-+			    bfd *input_bfd, asection *input_section,
-+			    bfd_byte *contents, Elf_Internal_Rela *relocs,
-+			    Elf_Internal_Sym *local_syms,
-+			    asection **local_sections)
-+{
-+  Elf_Internal_Rela *rel;
-+  Elf_Internal_Rela *relend;
-+  riscv_pcrel_relocs pcrel_relocs;
-+  bfd_boolean ret = FALSE;
-+  asection *sreloc = elf_section_data (input_section)->sreloc;
-+  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+  Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (input_bfd);
-+  struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (input_bfd);
-+  bfd_vma *local_got_offsets = elf_local_got_offsets (input_bfd);
-+
-+  if (!riscv_init_pcrel_relocs (&pcrel_relocs))
-+    return FALSE;
-+
-+  relend = relocs + input_section->reloc_count;
-+  for (rel = relocs; rel < relend; rel++)
-+    {
-+      unsigned long r_symndx;
-+      struct elf_link_hash_entry *h;
-+      Elf_Internal_Sym *sym;
-+      asection *sec;
-+      bfd_vma relocation;
-+      bfd_reloc_status_type r = bfd_reloc_ok;
-+      const char *name;
-+      bfd_vma off, ie_off;
-+      bfd_boolean unresolved_reloc, is_ie = FALSE;
-+      bfd_vma pc = sec_addr (input_section) + rel->r_offset;
-+      int r_type = ELFNN_R_TYPE (rel->r_info), tls_type;
-+      reloc_howto_type *howto = riscv_elf_rtype_to_howto (r_type);
-+      const char *msg = NULL;
-+
-+      if (r_type == R_RISCV_GNU_VTINHERIT || r_type == R_RISCV_GNU_VTENTRY)
-+	continue;
-+
-+      /* This is a final link.  */
-+      r_symndx = ELFNN_R_SYM (rel->r_info);
-+      h = NULL;
-+      sym = NULL;
-+      sec = NULL;
-+      unresolved_reloc = FALSE;
-+      if (r_symndx < symtab_hdr->sh_info)
-+	{
-+	  sym = local_syms + r_symndx;
-+	  sec = local_sections[r_symndx];
-+	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
-+	}
-+      else
-+	{
-+	  bfd_boolean warned, ignored;
-+
-+	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
-+				   r_symndx, symtab_hdr, sym_hashes,
-+				   h, sec, relocation,
-+				   unresolved_reloc, warned, ignored);
-+	  if (warned)
-+	    {
-+	      /* To avoid generating warning messages about truncated
-+		 relocations, set the relocation's address to be the same as
-+		 the start of this section.  */
-+	      if (input_section->output_section != NULL)
-+		relocation = input_section->output_section->vma;
-+	      else
-+		relocation = 0;
-+	    }
-+	}
-+
-+      if (sec != NULL && discarded_section (sec))
-+	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
-+					 rel, 1, relend, howto, 0, contents);
-+
-+      if (bfd_link_relocatable (info))
-+	continue;
-+
-+      if (h != NULL)
-+	name = h->root.root.string;
-+      else
-+	{
-+	  name = (bfd_elf_string_from_elf_section
-+		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
-+	  if (name == NULL || *name == '\0')
-+	    name = bfd_section_name (input_bfd, sec);
-+	}
-+
-+      switch (r_type)
-+	{
-+	case R_RISCV_NONE:
-+	case R_RISCV_TPREL_ADD:
-+	case R_RISCV_COPY:
-+	case R_RISCV_JUMP_SLOT:
-+	case R_RISCV_RELATIVE:
-+	  /* These require nothing of us at all.  */
-+	  continue;
-+
-+	case R_RISCV_HI20:
-+	case R_RISCV_BRANCH:
-+	case R_RISCV_RVC_BRANCH:
-+	case R_RISCV_RVC_LUI:
-+	case R_RISCV_LO12_I:
-+	case R_RISCV_LO12_S:
-+	  /* These require no special handling beyond perform_relocation.  */
-+	  break;
-+
-+	case R_RISCV_GOT_HI20:
-+	  if (h != NULL)
-+	    {
-+	      bfd_boolean dyn, pic;
-+
-+	      off = h->got.offset;
-+	      BFD_ASSERT (off != (bfd_vma) -1);
-+	      dyn = elf_hash_table (info)->dynamic_sections_created;
-+	      pic = bfd_link_pic (info);
-+
-+	      if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, pic, h)
-+		  || (pic && SYMBOL_REFERENCES_LOCAL (info, h)))
-+		{
-+		  /* This is actually a static link, or it is a
-+		     -Bsymbolic link and the symbol is defined
-+		     locally, or the symbol was forced to be local
-+		     because of a version file.  We must initialize
-+		     this entry in the global offset table.  Since the
-+		     offset must always be a multiple of the word size,
-+		     we use the least significant bit to record whether
-+		     we have initialized it already.
-+
-+		     When doing a dynamic link, we create a .rela.got
-+		     relocation entry to initialize the value.  This
-+		     is done in the finish_dynamic_symbol routine.  */
-+		  if ((off & 1) != 0)
-+		    off &= ~1;
-+		  else
-+		    {
-+		      bfd_put_NN (output_bfd, relocation,
-+				  htab->elf.sgot->contents + off);
-+		      h->got.offset |= 1;
-+		    }
-+		}
-+	      else
-+		unresolved_reloc = FALSE;
-+	    }
-+	  else
-+	    {
-+	      BFD_ASSERT (local_got_offsets != NULL
-+			  && local_got_offsets[r_symndx] != (bfd_vma) -1);
-+
-+	      off = local_got_offsets[r_symndx];
-+
-+	      /* The offset must always be a multiple of the word size.
-+		 So, we can use the least significant bit to record
-+		 whether we have already processed this entry.  */
-+	      if ((off & 1) != 0)
-+		off &= ~1;
-+	      else
-+		{
-+		  if (bfd_link_pic (info))
-+		    {
-+		      asection *s;
-+		      Elf_Internal_Rela outrel;
-+
-+		      /* We need to generate a R_RISCV_RELATIVE reloc
-+			 for the dynamic linker.  */
-+		      s = htab->elf.srelgot;
-+		      BFD_ASSERT (s != NULL);
-+
-+		      outrel.r_offset = sec_addr (htab->elf.sgot) + off;
-+		      outrel.r_info =
-+			ELFNN_R_INFO (0, R_RISCV_RELATIVE);
-+		      outrel.r_addend = relocation;
-+		      relocation = 0;
-+		      riscv_elf_append_rela (output_bfd, s, &outrel);
-+		    }
-+
-+		  bfd_put_NN (output_bfd, relocation,
-+			      htab->elf.sgot->contents + off);
-+		  local_got_offsets[r_symndx] |= 1;
-+		}
-+	    }
-+	  relocation = sec_addr (htab->elf.sgot) + off;
-+	  if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
-+	    r = bfd_reloc_overflow;
-+	  break;
-+
-+	case R_RISCV_ADD8:
-+	case R_RISCV_ADD16:
-+	case R_RISCV_ADD32:
-+	case R_RISCV_ADD64:
-+	  {
-+	    bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
-+					 contents + rel->r_offset);
-+	    relocation = old_value + relocation;
-+	  }
-+	  break;
-+
-+	case R_RISCV_SUB8:
-+	case R_RISCV_SUB16:
-+	case R_RISCV_SUB32:
-+	case R_RISCV_SUB64:
-+	  {
-+	    bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
-+					 contents + rel->r_offset);
-+	    relocation = old_value - relocation;
-+	  }
-+	  break;
-+
-+	case R_RISCV_CALL_PLT:
-+	case R_RISCV_CALL:
-+	case R_RISCV_JAL:
-+	case R_RISCV_RVC_JUMP:
-+	  if (bfd_link_pic (info) && h != NULL && h->plt.offset != MINUS_ONE)
-+	    {
-+	      /* Refer to the PLT entry.  */
-+	      relocation = sec_addr (htab->elf.splt) + h->plt.offset;
-+	      unresolved_reloc = FALSE;
-+	    }
-+	  break;
-+
-+	case R_RISCV_TPREL_HI20:
-+	  relocation = tpoff (info, relocation);
-+	  break;
-+
-+	case R_RISCV_TPREL_LO12_I:
-+	case R_RISCV_TPREL_LO12_S:
-+	  relocation = tpoff (info, relocation);
-+	  if (VALID_ITYPE_IMM (relocation + rel->r_addend))
-+	    {
-+	      /* We can use tp as the base register.  */
-+	      bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
-+	      insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
-+	      insn |= X_TP << OP_SH_RS1;
-+	      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
-+	    }
-+	  break;
-+
-+	case R_RISCV_GPREL_I:
-+	case R_RISCV_GPREL_S:
-+	  {
-+	    bfd_vma gp = riscv_global_pointer_value (info);
-+	    bfd_boolean x0_base = VALID_ITYPE_IMM (relocation + rel->r_addend);
-+	    if (x0_base || VALID_ITYPE_IMM (relocation + rel->r_addend - gp))
-+	      {
-+		/* We can use x0 or gp as the base register.  */
-+		bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
-+		insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
-+		if (!x0_base)
-+		  {
-+		    rel->r_addend -= gp;
-+		    insn |= X_GP << OP_SH_RS1;
-+		  }
-+		bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
-+	      }
-+	    else
-+	      r = bfd_reloc_overflow;
-+	    break;
-+	  }
-+
-+	case R_RISCV_PCREL_HI20:
-+	  if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc,
-+					    relocation + rel->r_addend))
-+	    r = bfd_reloc_overflow;
-+	  break;
-+
-+	case R_RISCV_PCREL_LO12_I:
-+	case R_RISCV_PCREL_LO12_S:
-+	  if (riscv_record_pcrel_lo_reloc (&pcrel_relocs, input_section, info,
-+					   howto, rel, relocation, name,
-+					   contents))
-+	    continue;
-+	  r = bfd_reloc_overflow;
-+	  break;
-+
-+	case R_RISCV_TLS_DTPREL32:
-+	case R_RISCV_TLS_DTPREL64:
-+	  relocation = dtpoff (info, relocation);
-+	  break;
-+
-+	case R_RISCV_32:
-+	case R_RISCV_64:
-+	  if ((input_section->flags & SEC_ALLOC) == 0)
-+	    break;
-+
-+	  if ((bfd_link_pic (info)
-+	       && (h == NULL
-+		   || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
-+		   || h->root.type != bfd_link_hash_undefweak)
-+	       && (! howto->pc_relative
-+		   || !SYMBOL_CALLS_LOCAL (info, h)))
-+	      || (!bfd_link_pic (info)
-+		  && h != NULL
-+		  && h->dynindx != -1
-+		  && !h->non_got_ref
-+		  && ((h->def_dynamic
-+		       && !h->def_regular)
-+		      || h->root.type == bfd_link_hash_undefweak
-+		      || h->root.type == bfd_link_hash_undefined)))
-+	    {
-+	      Elf_Internal_Rela outrel;
-+	      bfd_boolean skip_static_relocation, skip_dynamic_relocation;
-+
-+	      /* When generating a shared object, these relocations
-+		 are copied into the output file to be resolved at run
-+		 time.  */
-+
-+	      outrel.r_offset =
-+		_bfd_elf_section_offset (output_bfd, info, input_section,
-+					 rel->r_offset);
-+	      skip_static_relocation = outrel.r_offset != (bfd_vma) -2;
-+	      skip_dynamic_relocation = outrel.r_offset >= (bfd_vma) -2;
-+	      outrel.r_offset += sec_addr (input_section);
-+
-+	      if (skip_dynamic_relocation)
-+		memset (&outrel, 0, sizeof outrel);
-+	      else if (h != NULL && h->dynindx != -1
-+		       && !(bfd_link_pic (info)
-+			    && SYMBOLIC_BIND (info, h)
-+			    && h->def_regular))
-+		{
-+		  outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
-+		  outrel.r_addend = rel->r_addend;
-+		}
-+	      else
-+		{
-+		  outrel.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
-+		  outrel.r_addend = relocation + rel->r_addend;
-+		}
-+
-+	      riscv_elf_append_rela (output_bfd, sreloc, &outrel);
-+	      if (skip_static_relocation)
-+		continue;
-+	    }
-+	  break;
-+
-+	case R_RISCV_TLS_GOT_HI20:
-+	  is_ie = TRUE;
-+	  /* Fall through.  */
-+
-+	case R_RISCV_TLS_GD_HI20:
-+	  if (h != NULL)
-+	    {
-+	      off = h->got.offset;
-+	      h->got.offset |= 1;
-+	    }
-+	  else
-+	    {
-+	      off = local_got_offsets[r_symndx];
-+	      local_got_offsets[r_symndx] |= 1;
-+	    }
-+
-+	  tls_type = _bfd_riscv_elf_tls_type (input_bfd, h, r_symndx);
-+	  BFD_ASSERT (tls_type & (GOT_TLS_IE | GOT_TLS_GD));
-+	  /* If this symbol is referenced by both GD and IE TLS, the IE
-+	     reference's GOT slot follows the GD reference's slots.  */
-+	  ie_off = 0;
-+	  if ((tls_type & GOT_TLS_GD) && (tls_type & GOT_TLS_IE))
-+	    ie_off = 2 * GOT_ENTRY_SIZE;
-+
-+	  if ((off & 1) != 0)
-+	    off &= ~1;
-+	  else
-+	    {
-+	      Elf_Internal_Rela outrel;
-+	      int indx = 0;
-+	      bfd_boolean need_relocs = FALSE;
-+
-+	      if (htab->elf.srelgot == NULL)
-+		abort ();
-+
-+	      if (h != NULL)
-+		{
-+		  bfd_boolean dyn, pic;
-+		  dyn = htab->elf.dynamic_sections_created;
-+		  pic = bfd_link_pic (info);
-+
-+		  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, pic, h)
-+		      && (!pic || !SYMBOL_REFERENCES_LOCAL (info, h)))
-+		    indx = h->dynindx;
-+		}
-+
-+	      /* The GOT entries have not been initialized yet.  Do it
-+	         now, and emit any relocations.  */
-+	      if ((bfd_link_pic (info) || indx != 0)
-+		  && (h == NULL
-+		      || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
-+		      || h->root.type != bfd_link_hash_undefweak))
-+		    need_relocs = TRUE;
-+
-+	      if (tls_type & GOT_TLS_GD)
-+		{
-+		  if (need_relocs)
-+		    {
-+		      outrel.r_offset = sec_addr (htab->elf.sgot) + off;
-+		      outrel.r_addend = 0;
-+		      outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPMODNN);
-+		      bfd_put_NN (output_bfd, 0,
-+				  htab->elf.sgot->contents + off);
-+		      riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
-+		      if (indx == 0)
-+			{
-+			  BFD_ASSERT (! unresolved_reloc);
-+			  bfd_put_NN (output_bfd,
-+				      dtpoff (info, relocation),
-+				      (htab->elf.sgot->contents + off +
-+				       RISCV_ELF_WORD_BYTES));
-+			}
-+		      else
-+			{
-+			  bfd_put_NN (output_bfd, 0,
-+				      (htab->elf.sgot->contents + off +
-+				       RISCV_ELF_WORD_BYTES));
-+			  outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPRELNN);
-+			  outrel.r_offset += RISCV_ELF_WORD_BYTES;
-+			  riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
-+			}
-+		    }
-+		  else
-+		    {
-+		      /* If we are not emitting relocations for a
-+			 general dynamic reference, then we must be in a
-+			 static link or an executable link with the
-+			 symbol binding locally.  Mark it as belonging
-+			 to module 1, the executable.  */
-+		      bfd_put_NN (output_bfd, 1,
-+				  htab->elf.sgot->contents + off);
-+		      bfd_put_NN (output_bfd,
-+				  dtpoff (info, relocation),
-+				  (htab->elf.sgot->contents + off +
-+				   RISCV_ELF_WORD_BYTES));
-+		   }
-+		}
-+
-+	      if (tls_type & GOT_TLS_IE)
-+		{
-+		  if (need_relocs)
-+		    {
-+		      bfd_put_NN (output_bfd, 0,
-+				  htab->elf.sgot->contents + off + ie_off);
-+		      outrel.r_offset = sec_addr (htab->elf.sgot)
-+				       + off + ie_off;
-+		      outrel.r_addend = 0;
-+		      if (indx == 0)
-+			outrel.r_addend = tpoff (info, relocation);
-+		      outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_TPRELNN);
-+		      riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
-+		    }
-+		  else
-+		    {
-+		      bfd_put_NN (output_bfd, tpoff (info, relocation),
-+				  htab->elf.sgot->contents + off + ie_off);
-+		    }
-+		}
-+	    }
-+
-+	  BFD_ASSERT (off < (bfd_vma) -2);
-+	  relocation = sec_addr (htab->elf.sgot) + off + (is_ie ? ie_off : 0);
-+	  if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
-+	    r = bfd_reloc_overflow;
-+	  unresolved_reloc = FALSE;
-+	  break;
-+
-+	default:
-+	  r = bfd_reloc_notsupported;
-+	}
-+
-+      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
-+	 because such sections are not SEC_ALLOC and thus ld.so will
-+	 not process them.  */
-+      if (unresolved_reloc
-+	  && !((input_section->flags & SEC_DEBUGGING) != 0
-+	       && h->def_dynamic)
-+	  && _bfd_elf_section_offset (output_bfd, info, input_section,
-+				      rel->r_offset) != (bfd_vma) -1)
-+	{
-+	  (*_bfd_error_handler)
-+	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
-+	     input_bfd,
-+	     input_section,
-+	     (long) rel->r_offset,
-+	     howto->name,
-+	     h->root.root.string);
-+	  continue;
-+	}
-+
-+      if (r == bfd_reloc_ok)
-+	r = perform_relocation (howto, rel, relocation, input_section,
-+				input_bfd, contents);
-+
-+      switch (r)
-+	{
-+	case bfd_reloc_ok:
-+	  continue;
-+
-+	case bfd_reloc_overflow:
-+	  r = info->callbacks->reloc_overflow
-+	    (info, (h ? &h->root : NULL), name, howto->name,
-+	     (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
-+	  break;
-+
-+	case bfd_reloc_undefined:
-+	  r = info->callbacks->undefined_symbol
-+	    (info, name, input_bfd, input_section, rel->r_offset,
-+	     TRUE);
-+	  break;
-+
-+	case bfd_reloc_outofrange:
-+	  msg = _("internal error: out of range error");
-+	  break;
-+
-+	case bfd_reloc_notsupported:
-+	  msg = _("internal error: unsupported relocation error");
-+	  break;
-+
-+	case bfd_reloc_dangerous:
-+	  msg = _("internal error: dangerous relocation");
-+	  break;
-+
-+	default:
-+	  msg = _("internal error: unknown error");
-+	  break;
-+	}
-+
-+      if (msg)
-+	r = info->callbacks->warning
-+	  (info, msg, name, input_bfd, input_section, rel->r_offset);
-+      goto out;
-+    }
-+
-+  ret = riscv_resolve_pcrel_lo_relocs (&pcrel_relocs);
-+out:
-+  riscv_free_pcrel_relocs (&pcrel_relocs);
-+  return ret;
-+}
-+
-+/* Finish up dynamic symbol handling.  We set the contents of various
-+   dynamic sections here.  */
-+
-+static bfd_boolean
-+riscv_elf_finish_dynamic_symbol (bfd *output_bfd,
-+				 struct bfd_link_info *info,
-+				 struct elf_link_hash_entry *h,
-+				 Elf_Internal_Sym *sym)
-+{
-+  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+  const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
-+
-+  if (h->plt.offset != (bfd_vma) -1)
-+    {
-+      /* We've decided to create a PLT entry for this symbol.  */
-+      bfd_byte *loc;
-+      bfd_vma i, header_address, plt_idx, got_address;
-+      uint32_t plt_entry[PLT_ENTRY_INSNS];
-+      Elf_Internal_Rela rela;
-+
-+      BFD_ASSERT (h->dynindx != -1);
-+
-+      /* Calculate the address of the PLT header.  */
-+      header_address = sec_addr (htab->elf.splt);
-+
-+      /* Calculate the index of the entry.  */
-+      plt_idx = (h->plt.offset - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
-+
-+      /* Calculate the address of the .got.plt entry.  */
-+      got_address = riscv_elf_got_plt_val (plt_idx, info);
-+
-+      /* Find out where the .plt entry should go.  */
-+      loc = htab->elf.splt->contents + h->plt.offset;
-+
-+      /* Fill in the PLT entry itself.  */
-+      riscv_make_plt_entry (got_address, header_address + h->plt.offset,
-+			    plt_entry);
-+      for (i = 0; i < PLT_ENTRY_INSNS; i++)
-+	bfd_put_32 (output_bfd, plt_entry[i], loc + 4*i);
-+
-+      /* Fill in the initial value of the .got.plt entry.  */
-+      loc = htab->elf.sgotplt->contents
-+	    + (got_address - sec_addr (htab->elf.sgotplt));
-+      bfd_put_NN (output_bfd, sec_addr (htab->elf.splt), loc);
-+
-+      /* Fill in the entry in the .rela.plt section.  */
-+      rela.r_offset = got_address;
-+      rela.r_addend = 0;
-+      rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_JUMP_SLOT);
-+
-+      loc = htab->elf.srelplt->contents + plt_idx * sizeof (ElfNN_External_Rela);
-+      bed->s->swap_reloca_out (output_bfd, &rela, loc);
-+
-+      if (!h->def_regular)
-+	{
-+	  /* Mark the symbol as undefined, rather than as defined in
-+	     the .plt section.  Leave the value alone.  */
-+	  sym->st_shndx = SHN_UNDEF;
-+	  /* If the symbol is weak, we do need to clear the value.
-+	     Otherwise, the PLT entry would provide a definition for
-+	     the symbol even if the symbol wasn't defined anywhere,
-+	     and so the symbol would never be NULL.  */
-+	  if (!h->ref_regular_nonweak)
-+	    sym->st_value = 0;
-+	}
-+    }
-+
-+  if (h->got.offset != (bfd_vma) -1
-+      && !(riscv_elf_hash_entry(h)->tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
-+    {
-+      asection *sgot;
-+      asection *srela;
-+      Elf_Internal_Rela rela;
-+
-+      /* This symbol has an entry in the GOT.  Set it up.  */
-+
-+      sgot = htab->elf.sgot;
-+      srela = htab->elf.srelgot;
-+      BFD_ASSERT (sgot != NULL && srela != NULL);
-+
-+      rela.r_offset = sec_addr (sgot) + (h->got.offset &~ (bfd_vma) 1);
-+
-+      /* If this is a -Bsymbolic link, and the symbol is defined
-+	 locally, we just want to emit a RELATIVE reloc.  Likewise if
-+	 the symbol was forced to be local because of a version file.
-+	 The entry in the global offset table will already have been
-+	 initialized in the relocate_section function.  */
-+      if (bfd_link_pic (info)
-+	  && (info->symbolic || h->dynindx == -1)
-+	  && h->def_regular)
-+	{
-+	  asection *sec = h->root.u.def.section;
-+	  rela.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
-+	  rela.r_addend = (h->root.u.def.value
-+			   + sec->output_section->vma
-+			   + sec->output_offset);
-+	}
-+      else
-+	{
-+	  BFD_ASSERT (h->dynindx != -1);
-+	  rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_NN);
-+	  rela.r_addend = 0;
-+	}
-+
-+      bfd_put_NN (output_bfd, 0,
-+		  sgot->contents + (h->got.offset & ~(bfd_vma) 1));
-+      riscv_elf_append_rela (output_bfd, srela, &rela);
-+    }
-+
-+  if (h->needs_copy)
-+    {
-+      Elf_Internal_Rela rela;
-+
-+      /* This symbol needs a copy reloc.  Set it up.  */
-+      BFD_ASSERT (h->dynindx != -1);
-+
-+      rela.r_offset = sec_addr (h->root.u.def.section) + h->root.u.def.value;
-+      rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_COPY);
-+      rela.r_addend = 0;
-+      riscv_elf_append_rela (output_bfd, htab->srelbss, &rela);
-+    }
-+
-+  /* Mark some specially defined symbols as absolute.  */
-+  if (h == htab->elf.hdynamic
-+      || (h == htab->elf.hgot || h == htab->elf.hplt))
-+    sym->st_shndx = SHN_ABS;
-+
-+  return TRUE;
-+}
-+
-+/* Finish up the dynamic sections.  */
-+
-+static bfd_boolean
-+riscv_finish_dyn (bfd *output_bfd, struct bfd_link_info *info,
-+		  bfd *dynobj, asection *sdyn)
-+{
-+  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+  const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
-+  size_t dynsize = bed->s->sizeof_dyn;
-+  bfd_byte *dyncon, *dynconend;
-+
-+  dynconend = sdyn->contents + sdyn->size;
-+  for (dyncon = sdyn->contents; dyncon < dynconend; dyncon += dynsize)
-+    {
-+      Elf_Internal_Dyn dyn;
-+      asection *s;
-+
-+      bed->s->swap_dyn_in (dynobj, dyncon, &dyn);
-+
-+      switch (dyn.d_tag)
-+	{
-+	case DT_PLTGOT:
-+	  s = htab->elf.sgotplt;
-+	  dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
-+	  break;
-+	case DT_JMPREL:
-+	  s = htab->elf.srelplt;
-+	  dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
-+	  break;
-+	case DT_PLTRELSZ:
-+	  s = htab->elf.srelplt;
-+	  dyn.d_un.d_val = s->size;
-+	  break;
-+	default:
-+	  continue;
-+	}
-+
-+      bed->s->swap_dyn_out (output_bfd, &dyn, dyncon);
-+    }
-+  return TRUE;
-+}
-+
-+static bfd_boolean
-+riscv_elf_finish_dynamic_sections (bfd *output_bfd,
-+				   struct bfd_link_info *info)
-+{
-+  bfd *dynobj;
-+  asection *sdyn;
-+  struct riscv_elf_link_hash_table *htab;
-+
-+  htab = riscv_elf_hash_table (info);
-+  BFD_ASSERT (htab != NULL);
-+  dynobj = htab->elf.dynobj;
-+
-+  sdyn = bfd_get_linker_section (dynobj, ".dynamic");
-+
-+  if (elf_hash_table (info)->dynamic_sections_created)
-+    {
-+      asection *splt;
-+      bfd_boolean ret;
-+
-+      splt = htab->elf.splt;
-+      BFD_ASSERT (splt != NULL && sdyn != NULL);
-+
-+      ret = riscv_finish_dyn (output_bfd, info, dynobj, sdyn);
-+
-+      if (ret != TRUE)
-+	return ret;
-+
-+      /* Fill in the head and tail entries in the procedure linkage table.  */
-+      if (splt->size > 0)
-+	{
-+	  int i;
-+	  uint32_t plt_header[PLT_HEADER_INSNS];
-+	  riscv_make_plt_header (sec_addr (htab->elf.sgotplt),
-+				 sec_addr (splt), plt_header);
-+
-+	  for (i = 0; i < PLT_HEADER_INSNS; i++)
-+	    bfd_put_32 (output_bfd, plt_header[i], splt->contents + 4*i);
-+	}
-+
-+      elf_section_data (splt->output_section)->this_hdr.sh_entsize
-+	= PLT_ENTRY_SIZE;
-+    }
-+
-+  if (htab->elf.sgotplt)
-+    {
-+      asection *output_section = htab->elf.sgotplt->output_section;
-+
-+      if (bfd_is_abs_section (output_section))
-+	{
-+	  (*_bfd_error_handler)
-+	    (_("discarded output section: `%A'"), htab->elf.sgotplt);
-+	  return FALSE;
-+	}
-+
-+      if (htab->elf.sgotplt->size > 0)
-+	{
-+	  /* Write the first two entries in .got.plt, needed for the dynamic
-+	     linker.  */
-+	  bfd_put_NN (output_bfd, (bfd_vma) -1, htab->elf.sgotplt->contents);
-+	  bfd_put_NN (output_bfd, (bfd_vma) 0,
-+		      htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
-+	}
-+
-+      elf_section_data (output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE;
-+    }
-+
-+  if (htab->elf.sgot)
-+    {
-+      asection *output_section = htab->elf.sgot->output_section;
-+
-+      if (htab->elf.sgot->size > 0)
-+	{
-+	  /* Set the first entry in the global offset table to the address of
-+	     the dynamic section.  */
-+	  bfd_vma val = sdyn ? sec_addr (sdyn) : 0;
-+	  bfd_put_NN (output_bfd, val, htab->elf.sgot->contents);
-+	}
-+
-+      elf_section_data (output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE;
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Return address for Ith PLT stub in section PLT, for relocation REL
-+   or (bfd_vma) -1 if it should not be included.  */
-+
-+static bfd_vma
-+riscv_elf_plt_sym_val (bfd_vma i, const asection *plt,
-+		       const arelent *rel ATTRIBUTE_UNUSED)
-+{
-+  return plt->vma + PLT_HEADER_SIZE + i * PLT_ENTRY_SIZE;
-+}
-+
-+static enum elf_reloc_type_class
-+riscv_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
-+			const asection *rel_sec ATTRIBUTE_UNUSED,
-+			const Elf_Internal_Rela *rela)
-+{
-+  switch (ELFNN_R_TYPE (rela->r_info))
-+    {
-+    case R_RISCV_RELATIVE:
-+      return reloc_class_relative;
-+    case R_RISCV_JUMP_SLOT:
-+      return reloc_class_plt;
-+    case R_RISCV_COPY:
-+      return reloc_class_copy;
-+    default:
-+      return reloc_class_normal;
-+    }
-+}
-+
-+/* Merge backend specific data from an object file to the output
-+   object file when linking.  */
-+
-+static bfd_boolean
-+_bfd_riscv_elf_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
-+{
-+  flagword new_flags = elf_elfheader (ibfd)->e_flags;
-+  flagword old_flags = elf_elfheader (obfd)->e_flags;
-+
-+  if (!is_riscv_elf (ibfd) || !is_riscv_elf (obfd))
-+    return TRUE;
-+
-+  if (strcmp (bfd_get_target (ibfd), bfd_get_target (obfd)) != 0)
-+    {
-+      (*_bfd_error_handler)
-+	(_("%B: ABI is incompatible with that of the selected emulation"),
-+	 ibfd);
-+      return FALSE;
-+    }
-+
-+  if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
-+    return FALSE;
-+
-+  if (! elf_flags_init (obfd))
-+    {
-+      elf_flags_init (obfd) = TRUE;
-+      elf_elfheader (obfd)->e_flags = new_flags;
-+      return TRUE;
-+    }
-+
-+  /* Disallow linking soft-float and hard-float.  */
-+  if ((old_flags ^ new_flags) & EF_RISCV_SOFT_FLOAT)
-+    {
-+      (*_bfd_error_handler)
-+	(_("%B: can't link hard-float modules with soft-float modules"), ibfd);
-+      goto fail;
-+    }
-+
-+  /* Allow linking RVC and non-RVC, and keep the RVC flag.  */
-+  elf_elfheader (obfd)->e_flags |= new_flags & EF_RISCV_RVC;
-+
-+  return TRUE;
-+
-+fail:
-+  bfd_set_error (bfd_error_bad_value);
-+  return FALSE;
-+}
-+
-+/* Delete some bytes from a section while relaxing.  */
-+
-+static bfd_boolean
-+riscv_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, size_t count)
-+{
-+  unsigned int i, symcount;
-+  bfd_vma toaddr = sec->size;
-+  struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
-+  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
-+  unsigned int sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
-+  struct bfd_elf_section_data *data = elf_section_data (sec);
-+  bfd_byte *contents = data->this_hdr.contents;
-+
-+  /* Actually delete the bytes.  */
-+  sec->size -= count;
-+  memmove (contents + addr, contents + addr + count, toaddr - addr - count);
-+
-+  /* Adjust the location of all of the relocs.  Note that we need not
-+     adjust the addends, since all PC-relative references must be against
-+     symbols, which we will adjust below.  */
-+  for (i = 0; i < sec->reloc_count; i++)
-+    if (data->relocs[i].r_offset > addr && data->relocs[i].r_offset < toaddr)
-+      data->relocs[i].r_offset -= count;
-+
-+  /* Adjust the local symbols defined in this section.  */
-+  for (i = 0; i < symtab_hdr->sh_info; i++)
-+    {
-+      Elf_Internal_Sym *sym = (Elf_Internal_Sym *) symtab_hdr->contents + i;
-+      if (sym->st_shndx == sec_shndx)
-+	{
-+	  /* If the symbol is in the range of memory we just moved, we
-+	     have to adjust its value.  */
-+	  if (sym->st_value > addr && sym->st_value <= toaddr)
-+	    sym->st_value -= count;
-+
-+	  /* If the symbol *spans* the bytes we just deleted (i.e. its
-+	     *end* is in the moved bytes but its *start* isn't), then we
-+	     must adjust its size.  */
-+	  if (sym->st_value <= addr
-+	      && sym->st_value + sym->st_size > addr
-+	      && sym->st_value + sym->st_size <= toaddr)
-+	    sym->st_size -= count;
-+	}
-+    }
-+
-+  /* Now adjust the global symbols defined in this section.  */
-+  symcount = ((symtab_hdr->sh_size / sizeof (ElfNN_External_Sym))
-+	      - symtab_hdr->sh_info);
-+
-+  for (i = 0; i < symcount; i++)
-+    {
-+      struct elf_link_hash_entry *sym_hash = sym_hashes[i];
-+
-+      if ((sym_hash->root.type == bfd_link_hash_defined
-+	   || sym_hash->root.type == bfd_link_hash_defweak)
-+	  && sym_hash->root.u.def.section == sec)
-+	{
-+	  /* As above, adjust the value if needed.  */
-+	  if (sym_hash->root.u.def.value > addr
-+	      && sym_hash->root.u.def.value <= toaddr)
-+	    sym_hash->root.u.def.value -= count;
-+
-+	  /* As above, adjust the size if needed.  */
-+	  if (sym_hash->root.u.def.value <= addr
-+	      && sym_hash->root.u.def.value + sym_hash->size > addr
-+	      && sym_hash->root.u.def.value + sym_hash->size <= toaddr)
-+	    sym_hash->size -= count;
-+	}
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Relax AUIPC + JALR into JAL.  */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_call (bfd *abfd, asection *sec, asection *sym_sec,
-+		       struct bfd_link_info *link_info,
-+		       Elf_Internal_Rela *rel,
-+		       bfd_vma symval,
-+		       bfd_boolean *again)
-+{
-+  bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
-+  bfd_signed_vma foff = symval - (sec_addr (sec) + rel->r_offset);
-+  bfd_boolean near_zero = (symval + RISCV_IMM_REACH/2) < RISCV_IMM_REACH;
-+  bfd_vma auipc, jalr;
-+  int rd, r_type, len = 4, rvc = elf_elfheader (abfd)->e_flags & EF_RISCV_RVC;
-+
-+  /* If the call crosses section boundaries, an alignment directive could
-+     cause the PC-relative offset to later increase.  Assume at most
-+     page-alignment, and account for this by adding some slop.  */
-+  if (VALID_UJTYPE_IMM (foff) && sym_sec->output_section != sec->output_section)
-+    foff += (foff < 0 ? -ELF_MAXPAGESIZE : ELF_MAXPAGESIZE);
-+
-+  /* See if this function call can be shortened.  */
-+  if (!VALID_UJTYPE_IMM (foff) && !(!bfd_link_pic (link_info) && near_zero))
-+    return TRUE;
-+
-+  /* Shorten the function call.  */
-+  BFD_ASSERT (rel->r_offset + 8 <= sec->size);
-+
-+  auipc = bfd_get_32 (abfd, contents + rel->r_offset);
-+  jalr = bfd_get_32 (abfd, contents + rel->r_offset + 4);
-+  rd = (jalr >> OP_SH_RD) & OP_MASK_RD;
-+  rvc = rvc && VALID_RVC_J_IMM (foff) && ARCH_SIZE == 32;
-+
-+  if (rvc && (rd == 0 || rd == X_RA))
-+    {
-+      /* Relax to C.J[AL] rd, addr.  */
-+      r_type = R_RISCV_RVC_JUMP;
-+      auipc = rd == 0 ? MATCH_C_J : MATCH_C_JAL;
-+      len = 2;
-+    }
-+  else if (VALID_UJTYPE_IMM (foff))
-+    {
-+      /* Relax to JAL rd, addr.  */
-+      r_type = R_RISCV_JAL;
-+      auipc = MATCH_JAL | (rd << OP_SH_RD);
-+    }
-+  else /* near_zero */
-+    {
-+      /* Relax to JALR rd, x0, addr.  */
-+      r_type = R_RISCV_LO12_I;
-+      auipc = MATCH_JALR | (rd << OP_SH_RD);
-+    }
-+
-+  /* Replace the R_RISCV_CALL reloc.  */
-+  rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), r_type);
-+  /* Replace the AUIPC.  */
-+  bfd_put (8 * len, abfd, auipc, contents + rel->r_offset);
-+
-+  /* Delete unnecessary JALR.  */
-+  *again = TRUE;
-+  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + len, 8 - len);
-+}
-+
-+/* Relax non-PIC global variable references.  */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_lui (bfd *abfd, asection *sec, asection *sym_sec,
-+		      struct bfd_link_info *link_info,
-+		      Elf_Internal_Rela *rel,
-+		      bfd_vma symval,
-+		      bfd_boolean *again)
-+{
-+  bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
-+  bfd_vma gp = riscv_global_pointer_value (link_info);
-+  int use_rvc = elf_elfheader (abfd)->e_flags & EF_RISCV_RVC;
-+
-+  /* Mergeable symbols might later move out of range.  */
-+  if (sym_sec->flags & SEC_MERGE)
-+    return TRUE;
-+
-+  BFD_ASSERT (rel->r_offset + 4 <= sec->size);
-+
-+  /* Is the reference in range of x0 or gp?  */
-+  if (VALID_ITYPE_IMM (symval) || VALID_ITYPE_IMM (symval - gp))
-+    {
-+      unsigned sym = ELFNN_R_SYM (rel->r_info);
-+      switch (ELFNN_R_TYPE (rel->r_info))
-+	{
-+	case R_RISCV_LO12_I:
-+	  rel->r_info = ELFNN_R_INFO (sym, R_RISCV_GPREL_I);
-+	  return TRUE;
-+
-+	case R_RISCV_LO12_S:
-+	  rel->r_info = ELFNN_R_INFO (sym, R_RISCV_GPREL_S);
-+	  return TRUE;
-+
-+	case R_RISCV_HI20:
-+	  /* We can delete the unnecessary LUI and reloc.  */
-+	  rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
-+	  *again = TRUE;
-+	  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
-+
-+	default:
-+	  abort ();
-+	}
-+    }
-+
-+  /* Can we relax LUI to C.LUI?  Alignment might move the section forward;
-+     account for this assuming page alignment at worst.  */
-+  if (use_rvc
-+      && ELFNN_R_TYPE (rel->r_info) == R_RISCV_HI20
-+      && VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (symval))
-+      && VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (symval + ELF_MAXPAGESIZE)))
-+    {
-+      /* Replace LUI with C.LUI if legal (i.e., rd != x2/sp).  */
-+      bfd_vma lui = bfd_get_32 (abfd, contents + rel->r_offset);
-+      if (((lui >> OP_SH_RD) & OP_MASK_RD) == X_SP)
-+	return TRUE;
-+
-+      lui = (lui & (OP_MASK_RD << OP_SH_RD)) | MATCH_C_LUI;
-+      bfd_put_32 (abfd, lui, contents + rel->r_offset);
-+
-+      /* Replace the R_RISCV_HI20 reloc.  */
-+      rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), R_RISCV_RVC_LUI);
-+
-+      *again = TRUE;
-+      return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + 2, 2);
-+    }
-+
-+  return TRUE;
-+}
-+
-+/* Relax non-PIC TLS references.  */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_tls_le (bfd *abfd, asection *sec,
-+			 asection *sym_sec ATTRIBUTE_UNUSED,
-+			 struct bfd_link_info *link_info,
-+			 Elf_Internal_Rela *rel,
-+			 bfd_vma symval,
-+			 bfd_boolean *again)
-+{
-+  /* See if this symbol is in range of tp.  */
-+  if (RISCV_CONST_HIGH_PART (tpoff (link_info, symval)) != 0)
-+    return TRUE;
-+
-+  /* We can delete the unnecessary LUI and tp add.  The LO12 reloc will be
-+     made directly tp-relative.  */
-+  BFD_ASSERT (rel->r_offset + 4 <= sec->size);
-+  rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
-+
-+  *again = TRUE;
-+  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
-+}
-+
-+/* Implement R_RISCV_ALIGN by deleting excess alignment NOPs.  */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_align (bfd *abfd, asection *sec,
-+			asection *sym_sec ATTRIBUTE_UNUSED,
-+			struct bfd_link_info *link_info ATTRIBUTE_UNUSED,
-+			Elf_Internal_Rela *rel,
-+			bfd_vma symval,
-+			bfd_boolean *again ATTRIBUTE_UNUSED)
-+{
-+  bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
-+  bfd_vma alignment = 1, pos;
-+  while (alignment <= rel->r_addend)
-+    alignment *= 2;
-+
-+  symval -= rel->r_addend;
-+  bfd_vma aligned_addr = ((symval - 1) & ~(alignment - 1)) + alignment;
-+  bfd_vma nop_bytes = aligned_addr - symval;
-+
-+  /* Once we've handled an R_RISCV_ALIGN, we can't relax anything else.  */
-+  sec->sec_flg0 = TRUE;
-+
-+  /* Make sure there are enough NOPs to actually achieve the alignment.  */
-+  if (rel->r_addend < nop_bytes)
-+    return FALSE;
-+
-+  /* Delete the reloc.  */
-+  rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
-+
-+  /* If the number of NOPs is already correct, there's nothing to do.  */
-+  if (nop_bytes == rel->r_addend)
-+    return TRUE;
-+
-+  /* Write as many RISC-V NOPs as we need.  */
-+  for (pos = 0; pos < (nop_bytes & -4); pos += 4)
-+    bfd_put_32 (abfd, RISCV_NOP, contents + rel->r_offset + pos);
-+
-+  /* Write a final RVC NOP if need be.  */
-+  if (nop_bytes % 4 != 0)
-+    bfd_put_16 (abfd, RVC_NOP, contents + rel->r_offset + pos);
-+
-+  /* Delete the excess bytes.  */
-+  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + nop_bytes,
-+				   rel->r_addend - nop_bytes);
-+}
-+
-+/* Relax a section.  Pass 0 shortens code sequences unless disabled.
-+   Pass 1, which cannot be disabled, handles code alignment directives.  */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_section (bfd *abfd, asection *sec,
-+			  struct bfd_link_info *info, bfd_boolean *again)
-+{
-+  Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
-+  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+  struct bfd_elf_section_data *data = elf_section_data (sec);
-+  Elf_Internal_Rela *relocs;
-+  bfd_boolean ret = FALSE;
-+  unsigned int i;
-+
-+  *again = FALSE;
-+
-+  if (bfd_link_relocatable (info)
-+      || sec->sec_flg0
-+      || (sec->flags & SEC_RELOC) == 0
-+      || sec->reloc_count == 0
-+      || (info->disable_target_specific_optimizations
-+	  && info->relax_pass == 0))
-+    return TRUE;
-+
-+  /* Read this BFD's relocs if we haven't done so already.  */
-+  if (data->relocs)
-+    relocs = data->relocs;
-+  else if (!(relocs = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL,
-+						 info->keep_memory)))
-+    goto fail;
-+
-+  /* Examine and consider relaxing each reloc.  */
-+  for (i = 0; i < sec->reloc_count; i++)
-+    {
-+      asection *sym_sec;
-+      Elf_Internal_Rela *rel = relocs + i;
-+      typeof (&_bfd_riscv_relax_call) relax_func = NULL;
-+      int type = ELFNN_R_TYPE (rel->r_info);
-+      bfd_vma symval;
-+
-+      if (info->relax_pass == 0)
-+	{
-+	  if (type == R_RISCV_CALL || type == R_RISCV_CALL_PLT)
-+	    relax_func = _bfd_riscv_relax_call;
-+	  else if (type == R_RISCV_HI20
-+		   || type == R_RISCV_LO12_I
-+		   || type == R_RISCV_LO12_S)
-+	    relax_func = _bfd_riscv_relax_lui;
-+	  else if (type == R_RISCV_TPREL_HI20 || type == R_RISCV_TPREL_ADD)
-+	    relax_func = _bfd_riscv_relax_tls_le;
-+	}
-+      else if (type == R_RISCV_ALIGN)
-+	relax_func = _bfd_riscv_relax_align;
-+
-+      if (!relax_func)
-+	continue;
-+
-+      data->relocs = relocs;
-+
-+      /* Read this BFD's contents if we haven't done so already.  */
-+      if (!data->this_hdr.contents
-+	  && !bfd_malloc_and_get_section (abfd, sec, &data->this_hdr.contents))
-+	goto fail;
-+
-+      /* Read this BFD's symbols if we haven't done so already.  */
-+      if (symtab_hdr->sh_info != 0
-+	  && !symtab_hdr->contents
-+	  && !(symtab_hdr->contents =
-+	       (unsigned char *) bfd_elf_get_elf_syms (abfd, symtab_hdr,
-+						       symtab_hdr->sh_info,
-+						       0, NULL, NULL, NULL)))
-+	goto fail;
-+
-+      /* Get the value of the symbol referred to by the reloc.  */
-+      if (ELFNN_R_SYM (rel->r_info) < symtab_hdr->sh_info)
-+	{
-+	  /* A local symbol.  */
-+	  Elf_Internal_Sym *isym = ((Elf_Internal_Sym *) symtab_hdr->contents
-+				    + ELFNN_R_SYM (rel->r_info));
-+
-+	  if (isym->st_shndx == SHN_UNDEF)
-+	    sym_sec = sec, symval = sec_addr (sec) + rel->r_offset;
-+	  else
-+	    {
-+	      BFD_ASSERT (isym->st_shndx < elf_numsections (abfd));
-+	      sym_sec = elf_elfsections (abfd)[isym->st_shndx]->bfd_section;
-+	      if (sec_addr (sym_sec) == 0)
-+		continue;
-+	      symval = sec_addr (sym_sec) + isym->st_value;
-+	    }
-+	}
-+      else
-+	{
-+	  unsigned long indx;
-+	  struct elf_link_hash_entry *h;
-+
-+	  indx = ELFNN_R_SYM (rel->r_info) - symtab_hdr->sh_info;
-+	  h = elf_sym_hashes (abfd)[indx];
-+
-+	  while (h->root.type == bfd_link_hash_indirect
-+		 || h->root.type == bfd_link_hash_warning)
-+	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
-+
-+	  if (h->plt.offset != MINUS_ONE)
-+	    symval = sec_addr (htab->elf.splt) + h->plt.offset;
-+	  else if (h->root.u.def.section->output_section == NULL
-+		   || (h->root.type != bfd_link_hash_defined
-+		       && h->root.type != bfd_link_hash_defweak))
-+	    continue;
-+	  else
-+	    symval = sec_addr (h->root.u.def.section) + h->root.u.def.value;
-+
-+	  sym_sec = h->root.u.def.section;
-+	}
-+
-+      symval += rel->r_addend;
-+
-+      if (!relax_func (abfd, sec, sym_sec, info, rel, symval, again))
-+	goto fail;
-+    }
-+
-+  ret = TRUE;
-+
-+fail:
-+  if (relocs != data->relocs)
-+    free (relocs);
-+
-+  return ret;
-+}
-+
-+#define TARGET_LITTLE_SYM		riscv_elfNN_vec
-+#define TARGET_LITTLE_NAME		"elfNN-littleriscv"
-+
-+#define elf_backend_reloc_type_class	     riscv_reloc_type_class
-+
-+#define bfd_elfNN_bfd_reloc_name_lookup	     riscv_reloc_name_lookup
-+#define bfd_elfNN_bfd_link_hash_table_create riscv_elf_link_hash_table_create
-+#define bfd_elfNN_bfd_reloc_type_lookup	     riscv_reloc_type_lookup
-+#define bfd_elfNN_bfd_merge_private_bfd_data \
-+  _bfd_riscv_elf_merge_private_bfd_data
-+
-+#define elf_backend_copy_indirect_symbol     riscv_elf_copy_indirect_symbol
-+#define elf_backend_create_dynamic_sections  riscv_elf_create_dynamic_sections
-+#define elf_backend_check_relocs	     riscv_elf_check_relocs
-+#define elf_backend_adjust_dynamic_symbol    riscv_elf_adjust_dynamic_symbol
-+#define elf_backend_size_dynamic_sections    riscv_elf_size_dynamic_sections
-+#define elf_backend_relocate_section	     riscv_elf_relocate_section
-+#define elf_backend_finish_dynamic_symbol    riscv_elf_finish_dynamic_symbol
-+#define elf_backend_finish_dynamic_sections  riscv_elf_finish_dynamic_sections
-+#define elf_backend_gc_mark_hook	     riscv_elf_gc_mark_hook
-+#define elf_backend_gc_sweep_hook	     riscv_elf_gc_sweep_hook
-+#define elf_backend_plt_sym_val		     riscv_elf_plt_sym_val
-+#define elf_info_to_howto_rel		     NULL
-+#define elf_info_to_howto		     riscv_info_to_howto_rela
-+#define bfd_elfNN_bfd_relax_section	     _bfd_riscv_relax_section
-+
-+#define elf_backend_init_index_section	     _bfd_elf_init_1_index_section
-+
-+#define elf_backend_can_gc_sections	1
-+#define elf_backend_can_refcount	1
-+#define elf_backend_want_got_plt	1
-+#define elf_backend_plt_readonly	1
-+#define elf_backend_plt_alignment	4
-+#define elf_backend_want_plt_sym	1
-+#define elf_backend_got_header_size	(ARCH_SIZE / 8)
-+#define elf_backend_rela_normal		1
-+#define elf_backend_default_execstack	0
-+
-+#include "elfNN-target.h"
-diff -urN empty/bfd/elfxx-riscv.c binutils-2.26.1/bfd/elfxx-riscv.c
---- empty/bfd/elfxx-riscv.c	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/elfxx-riscv.c	2016-04-03 10:33:12.062126369 +0800
-@@ -0,0 +1,814 @@
-+/* RISC-V-specific support for ELF.
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on TILE-Gx and MIPS targets.
-+
-+   This file is part of BFD, the Binary File Descriptor library.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3 of the License, or
-+   (at your option) any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#include "sysdep.h"
-+#include "bfd.h"
-+#include "libbfd.h"
-+#include "elf-bfd.h"
-+#include "elf/riscv.h"
-+#include "opcode/riscv.h"
-+#include "libiberty.h"
-+#include "elfxx-riscv.h"
-+#include <stdint.h>
-+
-+#define MINUS_ONE ((bfd_vma)0 - 1)
-+
-+/* The relocation table used for SHT_RELA sections.  */
-+
-+static reloc_howto_type howto_table[] =
-+{
-+  /* No relocation.  */
-+  HOWTO (R_RISCV_NONE,			/* type */
-+	 0,				/* rightshift */
-+	 3,				/* size */
-+	 0,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_NONE",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0,				/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 32 bit relocation.  */
-+  HOWTO (R_RISCV_32,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_32",			/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0xffffffff,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 64 bit relocation.  */
-+  HOWTO (R_RISCV_64,			/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_64",			/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Relocation against a local symbol in a shared object.  */
-+  HOWTO (R_RISCV_RELATIVE,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_RELATIVE",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0xffffffff,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_COPY,			/* type */
-+	 0,				/* rightshift */
-+	 0,				/* this one is variable size */
-+	 0,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_bitfield,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_COPY",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0x0,         			/* src_mask */
-+	 0x0,		        	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_JUMP_SLOT,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_bitfield,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_JUMP_SLOT",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0x0,         			/* src_mask */
-+	 0x0,		        	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Dynamic TLS relocations.  */
-+  HOWTO (R_RISCV_TLS_DTPMOD32,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc, 	/* special_function */
-+	 "R_RISCV_TLS_DTPMOD32",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 MINUS_ONE,			/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_TLS_DTPMOD64,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc, 	/* special_function */
-+	 "R_RISCV_TLS_DTPMOD64",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 MINUS_ONE,			/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_TLS_DTPREL32,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc, 	/* special_function */
-+	 "R_RISCV_TLS_DTPREL32",	/* name */
-+	 TRUE,				/* partial_inplace */
-+	 MINUS_ONE,			/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_TLS_DTPREL64,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc, 	/* special_function */
-+	 "R_RISCV_TLS_DTPREL64",	/* name */
-+	 TRUE,				/* partial_inplace */
-+	 MINUS_ONE,			/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_TLS_TPREL32,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc, 	/* special_function */
-+	 "R_RISCV_TLS_TPREL32",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 MINUS_ONE,			/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  HOWTO (R_RISCV_TLS_TPREL64,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc, 	/* special_function */
-+	 "R_RISCV_TLS_TPREL64",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 MINUS_ONE,			/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Reserved for future relocs that the dynamic linker must understand.  */
-+  EMPTY_HOWTO (12),
-+  EMPTY_HOWTO (13),
-+  EMPTY_HOWTO (14),
-+  EMPTY_HOWTO (15),
-+
-+  /* 12-bit PC-relative branch offset.  */
-+  HOWTO (R_RISCV_BRANCH,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_signed,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_BRANCH",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_SBTYPE_IMM (-1U),	/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* 20-bit PC-relative jump offset.  */
-+  HOWTO (R_RISCV_JAL,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+					/* This needs complex overflow
-+					   detection, because the upper 36
-+					   bits must match the PC + 4.  */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_JAL",			/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UJTYPE_IMM (-1U),	/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* 32-bit PC-relative function call (AUIPC/JALR).  */
-+  HOWTO (R_RISCV_CALL,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 64,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_CALL",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U) | ((bfd_vma) ENCODE_ITYPE_IMM (-1U) << 32),
-+					/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* 32-bit PC-relative function call (AUIPC/JALR).  */
-+  HOWTO (R_RISCV_CALL_PLT,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 64,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_CALL_PLT",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U) | ((bfd_vma) ENCODE_ITYPE_IMM (-1U) << 32),
-+					/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* High 20 bits of 32-bit PC-relative GOT access.  */
-+  HOWTO (R_RISCV_GOT_HI20,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_GOT_HI20",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* High 20 bits of 32-bit PC-relative TLS IE GOT access.  */
-+  HOWTO (R_RISCV_TLS_GOT_HI20,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_TLS_GOT_HI20",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* High 20 bits of 32-bit PC-relative TLS GD GOT reference.  */
-+  HOWTO (R_RISCV_TLS_GD_HI20,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_TLS_GD_HI20",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* High 20 bits of 32-bit PC-relative reference.  */
-+  HOWTO (R_RISCV_PCREL_HI20,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_PCREL_HI20",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* Low 12 bits of a 32-bit PC-relative load or add.  */
-+  HOWTO (R_RISCV_PCREL_LO12_I,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_PCREL_LO12_I",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Low 12 bits of a 32-bit PC-relative store.  */
-+  HOWTO (R_RISCV_PCREL_LO12_S,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_PCREL_LO12_S",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* High 20 bits of 32-bit absolute address.  */
-+  HOWTO (R_RISCV_HI20,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_HI20",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Low 12 bits of 32-bit load or add.  */
-+  HOWTO (R_RISCV_LO12_I,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_LO12_I",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Low 12 bits of 32-bit store.  */
-+  HOWTO (R_RISCV_LO12_S,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_LO12_S",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* High 20 bits of TLS LE thread pointer offset.  */
-+  HOWTO (R_RISCV_TPREL_HI20,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_signed,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_TPREL_HI20",		/* name */
-+	 TRUE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Low 12 bits of TLS LE thread pointer offset for loads and adds.  */
-+  HOWTO (R_RISCV_TPREL_LO12_I,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_signed,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_TPREL_LO12_I",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Low 12 bits of TLS LE thread pointer offset for stores.  */
-+  HOWTO (R_RISCV_TPREL_LO12_S,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_signed,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_TPREL_LO12_S",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* TLS LE thread pointer usage.  */
-+  HOWTO (R_RISCV_TPREL_ADD,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_TPREL_ADD",		/* name */
-+	 TRUE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0,				/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 8-bit in-place addition, for local label subtraction.  */
-+  HOWTO (R_RISCV_ADD8,			/* type */
-+	 0,				/* rightshift */
-+	 0,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_ADD8",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 16-bit in-place addition, for local label subtraction.  */
-+  HOWTO (R_RISCV_ADD16,			/* type */
-+	 0,				/* rightshift */
-+	 1,				/* size */
-+	 16,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_ADD16",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 32-bit in-place addition, for local label subtraction.  */
-+  HOWTO (R_RISCV_ADD32,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_ADD32",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 64-bit in-place addition, for local label subtraction.  */
-+  HOWTO (R_RISCV_ADD64,			/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_ADD64",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 8-bit in-place subtraction, for local label subtraction.  */
-+  HOWTO (R_RISCV_SUB8,			/* type */
-+	 0,				/* rightshift */
-+	 0,				/* size */
-+	 8,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_SUB8",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 16-bit in-place subtraction, for local label subtraction.  */
-+  HOWTO (R_RISCV_SUB16,			/* type */
-+	 0,				/* rightshift */
-+	 1,				/* size */
-+	 16,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_SUB16",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 32-bit in-place subtraction, for local label subtraction.  */
-+  HOWTO (R_RISCV_SUB32,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_SUB32",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* 64-bit in-place subtraction, for local label subtraction.  */
-+  HOWTO (R_RISCV_SUB64,			/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 64,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_SUB64",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 MINUS_ONE,			/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* GNU extension to record C++ vtable hierarchy */
-+  HOWTO (R_RISCV_GNU_VTINHERIT,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 0,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 NULL,				/* special_function */
-+	 "R_RISCV_GNU_VTINHERIT",	/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0,				/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* GNU extension to record C++ vtable member usage */
-+  HOWTO (R_RISCV_GNU_VTENTRY,		/* type */
-+	 0,				/* rightshift */
-+	 4,				/* size */
-+	 0,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 _bfd_elf_rel_vtable_reloc_fn,	/* special_function */
-+	 "R_RISCV_GNU_VTENTRY",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0,				/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* Indicates an alignment statement.  The addend field encodes how many
-+     bytes of NOPs follow the statement.  The desired alignment is the
-+     addend rounded up to the next power of two.  */
-+  HOWTO (R_RISCV_ALIGN,			/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 0,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_ALIGN",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 0,				/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* 8-bit PC-relative branch offset.  */
-+  HOWTO (R_RISCV_RVC_BRANCH,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_signed,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_RVC_BRANCH",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_RVC_B_IMM (-1U),	/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* 11-bit PC-relative jump offset.  */
-+  HOWTO (R_RISCV_RVC_JUMP,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 TRUE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+					/* This needs complex overflow
-+					   detection, because the upper 36
-+					   bits must match the PC + 4.  */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_RVC_JUMP",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_RVC_J_IMM (-1U),	/* dst_mask */
-+	 TRUE),				/* pcrel_offset */
-+
-+  /* High 6 bits of 18-bit absolute address.  */
-+  HOWTO (R_RISCV_RVC_LUI,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_RVC_LUI",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_RVC_IMM (-1U),		/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* GP-relative load or add (I-type immediate).  */
-+  HOWTO (R_RISCV_GPREL_I,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_GPREL_I",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+
-+  /* GP-relative store (S-type immediate).  */
-+  HOWTO (R_RISCV_GPREL_S,		/* type */
-+	 0,				/* rightshift */
-+	 2,				/* size */
-+	 32,				/* bitsize */
-+	 FALSE,				/* pc_relative */
-+	 0,				/* bitpos */
-+	 complain_overflow_dont,	/* complain_on_overflow */
-+	 bfd_elf_generic_reloc,		/* special_function */
-+	 "R_RISCV_GPREL_S",		/* name */
-+	 FALSE,				/* partial_inplace */
-+	 0,				/* src_mask */
-+	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
-+	 FALSE),			/* pcrel_offset */
-+};
-+
-+/* A mapping from BFD reloc types to RISC-V ELF reloc types.  */
-+
-+struct elf_reloc_map {
-+  bfd_reloc_code_real_type bfd_val;
-+  enum elf_riscv_reloc_type elf_val;
-+};
-+
-+static const struct elf_reloc_map riscv_reloc_map[] =
-+{
-+  { BFD_RELOC_NONE, R_RISCV_NONE },
-+  { BFD_RELOC_32, R_RISCV_32 },
-+  { BFD_RELOC_64, R_RISCV_64 },
-+  { BFD_RELOC_RISCV_ADD8, R_RISCV_ADD8 },
-+  { BFD_RELOC_RISCV_ADD16, R_RISCV_ADD16 },
-+  { BFD_RELOC_RISCV_ADD32, R_RISCV_ADD32 },
-+  { BFD_RELOC_RISCV_ADD64, R_RISCV_ADD64 },
-+  { BFD_RELOC_RISCV_SUB8, R_RISCV_SUB8 },
-+  { BFD_RELOC_RISCV_SUB16, R_RISCV_SUB16 },
-+  { BFD_RELOC_RISCV_SUB32, R_RISCV_SUB32 },
-+  { BFD_RELOC_RISCV_SUB64, R_RISCV_SUB64 },
-+  { BFD_RELOC_CTOR, R_RISCV_64 },
-+  { BFD_RELOC_12_PCREL, R_RISCV_BRANCH },
-+  { BFD_RELOC_RISCV_HI20, R_RISCV_HI20 },
-+  { BFD_RELOC_RISCV_LO12_I, R_RISCV_LO12_I },
-+  { BFD_RELOC_RISCV_LO12_S, R_RISCV_LO12_S },
-+  { BFD_RELOC_RISCV_PCREL_LO12_I, R_RISCV_PCREL_LO12_I },
-+  { BFD_RELOC_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_S },
-+  { BFD_RELOC_RISCV_CALL, R_RISCV_CALL },
-+  { BFD_RELOC_RISCV_CALL_PLT, R_RISCV_CALL_PLT },
-+  { BFD_RELOC_RISCV_PCREL_HI20, R_RISCV_PCREL_HI20 },
-+  { BFD_RELOC_RISCV_JMP, R_RISCV_JAL },
-+  { BFD_RELOC_RISCV_GOT_HI20, R_RISCV_GOT_HI20 },
-+  { BFD_RELOC_RISCV_TLS_DTPMOD32, R_RISCV_TLS_DTPMOD32 },
-+  { BFD_RELOC_RISCV_TLS_DTPREL32, R_RISCV_TLS_DTPREL32 },
-+  { BFD_RELOC_RISCV_TLS_DTPMOD64, R_RISCV_TLS_DTPMOD64 },
-+  { BFD_RELOC_RISCV_TLS_DTPREL64, R_RISCV_TLS_DTPREL64 },
-+  { BFD_RELOC_RISCV_TLS_TPREL32, R_RISCV_TLS_TPREL32 },
-+  { BFD_RELOC_RISCV_TLS_TPREL64, R_RISCV_TLS_TPREL64 },
-+  { BFD_RELOC_RISCV_TPREL_HI20, R_RISCV_TPREL_HI20 },
-+  { BFD_RELOC_RISCV_TPREL_ADD, R_RISCV_TPREL_ADD },
-+  { BFD_RELOC_RISCV_TPREL_LO12_S, R_RISCV_TPREL_LO12_S },
-+  { BFD_RELOC_RISCV_TPREL_LO12_I, R_RISCV_TPREL_LO12_I },
-+  { BFD_RELOC_RISCV_TLS_GOT_HI20, R_RISCV_TLS_GOT_HI20 },
-+  { BFD_RELOC_RISCV_TLS_GD_HI20, R_RISCV_TLS_GD_HI20 },
-+  { BFD_RELOC_RISCV_ALIGN, R_RISCV_ALIGN },
-+  { BFD_RELOC_RISCV_RVC_BRANCH, R_RISCV_RVC_BRANCH },
-+  { BFD_RELOC_RISCV_RVC_JUMP, R_RISCV_RVC_JUMP },
-+  { BFD_RELOC_RISCV_RVC_LUI, R_RISCV_RVC_LUI },
-+  { BFD_RELOC_RISCV_GPREL_I, R_RISCV_GPREL_I },
-+  { BFD_RELOC_RISCV_GPREL_S, R_RISCV_GPREL_S },
-+};
-+
-+/* Given a BFD reloc type, return a howto structure.  */
-+
-+reloc_howto_type *
-+riscv_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
-+			 bfd_reloc_code_real_type code)
-+{
-+  unsigned int i;
-+
-+  for (i = 0; i < ARRAY_SIZE (riscv_reloc_map); i++)
-+    if (riscv_reloc_map[i].bfd_val == code)
-+      return &howto_table[(int) riscv_reloc_map[i].elf_val];
-+
-+  bfd_set_error (bfd_error_bad_value);
-+  return NULL;
-+}
-+
-+reloc_howto_type *
-+riscv_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, const char *r_name)
-+{
-+  unsigned int i;
-+
-+  for (i = 0; i < ARRAY_SIZE (howto_table); i++)
-+    if (howto_table[i].name && strcasecmp (howto_table[i].name, r_name) == 0)
-+      return &howto_table[i];
-+
-+  return NULL;
-+}
-+
-+reloc_howto_type *
-+riscv_elf_rtype_to_howto (unsigned int r_type)
-+{
-+  if (r_type >= ARRAY_SIZE (howto_table))
-+    {
-+      (*_bfd_error_handler) (_("unrecognized relocation (0x%x)"), r_type);
-+      bfd_set_error (bfd_error_bad_value);
-+      return NULL;
-+    }
-+  return &howto_table[r_type];
-+}
-diff -urN empty/bfd/elfxx-riscv.h binutils-2.26.1/bfd/elfxx-riscv.h
---- empty/bfd/elfxx-riscv.h	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/elfxx-riscv.h	2016-04-03 10:12:57.122276559 +0800
-@@ -0,0 +1,33 @@
-+/* RISC-V ELF specific backend routines.
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target.
-+
-+   This file is part of BFD, the Binary File Descriptor library.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3 of the License, or
-+   (at your option) any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#include "elf/common.h"
-+#include "elf/internal.h"
-+
-+extern reloc_howto_type *
-+riscv_reloc_name_lookup (bfd *, const char *);
-+
-+extern reloc_howto_type *
-+riscv_reloc_type_lookup (bfd *, bfd_reloc_code_real_type);
-+
-+extern reloc_howto_type *
-+riscv_elf_rtype_to_howto (unsigned int r_type);
-diff -urN empty/gas/config/tc-riscv.c binutils-2.26.1/gas/config/tc-riscv.c
---- empty/gas/config/tc-riscv.c	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/gas/config/tc-riscv.c	2016-04-09 10:50:33.576657106 +0800
-@@ -0,0 +1,2434 @@
-+/* tc-riscv.c -- RISC-V assembler
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target.
-+
-+   This file is part of GAS.
-+
-+   GAS is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3, or (at your option)
-+   any later version.
-+
-+   GAS is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#include "as.h"
-+#include "config.h"
-+#include "subsegs.h"
-+#include "safe-ctype.h"
-+
-+#include "itbl-ops.h"
-+#include "dwarf2dbg.h"
-+#include "dw2gencfi.h"
-+
-+#include "elf/riscv.h"
-+#include "opcode/riscv.h"
-+
-+#include <execinfo.h>
-+#include <stdint.h>
-+
-+/* Information about an instruction, including its format, operands
-+   and fixups.  */
-+struct riscv_cl_insn
-+{
-+  /* The opcode's entry in riscv_opcodes.  */
-+  const struct riscv_opcode *insn_mo;
-+
-+  /* The encoded instruction bits.  */
-+  insn_t insn_opcode;
-+
-+  /* The frag that contains the instruction.  */
-+  struct frag *frag;
-+
-+  /* The offset into FRAG of the first instruction byte.  */
-+  long where;
-+
-+  /* The relocs associated with the instruction, if any.  */
-+  fixS *fixp;
-+};
-+
-+/* The default architecture.  */
-+#ifndef DEFAULT_ARCH
-+#define DEFAULT_ARCH "riscv64"
-+#endif
-+static const char default_arch[] = DEFAULT_ARCH;
-+
-+unsigned xlen = 0; /* width of an x-register */
-+#define LOAD_ADDRESS_INSN (xlen == 64 ? "ld" : "lw")
-+#define ADD32_INSN (xlen == 64 ? "addiw" : "addi")
-+
-+static unsigned elf_flags = 0;
-+
-+/* This is the set of options which the .option pseudo-op may modify.  */
-+
-+struct riscv_set_options
-+{
-+  int pic; /* Generate position-independent code.  */
-+  int rvc; /* Generate RVC code.  */
-+};
-+
-+static struct riscv_set_options riscv_opts =
-+{
-+  0,	/* pic */
-+  0,	/* rvc */
-+};
-+
-+static void
-+riscv_set_rvc (bfd_boolean rvc_value)
-+{
-+  if (rvc_value)
-+    elf_flags |= EF_RISCV_RVC;
-+
-+  riscv_opts.rvc = rvc_value;
-+}
-+
-+struct riscv_subset
-+{
-+  const char *name;
-+  int version_major;
-+  int version_minor;
-+
-+  struct riscv_subset *next;
-+};
-+
-+static struct riscv_subset *riscv_subsets;
-+
-+static bfd_boolean
-+riscv_subset_supports (const char *feature)
-+{
-+  struct riscv_subset *s;
-+  char *p;
-+  unsigned xlen_required = strtoul (feature, &p, 10);
-+
-+  if (xlen_required && xlen != xlen_required)
-+    return FALSE;
-+
-+  for (s = riscv_subsets; s != NULL; s = s->next)
-+    if (strcasecmp (s->name, p) == 0)
-+      /* FIXME: once we support version numbers:
-+	 return major == s->version_major && minor <= s->version_minor; */
-+      return TRUE;
-+
-+  return FALSE;
-+}
-+
-+static void
-+riscv_add_subset (const char *subset)
-+{
-+  struct riscv_subset *s = xmalloc (sizeof *s);
-+  s->name = xstrdup (subset);
-+  s->version_major = 2;
-+  s->version_minor = 0;
-+  s->next = riscv_subsets;
-+  riscv_subsets = s;
-+}
-+
-+/* Set which ISA and extensions are available.  Formally, ISA strings must
-+   begin with RV32 or RV64, but we allow the prefix to be omitted.
-+
-+   FIXME: Version numbers are not supported yet.  */
-+static void
-+riscv_set_arch (const char *arg)
-+{
-+  char *uppercase = xstrdup (arg);
-+  char *p = uppercase;
-+  const char *all_subsets = "IMAFDC";
-+  const char *extension = NULL;
-+  int rvc = 0;
-+  int i;
-+
-+  for (i = 0; uppercase[i]; i++)
-+    uppercase[i] = TOUPPER (uppercase[i]);
-+
-+  if (strncmp (p, "RV32", 4) == 0)
-+    {
-+      xlen = 32;
-+      p += 4;
-+    }
-+  else if (strncmp (p, "RV64", 4) == 0)
-+    {
-+      xlen = 64;
-+      p += 4;
-+    }
-+  else if (strncmp (p, "RV", 2) == 0)
-+    p += 2;
-+
-+  switch (*p)
-+    {
-+      case 'I':
-+	break;
-+
-+      case 'G':
-+	p++;
-+	/* Fall through.  */
-+
-+      case '\0':
-+	for (i = 0; all_subsets[i] != '\0'; i++)
-+	  {
-+	    const char subset[] = {all_subsets[i], '\0'};
-+	    riscv_add_subset (subset);
-+	  }
-+	break;
-+
-+      default:
-+	as_fatal ("`I' must be the first ISA subset name specified (got %c)",
-+		  *p);
-+    }
-+
-+  while (*p)
-+    {
-+      if (*p == 'X')
-+	{
-+	  char *subset = xstrdup (p), *q = subset;
-+
-+	  while (*++q != '\0' && *q != '_')
-+	    ;
-+	  *q = '\0';
-+
-+	  if (extension)
-+	    as_fatal ("only one eXtension is supported (found %s and %s)",
-+		      extension, subset);
-+	  extension = subset;
-+	  riscv_add_subset (subset);
-+	  p += strlen (subset);
-+	  free (subset);
-+	}
-+      else if (*p == '_')
-+	p++;
-+      else if ((all_subsets = strchr (all_subsets, *p)) != NULL)
-+	{
-+	  const char subset[] = {*p, 0};
-+	  riscv_add_subset (subset);
-+	  if (*p == 'C')
-+	    rvc = 1;
-+	  all_subsets++;
-+	  p++;
-+	}
-+      else
-+	as_fatal ("unsupported ISA subset %c", *p);
-+    }
-+
-+  if (rvc)
-+    /* Override -m[no-]rvc setting if C was explicitly listed.  */
-+    riscv_set_rvc (TRUE);
-+  else
-+    /* Add RVC anyway.  -m[no-]rvc toggles its availability.  */
-+    riscv_add_subset ("C");
-+
-+  free (uppercase);
-+}
-+
-+/* handle of the OPCODE hash table */
-+static struct hash_control *op_hash = NULL;
-+
-+/* This array holds the chars that always start a comment.  If the
-+    pre-processor is disabled, these aren't very useful */
-+const char comment_chars[] = "#";
-+
-+/* This array holds the chars that only start a comment at the beginning of
-+   a line.  If the line seems to have the form '# 123 filename'
-+   .line and .file directives will appear in the pre-processed output */
-+/* Note that input_file.c hand checks for '#' at the beginning of the
-+   first line of the input file.  This is because the compiler outputs
-+   #NO_APP at the beginning of its output.  */
-+/* Also note that C style comments are always supported.  */
-+const char line_comment_chars[] = "#";
-+
-+/* This array holds machine specific line separator characters.  */
-+const char line_separator_chars[] = ";";
-+
-+/* Chars that can be used to separate mant from exp in floating point nums */
-+const char EXP_CHARS[] = "eE";
-+
-+/* Chars that mean this number is a floating point constant */
-+/* As in 0f12.456 */
-+/* or    0d1.2345e12 */
-+const char FLT_CHARS[] = "rRsSfFdDxXpP";
-+
-+/* Macros for encoding relaxation state for RVC branches and far jumps.  */
-+#define RELAX_BRANCH_ENCODE(uncond, rvc, length)	\
-+  ((relax_substateT) 					\
-+   (0xc0000000						\
-+    | ((uncond) ? 1 : 0)				\
-+    | ((rvc) ? 2 : 0)					\
-+    | ((length) << 2)))
-+#define RELAX_BRANCH_P(i) (((i) & 0xf0000000) == 0xc0000000)
-+#define RELAX_BRANCH_LENGTH(i) (((i) >> 2) & 0xF)
-+#define RELAX_BRANCH_RVC(i) (((i) & 2) != 0)
-+#define RELAX_BRANCH_UNCOND(i) (((i) & 1) != 0)
-+
-+/* Is the given value a sign-extended 32-bit value?  */
-+#define IS_SEXT_32BIT_NUM(x)						\
-+  (((x) &~ (offsetT) 0x7fffffff) == 0					\
-+   || (((x) &~ (offsetT) 0x7fffffff) == ~ (offsetT) 0x7fffffff))
-+
-+/* Is the given value a zero-extended 32-bit value?  Or a negated one?  */
-+#define IS_ZEXT_32BIT_NUM(x)						\
-+  (((x) &~ (offsetT) 0xffffffff) == 0					\
-+   || (((x) &~ (offsetT) 0xffffffff) == ~ (offsetT) 0xffffffff))
-+
-+/* Change INSN's opcode so that the operand given by FIELD has value VALUE.
-+   INSN is a riscv_cl_insn structure and VALUE is evaluated exactly once.  */
-+#define INSERT_OPERAND(FIELD, INSN, VALUE) \
-+  INSERT_BITS ((INSN).insn_opcode, VALUE, OP_MASK_##FIELD, OP_SH_##FIELD)
-+
-+/* Determine if an instruction matches an opcode.  */
-+#define OPCODE_MATCHES(OPCODE, OP) \
-+  (((OPCODE) & MASK_##OP) == MATCH_##OP)
-+
-+static char *expr_end;
-+
-+/* The default target format to use.  */
-+
-+const char *
-+riscv_target_format (void)
-+{
-+  return xlen == 64 ? "elf64-littleriscv" : "elf32-littleriscv";
-+}
-+
-+/* Return the length of instruction INSN.  */
-+
-+static inline unsigned int
-+insn_length (const struct riscv_cl_insn *insn)
-+{
-+  return riscv_insn_length (insn->insn_opcode);
-+}
-+
-+/* Initialise INSN from opcode entry MO.  Leave its position unspecified.  */
-+
-+static void
-+create_insn (struct riscv_cl_insn *insn, const struct riscv_opcode *mo)
-+{
-+  insn->insn_mo = mo;
-+  insn->insn_opcode = mo->match;
-+  insn->frag = NULL;
-+  insn->where = 0;
-+  insn->fixp = NULL;
-+}
-+
-+/* Install INSN at the location specified by its "frag" and "where" fields.  */
-+
-+static void
-+install_insn (const struct riscv_cl_insn *insn)
-+{
-+  char *f = insn->frag->fr_literal + insn->where;
-+  md_number_to_chars (f, insn->insn_opcode, insn_length (insn));
-+}
-+
-+/* Move INSN to offset WHERE in FRAG.  Adjust the fixups accordingly
-+   and install the opcode in the new location.  */
-+
-+static void
-+move_insn (struct riscv_cl_insn *insn, fragS *frag, long where)
-+{
-+  insn->frag = frag;
-+  insn->where = where;
-+  if (insn->fixp != NULL)
-+    {
-+      insn->fixp->fx_frag = frag;
-+      insn->fixp->fx_where = where;
-+    }
-+  install_insn (insn);
-+}
-+
-+/* Add INSN to the end of the output.  */
-+
-+static void
-+add_fixed_insn (struct riscv_cl_insn *insn)
-+{
-+  char *f = frag_more (insn_length (insn));
-+  move_insn (insn, frag_now, f - frag_now->fr_literal);
-+}
-+
-+static void
-+add_relaxed_insn (struct riscv_cl_insn *insn, int max_chars, int var,
-+      relax_substateT subtype, symbolS *symbol, offsetT offset)
-+{
-+  frag_grow (max_chars);
-+  move_insn (insn, frag_now, frag_more (0) - frag_now->fr_literal);
-+  frag_var (rs_machine_dependent, max_chars, var,
-+      subtype, symbol, offset, NULL);
-+}
-+
-+/* Compute the length of a branch sequence, and adjust the stored length
-+   accordingly.  If FRAGP is NULL, the worst-case length is returned.  */
-+
-+static int
-+relaxed_branch_length (fragS *fragp, asection *sec, int update)
-+{
-+  int jump, rvc, length = 8;
-+
-+  if (!fragp)
-+    return length;
-+
-+  jump = RELAX_BRANCH_UNCOND (fragp->fr_subtype);
-+  rvc = RELAX_BRANCH_RVC (fragp->fr_subtype);
-+  length = RELAX_BRANCH_LENGTH (fragp->fr_subtype);
-+
-+  /* Assume jumps are in range; the linker will catch any that aren't.  */
-+  length = jump ? 4 : 8;
-+
-+  if (fragp->fr_symbol != NULL
-+      && S_IS_DEFINED (fragp->fr_symbol)
-+      && sec == S_GET_SEGMENT (fragp->fr_symbol))
-+    {
-+      offsetT val = S_GET_VALUE (fragp->fr_symbol) + fragp->fr_offset;
-+      bfd_vma rvc_range = jump ? RVC_JUMP_REACH : RVC_BRANCH_REACH;
-+      val -= fragp->fr_address + fragp->fr_fix;
-+
-+      if (rvc && (bfd_vma)(val + rvc_range/2) < rvc_range)
-+	length = 2;
-+      else if ((bfd_vma)(val + RISCV_BRANCH_REACH/2) < RISCV_BRANCH_REACH)
-+	length = 4;
-+      else if (!jump && rvc)
-+	length = 6;
-+    }
-+
-+  if (update)
-+    fragp->fr_subtype = RELAX_BRANCH_ENCODE (jump, rvc, length);
-+
-+  return length;
-+}
-+
-+struct regname {
-+  const char *name;
-+  unsigned int num;
-+};
-+
-+enum reg_class {
-+  RCLASS_GPR,
-+  RCLASS_FPR,
-+  RCLASS_CSR,
-+  RCLASS_MAX
-+};
-+
-+static struct hash_control *reg_names_hash = NULL;
-+
-+#define ENCODE_REG_HASH(cls, n) (void *)(uintptr_t)((n) * RCLASS_MAX + (cls) + 1)
-+#define DECODE_REG_CLASS(hash) (((uintptr_t)(hash) - 1) % RCLASS_MAX)
-+#define DECODE_REG_NUM(hash) (((uintptr_t)(hash) - 1) / RCLASS_MAX)
-+
-+static void
-+hash_reg_name (enum reg_class class, const char *name, unsigned n)
-+{
-+  void *hash = ENCODE_REG_HASH (class, n);
-+  const char *retval = hash_insert (reg_names_hash, name, hash);
-+
-+  if (retval != NULL)
-+    as_fatal (_("internal error: can't hash `%s': %s"), name, retval);
-+}
-+
-+static void
-+hash_reg_names (enum reg_class class, const char * const names[], unsigned n)
-+{
-+  unsigned i;
-+
-+  for (i = 0; i < n; i++)
-+    hash_reg_name (class, names[i], i);
-+}
-+
-+static unsigned int
-+reg_lookup_internal (const char *s, enum reg_class class)
-+{
-+  struct regname *r = (struct regname *) hash_find (reg_names_hash, s);
-+  if (r == NULL || DECODE_REG_CLASS (r) != class)
-+    return -1;
-+  return DECODE_REG_NUM (r);
-+}
-+
-+static int
-+reg_lookup (char **s, enum reg_class class, unsigned int *regnop)
-+{
-+  char *e;
-+  char save_c;
-+  int reg = -1;
-+
-+  /* Find end of name.  */
-+  e = *s;
-+  if (is_name_beginner (*e))
-+    ++e;
-+  while (is_part_of_name (*e))
-+    ++e;
-+
-+  /* Terminate name.  */
-+  save_c = *e;
-+  *e = '\0';
-+
-+  /* Look for the register.  Advance to next token if one was recognized.  */
-+  if ((reg = reg_lookup_internal (*s, class)) >= 0)
-+    *s = e;
-+
-+  *e = save_c;
-+  if (regnop)
-+    *regnop = reg;
-+  return reg >= 0;
-+}
-+
-+static int
-+arg_lookup (char **s, const char *const *array, size_t size, unsigned *regnop)
-+{
-+  const char *p = strchr (*s, ',');
-+  size_t i, len = p ? (size_t)(p - *s) : strlen (*s);
-+
-+  for (i = 0; i < size; i++)
-+    if (array[i] != NULL && strncmp (array[i], *s, len) == 0)
-+      {
-+	*regnop = i;
-+	*s += len;
-+	return 1;
-+      }
-+
-+  return 0;
-+}
-+
-+/* For consistency checking, verify that all bits are specified either
-+   by the match/mask part of the instruction definition, or by the
-+   operand list.  */
-+static int
-+validate_riscv_insn (const struct riscv_opcode *opc)
-+{
-+  const char *p = opc->args;
-+  char c;
-+  insn_t used_bits = opc->mask;
-+  int insn_width = 8 * riscv_insn_length (opc->match);
-+  insn_t required_bits = ~0ULL >> (64 - insn_width);
-+
-+  if ((used_bits & opc->match) != (opc->match & required_bits))
-+    {
-+      as_bad (_("internal: bad RISC-V opcode (mask error): %s %s"),
-+	      opc->name, opc->args);
-+      return 0;
-+    }
-+
-+#define USE_BITS(mask,shift)	(used_bits |= ((insn_t)(mask) << (shift)))
-+  while (*p)
-+    switch (c = *p++)
-+      {
-+      /* Xcustom */
-+      case '^':
-+	switch (c = *p++)
-+	  {
-+	  case 'd': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
-+	  case 's': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
-+	  case 't': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
-+	  case 'j': USE_BITS (OP_MASK_CUSTOM_IMM, OP_SH_CUSTOM_IMM); break;
-+	  }
-+	break;
-+      case 'C': /* RVC */
-+	switch (c = *p++)
-+	  {
-+	  case 'a': used_bits |= ENCODE_RVC_J_IMM(-1U); break;
-+	  case 'c': break; /* RS1, constrained to equal sp */
-+	  case 'i': used_bits |= ENCODE_RVC_SIMM3(-1U); break;
-+	  case 'j': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+	  case 'k': used_bits |= ENCODE_RVC_LW_IMM(-1U); break;
-+	  case 'l': used_bits |= ENCODE_RVC_LD_IMM(-1U); break;
-+	  case 'm': used_bits |= ENCODE_RVC_LWSP_IMM(-1U); break;
-+	  case 'n': used_bits |= ENCODE_RVC_LDSP_IMM(-1U); break;
-+	  case 'p': used_bits |= ENCODE_RVC_B_IMM(-1U); break;
-+	  case 's': USE_BITS (OP_MASK_CRS1S, OP_SH_CRS1S); break;
-+	  case 't': USE_BITS (OP_MASK_CRS2S, OP_SH_CRS2S); break;
-+	  case 'u': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+	  case 'v': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+	  case 'w': break; /* RS1S, constrained to equal RD */
-+	  case 'x': break; /* RS2S, constrained to equal RD */
-+	  case 'K': used_bits |= ENCODE_RVC_ADDI4SPN_IMM(-1U); break;
-+	  case 'L': used_bits |= ENCODE_RVC_ADDI16SP_IMM(-1U); break;
-+	  case 'M': used_bits |= ENCODE_RVC_SWSP_IMM(-1U); break;
-+	  case 'N': used_bits |= ENCODE_RVC_SDSP_IMM(-1U); break;
-+	  case 'U': break; /* RS1, constrained to equal RD */
-+	  case 'V': USE_BITS (OP_MASK_CRS2, OP_SH_CRS2); break;
-+	  case '<': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+	  case '>': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+	  case 'T': USE_BITS (OP_MASK_CRS2, OP_SH_CRS2); break;
-+	  case 'D': USE_BITS (OP_MASK_CRS2S, OP_SH_CRS2S); break;
-+	  default:
-+	    as_bad (_("internal: bad RISC-V opcode (unknown operand type `C%c'): %s %s"),
-+		    c, opc->name, opc->args);
-+	    return 0;
-+	  }
-+	break;
-+      case ',': break;
-+      case '(': break;
-+      case ')': break;
-+      case '<': USE_BITS (OP_MASK_SHAMTW,	OP_SH_SHAMTW);	break;
-+      case '>':	USE_BITS (OP_MASK_SHAMT,	OP_SH_SHAMT);	break;
-+      case 'A': break;
-+      case 'D':	USE_BITS (OP_MASK_RD,		OP_SH_RD);	break;
-+      case 'Z':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
-+      case 'E':	USE_BITS (OP_MASK_CSR,		OP_SH_CSR);	break;
-+      case 'I': break;
-+      case 'R':	USE_BITS (OP_MASK_RS3,		OP_SH_RS3);	break;
-+      case 'S':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
-+      case 'U':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	/* fallthru */
-+      case 'T':	USE_BITS (OP_MASK_RS2,		OP_SH_RS2);	break;
-+      case 'd':	USE_BITS (OP_MASK_RD,		OP_SH_RD);	break;
-+      case 'm':	USE_BITS (OP_MASK_RM,		OP_SH_RM);	break;
-+      case 's':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
-+      case 't':	USE_BITS (OP_MASK_RS2,		OP_SH_RS2);	break;
-+      case 'P':	USE_BITS (OP_MASK_PRED,		OP_SH_PRED); break;
-+      case 'Q':	USE_BITS (OP_MASK_SUCC,		OP_SH_SUCC); break;
-+      case 'o':
-+      case 'j': used_bits |= ENCODE_ITYPE_IMM(-1U); break;
-+      case 'a':	used_bits |= ENCODE_UJTYPE_IMM(-1U); break;
-+      case 'p':	used_bits |= ENCODE_SBTYPE_IMM(-1U); break;
-+      case 'q':	used_bits |= ENCODE_STYPE_IMM(-1U); break;
-+      case 'u':	used_bits |= ENCODE_UTYPE_IMM(-1U); break;
-+      case '[': break;
-+      case ']': break;
-+      case '0': break;
-+      default:
-+	as_bad (_("internal: bad RISC-V opcode (unknown operand type `%c'): %s %s"),
-+		c, opc->name, opc->args);
-+	return 0;
-+      }
-+#undef USE_BITS
-+  if (used_bits != required_bits)
-+    {
-+      as_bad (_("internal: bad RISC-V opcode (bits 0x%lx undefined): %s %s"),
-+	      ~(long)(used_bits & required_bits), opc->name, opc->args);
-+      return 0;
-+    }
-+  return 1;
-+}
-+
-+struct percent_op_match
-+{
-+  const char *str;
-+  bfd_reloc_code_real_type reloc;
-+};
-+
-+/* This function is called once, at assembler startup time.  It should set up
-+   all the tables, etc. that the MD part of the assembler will need.  */
-+
-+void
-+md_begin (void)
-+{
-+  const char *retval = NULL;
-+  int i = 0;
-+
-+  if (! bfd_set_arch_mach (stdoutput, bfd_arch_riscv, 0))
-+    as_warn (_("Could not set architecture and machine"));
-+
-+  op_hash = hash_new ();
-+
-+  for (i = 0; i < NUMOPCODES;)
-+    {
-+      const char *name = riscv_opcodes[i].name;
-+
-+      retval = hash_insert (op_hash, name, (void *) &riscv_opcodes[i]);
-+
-+      if (retval != NULL)
-+	{
-+	  fprintf (stderr, _("internal error: can't hash `%s': %s\n"),
-+		   riscv_opcodes[i].name, retval);
-+	  /* Probably a memory allocation problem?  Give up now.  */
-+	  as_fatal (_("Broken assembler.  No assembly attempted."));
-+	}
-+      do
-+	{
-+	  if (riscv_opcodes[i].pinfo != INSN_MACRO)
-+	    {
-+	      if (!validate_riscv_insn (&riscv_opcodes[i]))
-+		as_fatal (_("Broken assembler.  No assembly attempted."));
-+	    }
-+	  ++i;
-+	}
-+      while ((i < NUMOPCODES) && !strcmp (riscv_opcodes[i].name, name));
-+    }
-+
-+  reg_names_hash = hash_new ();
-+  hash_reg_names (RCLASS_GPR, riscv_gpr_names_numeric, NGPR);
-+  hash_reg_names (RCLASS_GPR, riscv_gpr_names_abi, NGPR);
-+  hash_reg_names (RCLASS_FPR, riscv_fpr_names_numeric, NFPR);
-+  hash_reg_names (RCLASS_FPR, riscv_fpr_names_abi, NFPR);
-+
-+#define DECLARE_CSR(name, num) hash_reg_name (RCLASS_CSR, #name, num);
-+#include "opcode/riscv-opc.h"
-+#undef DECLARE_CSR
-+
-+  /* Set the default alignment for the text section.  */
-+  record_alignment (text_section, riscv_opts.rvc ? 1 : 2);
-+}
-+
-+/* Output an instruction.  IP is the instruction information.
-+   ADDRESS_EXPR is an operand of the instruction to be used with
-+   RELOC_TYPE.  */
-+
-+static void
-+append_insn (struct riscv_cl_insn *ip, expressionS *address_expr,
-+	     bfd_reloc_code_real_type reloc_type)
-+{
-+#ifdef OBJ_ELF
-+  dwarf2_emit_insn (0);
-+#endif
-+
-+  if (reloc_type != BFD_RELOC_UNUSED)
-+    {
-+      reloc_howto_type *howto;
-+
-+      gas_assert(address_expr);
-+      if (reloc_type == BFD_RELOC_12_PCREL
-+	  || reloc_type == BFD_RELOC_RISCV_JMP)
-+	{
-+	  int j = reloc_type == BFD_RELOC_RISCV_JMP;
-+	  int best_case = riscv_insn_length (ip->insn_opcode);
-+	  int worst_case = relaxed_branch_length (NULL, NULL, 0);
-+	  add_relaxed_insn (ip, worst_case, best_case,
-+			    RELAX_BRANCH_ENCODE (j, best_case == 2, worst_case),
-+			    address_expr->X_add_symbol,
-+			    address_expr->X_add_number);
-+	  return;
-+	}
-+      else if (address_expr->X_op == O_constant)
-+	{
-+	  switch (reloc_type)
-+	    {
-+	    case BFD_RELOC_32:
-+	      ip->insn_opcode |= address_expr->X_add_number;
-+	      goto append;
-+
-+	    case BFD_RELOC_RISCV_HI20:
-+	      {
-+		insn_t imm = RISCV_CONST_HIGH_PART (address_expr->X_add_number);
-+		ip->insn_opcode |= ENCODE_UTYPE_IMM (imm);
-+		goto append;
-+	      }
-+
-+	    case BFD_RELOC_RISCV_LO12_S:
-+	      ip->insn_opcode |= ENCODE_STYPE_IMM (address_expr->X_add_number);
-+	      goto append;
-+
-+	    case BFD_RELOC_RISCV_LO12_I:
-+	      ip->insn_opcode |= ENCODE_ITYPE_IMM (address_expr->X_add_number);
-+	      goto append;
-+
-+	    default:
-+	      break;
-+	    }
-+	}
-+
-+	howto = bfd_reloc_type_lookup (stdoutput, reloc_type);
-+	if (howto == NULL)
-+	  as_bad (_("Unsupported RISC-V relocation number %d"), reloc_type);
-+
-+	ip->fixp = fix_new_exp (ip->frag, ip->where,
-+				bfd_get_reloc_size (howto),
-+				address_expr, FALSE, reloc_type);
-+    }
-+
-+append:
-+  add_fixed_insn (ip);
-+  install_insn (ip);
-+}
-+
-+/* Build an instruction created by a macro expansion.  This is passed
-+   a pointer to the count of instructions created so far, an
-+   expression, the name of the instruction to build, an operand format
-+   string, and corresponding arguments.  */
-+
-+static void
-+macro_build (expressionS *ep, const char *name, const char *fmt, ...)
-+{
-+  const struct riscv_opcode *mo;
-+  struct riscv_cl_insn insn;
-+  bfd_reloc_code_real_type r;
-+  va_list args;
-+
-+  va_start (args, fmt);
-+
-+  r = BFD_RELOC_UNUSED;
-+  mo = (struct riscv_opcode *) hash_find (op_hash, name);
-+  gas_assert (mo);
-+
-+  /* Find a non-RVC variant of the instruction.  */
-+  while (riscv_insn_length (mo->match) < 4)
-+    mo++;
-+  gas_assert (strcmp (name, mo->name) == 0);
-+
-+  create_insn (&insn, mo);
-+  for (;;)
-+    {
-+      switch (*fmt++)
-+	{
-+	case 'd':
-+	  INSERT_OPERAND (RD, insn, va_arg (args, int));
-+	  continue;
-+
-+	case 's':
-+	  INSERT_OPERAND (RS1, insn, va_arg (args, int));
-+	  continue;
-+
-+	case 't':
-+	  INSERT_OPERAND (RS2, insn, va_arg (args, int));
-+	  continue;
-+
-+	case '>':
-+	  INSERT_OPERAND (SHAMT, insn, va_arg (args, int));
-+	  continue;
-+
-+	case 'j':
-+	case 'u':
-+	case 'q':
-+	  gas_assert (ep != NULL);
-+	  r = va_arg (args, int);
-+	  continue;
-+
-+	case '\0':
-+	  break;
-+	case ',':
-+	  continue;
-+	default:
-+	  as_fatal (_("internal error: invalid macro"));
-+	}
-+      break;
-+    }
-+  va_end (args);
-+  gas_assert (r == BFD_RELOC_UNUSED ? ep == NULL : ep != NULL);
-+
-+  append_insn (&insn, ep, r);
-+}
-+
-+/* Sign-extend 32-bit mode constants that have bit 31 set and all higher bits
-+   unset.  */
-+static void
-+normalize_constant_expr (expressionS *ex)
-+{
-+  if (xlen > 32)
-+    return;
-+  if ((ex->X_op == O_constant || ex->X_op == O_symbol)
-+      && IS_ZEXT_32BIT_NUM (ex->X_add_number))
-+    ex->X_add_number = (((ex->X_add_number & 0xffffffff) ^ 0x80000000)
-+			- 0x80000000);
-+}
-+
-+/* Warn if an expression is not a constant.  */
-+
-+static void
-+check_absolute_expr (struct riscv_cl_insn *ip, expressionS *ex)
-+{
-+  if (ex->X_op == O_big)
-+    as_bad (_("unsupported large constant"));
-+  else if (ex->X_op != O_constant)
-+    as_bad (_("Instruction %s requires absolute expression"),
-+	    ip->insn_mo->name);
-+  normalize_constant_expr (ex);
-+}
-+
-+static symbolS *
-+make_internal_label (void)
-+{
-+  return (symbolS *) local_symbol_make (FAKE_LABEL_NAME, now_seg,
-+					(valueT) frag_now_fix(), frag_now);
-+}
-+
-+/* Load an entry from the GOT.  */
-+static void
-+pcrel_access (int destreg, int tempreg, expressionS *ep,
-+	      const char *lo_insn, const char *lo_pattern,
-+	      bfd_reloc_code_real_type hi_reloc,
-+	      bfd_reloc_code_real_type lo_reloc)
-+{
-+  expressionS ep2;
-+  ep2.X_op = O_symbol;
-+  ep2.X_add_symbol = make_internal_label ();
-+  ep2.X_add_number = 0;
-+
-+  macro_build (ep, "auipc", "d,u", tempreg, hi_reloc);
-+  macro_build (&ep2, lo_insn, lo_pattern, destreg, tempreg, lo_reloc);
-+}
-+
-+static void
-+pcrel_load (int destreg, int tempreg, expressionS *ep, const char *lo_insn,
-+	    bfd_reloc_code_real_type hi_reloc,
-+	    bfd_reloc_code_real_type lo_reloc)
-+{
-+  pcrel_access (destreg, tempreg, ep, lo_insn, "d,s,j", hi_reloc, lo_reloc);
-+}
-+
-+static void
-+pcrel_store (int srcreg, int tempreg, expressionS *ep, const char *lo_insn,
-+	     bfd_reloc_code_real_type hi_reloc,
-+	     bfd_reloc_code_real_type lo_reloc)
-+{
-+  pcrel_access (srcreg, tempreg, ep, lo_insn, "t,s,q", hi_reloc, lo_reloc);
-+}
-+
-+/* PC-relative function call using AUIPC/JALR, relaxed to JAL.  */
-+static void
-+riscv_call (int destreg, int tempreg, expressionS *ep,
-+	    bfd_reloc_code_real_type reloc)
-+{
-+  macro_build (ep, "auipc", "d,u", tempreg, reloc);
-+  macro_build (NULL, "jalr", "d,s", destreg, tempreg);
-+}
-+
-+/* Load an integer constant into a register.  */
-+
-+static void
-+load_const (int reg, expressionS *ep)
-+{
-+  int shift = RISCV_IMM_BITS;
-+  expressionS upper = *ep, lower = *ep;
-+  lower.X_add_number = (int32_t) ep->X_add_number << (32-shift) >> (32-shift);
-+  upper.X_add_number -= lower.X_add_number;
-+
-+  if (ep->X_op != O_constant)
-+    {
-+      as_bad (_("unsupported large constant"));
-+      return;
-+    }
-+
-+  if (xlen > 32 && !IS_SEXT_32BIT_NUM(ep->X_add_number))
-+    {
-+      /* Reduce to a signed 32-bit constant using SLLI and ADDI, which
-+	 is not optimal but also not so bad.  */
-+      while (((upper.X_add_number >> shift) & 1) == 0)
-+	shift++;
-+
-+      upper.X_add_number = (int64_t) upper.X_add_number >> shift;
-+      load_const(reg, &upper);
-+
-+      macro_build (NULL, "slli", "d,s,>", reg, reg, shift);
-+      if (lower.X_add_number != 0)
-+	macro_build (&lower, "addi", "d,s,j", reg, reg, BFD_RELOC_RISCV_LO12_I);
-+    }
-+  else
-+    {
-+      int hi_reg = 0;
-+
-+      if (upper.X_add_number != 0)
-+	{
-+	  macro_build (ep, "lui", "d,u", reg, BFD_RELOC_RISCV_HI20);
-+	  hi_reg = reg;
-+	}
-+
-+      if (lower.X_add_number != 0 || hi_reg == 0)
-+	macro_build (ep, ADD32_INSN, "d,s,j", reg, hi_reg,
-+		     BFD_RELOC_RISCV_LO12_I);
-+    }
-+}
-+
-+/* Expand RISC-V assembly macros into one or more instructions.  */
-+static void
-+macro (struct riscv_cl_insn *ip, expressionS *imm_expr,
-+       bfd_reloc_code_real_type *imm_reloc)
-+{
-+  int rd = (ip->insn_opcode >> OP_SH_RD) & OP_MASK_RD;
-+  int rs1 = (ip->insn_opcode >> OP_SH_RS1) & OP_MASK_RS1;
-+  int rs2 = (ip->insn_opcode >> OP_SH_RS2) & OP_MASK_RS2;
-+  int mask = ip->insn_mo->mask;
-+
-+  switch (mask)
-+    {
-+    case M_LI:
-+      load_const (rd, imm_expr);
-+      break;
-+
-+    case M_LA:
-+    case M_LLA:
-+      /* Load the address of a symbol into a register.  */
-+      if (!IS_SEXT_32BIT_NUM (imm_expr->X_add_number))
-+	as_bad(_("offset too large"));
-+
-+      if (imm_expr->X_op == O_constant)
-+	load_const (rd, imm_expr);
-+      else if (riscv_opts.pic && mask == M_LA) /* Global PIC symbol */
-+	pcrel_load (rd, rd, imm_expr, LOAD_ADDRESS_INSN,
-+		    BFD_RELOC_RISCV_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      else /* Local PIC symbol, or any non-PIC symbol */
-+	pcrel_load (rd, rd, imm_expr, "addi",
-+		    BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LA_TLS_GD:
-+      pcrel_load (rd, rd, imm_expr, "addi",
-+		  BFD_RELOC_RISCV_TLS_GD_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LA_TLS_IE:
-+      pcrel_load (rd, rd, imm_expr, LOAD_ADDRESS_INSN,
-+		  BFD_RELOC_RISCV_TLS_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LB:
-+      pcrel_load (rd, rd, imm_expr, "lb",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LBU:
-+      pcrel_load (rd, rd, imm_expr, "lbu",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LH:
-+      pcrel_load (rd, rd, imm_expr, "lh",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LHU:
-+      pcrel_load (rd, rd, imm_expr, "lhu",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LW:
-+      pcrel_load (rd, rd, imm_expr, "lw",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LWU:
-+      pcrel_load (rd, rd, imm_expr, "lwu",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_LD:
-+      pcrel_load (rd, rd, imm_expr, "ld",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_FLW:
-+      pcrel_load (rd, rs1, imm_expr, "flw",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_FLD:
-+      pcrel_load (rd, rs1, imm_expr, "fld",
-+		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+      break;
-+
-+    case M_SB:
-+      pcrel_store (rs2, rs1, imm_expr, "sb",
-+		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+      break;
-+
-+    case M_SH:
-+      pcrel_store (rs2, rs1, imm_expr, "sh",
-+		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+      break;
-+
-+    case M_SW:
-+      pcrel_store (rs2, rs1, imm_expr, "sw",
-+		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+      break;
-+
-+    case M_SD:
-+      pcrel_store (rs2, rs1, imm_expr, "sd",
-+		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+      break;
-+
-+    case M_FSW:
-+      pcrel_store (rs2, rs1, imm_expr, "fsw",
-+		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+      break;
-+
-+    case M_FSD:
-+      pcrel_store (rs2, rs1, imm_expr, "fsd",
-+		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+      break;
-+
-+    case M_CALL:
-+      riscv_call (rd, rs1, imm_expr, *imm_reloc);
-+      break;
-+
-+    default:
-+      as_bad (_("Macro %s not implemented"), ip->insn_mo->name);
-+      break;
-+    }
-+}
-+
-+static const struct percent_op_match percent_op_utype[] =
-+{
-+  {"%tprel_hi", BFD_RELOC_RISCV_TPREL_HI20},
-+  {"%pcrel_hi", BFD_RELOC_RISCV_PCREL_HI20},
-+  {"%tls_ie_pcrel_hi", BFD_RELOC_RISCV_TLS_GOT_HI20},
-+  {"%tls_gd_pcrel_hi", BFD_RELOC_RISCV_TLS_GD_HI20},
-+  {"%hi", BFD_RELOC_RISCV_HI20},
-+  {0, 0}
-+};
-+
-+static const struct percent_op_match percent_op_itype[] =
-+{
-+  {"%lo", BFD_RELOC_RISCV_LO12_I},
-+  {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_I},
-+  {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_I},
-+  {0, 0}
-+};
-+
-+static const struct percent_op_match percent_op_stype[] =
-+{
-+  {"%lo", BFD_RELOC_RISCV_LO12_S},
-+  {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_S},
-+  {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_S},
-+  {0, 0}
-+};
-+
-+static const struct percent_op_match percent_op_rtype[] =
-+{
-+  {"%tprel_add", BFD_RELOC_RISCV_TPREL_ADD},
-+  {0, 0}
-+};
-+
-+/* Return true if *STR points to a relocation operator.  When returning true,
-+   move *STR over the operator and store its relocation code in *RELOC.
-+   Leave both *STR and *RELOC alone when returning false.  */
-+
-+static bfd_boolean
-+parse_relocation (char **str, bfd_reloc_code_real_type *reloc,
-+		  const struct percent_op_match *percent_op)
-+{
-+  for ( ; percent_op->str; percent_op++)
-+    if (strncasecmp (*str, percent_op->str, strlen (percent_op->str)) == 0)
-+      {
-+	int len = strlen (percent_op->str);
-+
-+	if (!ISSPACE ((*str)[len]) && (*str)[len] != '(')
-+	  continue;
-+
-+	*str += strlen (percent_op->str);
-+	*reloc = percent_op->reloc;
-+
-+	/* Check whether the output BFD supports this relocation.
-+	   If not, issue an error and fall back on something safe.  */
-+	if (!bfd_reloc_type_lookup (stdoutput, percent_op->reloc))
-+	  {
-+	    as_bad ("relocation %s isn't supported by the current ABI",
-+		    percent_op->str);
-+	    *reloc = BFD_RELOC_UNUSED;
-+	  }
-+	return TRUE;
-+      }
-+  return FALSE;
-+}
-+
-+static void
-+my_getExpression (expressionS *ep, char *str)
-+{
-+  char *save_in;
-+
-+  save_in = input_line_pointer;
-+  input_line_pointer = str;
-+  expression (ep);
-+  expr_end = input_line_pointer;
-+  input_line_pointer = save_in;
-+}
-+
-+/* Parse string STR as a 16-bit relocatable operand.  Store the
-+   expression in *EP and the relocation, if any, in RELOC.
-+   Return the number of relocation operators used (0 or 1).
-+
-+   On exit, EXPR_END points to the first character after the expression.  */
-+
-+static size_t
-+my_getSmallExpression (expressionS *ep, bfd_reloc_code_real_type *reloc,
-+		       char *str, const struct percent_op_match *percent_op)
-+{
-+  size_t reloc_index;
-+  unsigned crux_depth, str_depth, regno;
-+  char *crux;
-+
-+  /* First, check for integer registers.  */
-+  if (reg_lookup (&str, RCLASS_GPR, &regno))
-+    {
-+      ep->X_op = O_register;
-+      ep->X_add_number = regno;
-+      return 0;
-+    }
-+
-+  /* Search for the start of the main expression.
-+     End the loop with CRUX pointing to the start
-+     of the main expression and with CRUX_DEPTH containing the number
-+     of open brackets at that point.  */
-+  reloc_index = -1;
-+  str_depth = 0;
-+  do
-+    {
-+      reloc_index++;
-+      crux = str;
-+      crux_depth = str_depth;
-+
-+      /* Skip over whitespace and brackets, keeping count of the number
-+	 of brackets.  */
-+      while (*str == ' ' || *str == '\t' || *str == '(')
-+	if (*str++ == '(')
-+	  str_depth++;
-+    }
-+  while (*str == '%'
-+	 && reloc_index < 1
-+	 && parse_relocation (&str, reloc, percent_op));
-+
-+  my_getExpression (ep, crux);
-+  str = expr_end;
-+
-+  /* Match every open bracket.  */
-+  while (crux_depth > 0 && (*str == ')' || *str == ' ' || *str == '\t'))
-+    if (*str++ == ')')
-+      crux_depth--;
-+
-+  if (crux_depth > 0)
-+    as_bad ("unclosed '('");
-+
-+  expr_end = str;
-+
-+  return reloc_index;
-+}
-+
-+/* This routine assembles an instruction into its binary format.  As a
-+   side effect, it sets the global variable imm_reloc to the type of
-+   relocation to do if one of the operands is an address expression.  */
-+
-+static const char *
-+riscv_ip (char *str, struct riscv_cl_insn *ip, expressionS *imm_expr,
-+	  bfd_reloc_code_real_type *imm_reloc)
-+{
-+  char *s;
-+  const char *args;
-+  char c = 0;
-+  struct riscv_opcode *insn, *end = &riscv_opcodes[NUMOPCODES];
-+  char *argsStart;
-+  unsigned int regno;
-+  char save_c = 0;
-+  int argnum;
-+  const struct percent_op_match *p;
-+  const char *error = "unrecognized opcode";
-+
-+  /* Parse the name of the instruction.  Terminate the string if whitespace
-+     is found so that hash_find only sees the name part of the string.  */
-+  for (s = str; *s != '\0'; ++s)
-+    if (ISSPACE (*s))
-+      {
-+	save_c = *s;
-+	*s++ = '\0';
-+	break;
-+      }
-+
-+  insn = (struct riscv_opcode *) hash_find (op_hash, str);
-+
-+  argsStart = s;
-+  for ( ; insn && insn < end && strcmp (insn->name, str) == 0; insn++)
-+    {
-+      if (!riscv_subset_supports (insn->subset))
-+	continue;
-+
-+      create_insn (ip, insn);
-+      argnum = 1;
-+
-+      imm_expr->X_op = O_absent;
-+      *imm_reloc = BFD_RELOC_UNUSED;
-+      p = percent_op_itype;
-+
-+      for (args = insn->args;; ++args)
-+	{
-+	  s += strspn (s, " \t");
-+	  switch (*args)
-+	    {
-+	    case '\0':		/* end of args */
-+	      if (insn->pinfo != INSN_MACRO)
-+		{
-+		  if (!insn->match_func (insn, ip->insn_opcode))
-+		    break;
-+		  if (riscv_insn_length (insn->match) == 2 && !riscv_opts.rvc)
-+		    break;
-+		}
-+	      if (*s != '\0')
-+		break;
-+	      /* Successful assembly.  */
-+	      error = NULL;
-+	      goto out;
-+	    /* Xcustom */
-+	    case '^':
-+	      {
-+		unsigned long max = OP_MASK_RD;
-+		my_getExpression (imm_expr, s);
-+		check_absolute_expr (ip, imm_expr);
-+		switch (*++args)
-+		  {
-+		  case 'j':
-+		    max = OP_MASK_CUSTOM_IMM;
-+		    INSERT_OPERAND (CUSTOM_IMM, *ip, imm_expr->X_add_number);
-+		    break;
-+		  case 'd':
-+		    INSERT_OPERAND (RD, *ip, imm_expr->X_add_number);
-+		    break;
-+		  case 's':
-+		    INSERT_OPERAND (RS1, *ip, imm_expr->X_add_number);
-+		    break;
-+		  case 't':
-+		    INSERT_OPERAND (RS2, *ip, imm_expr->X_add_number);
-+		    break;
-+		  }
-+		imm_expr->X_op = O_absent;
-+		s = expr_end;
-+		if ((unsigned long) imm_expr->X_add_number > max)
-+		  as_warn ("Bad custom immediate (%lu), must be at most %lu",
-+			   (unsigned long)imm_expr->X_add_number, max);
-+		continue;
-+	      }
-+
-+	    case 'C': /* RVC */
-+	      switch (*++args)
-+		{
-+		case 's': /* RS1 x8-x15 */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+		      || !(regno >= 8 && regno <= 15))
-+		    break;
-+		  INSERT_OPERAND (CRS1S, *ip, regno % 8);
-+		  continue;
-+		case 'w': /* RS1 x8-x15, constrained to equal RD x8-x15 */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+		      || EXTRACT_OPERAND (CRS1S, ip->insn_opcode) + 8 != regno)
-+		    break;
-+		  continue;
-+		case 't': /* RS2 x8-x15 */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+		      || !(regno >= 8 && regno <= 15))
-+		    break;
-+		  INSERT_OPERAND (CRS2S, *ip, regno % 8);
-+		  continue;
-+		case 'x': /* RS2 x8-x15, constrained to equal RD x8-x15 */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+		      || EXTRACT_OPERAND (CRS2S, ip->insn_opcode) + 8 != regno)
-+		    break;
-+		  continue;
-+		case 'U': /* RS1, constrained to equal RD */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+		      || EXTRACT_OPERAND (RD, ip->insn_opcode) != regno)
-+		    break;
-+		  continue;
-+		case 'V': /* RS2 */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno))
-+		    break;
-+		  INSERT_OPERAND (CRS2, *ip, regno);
-+		  continue;
-+		case 'c': /* RS1, constrained to equal sp */
-+		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+		      || regno != X_SP)
-+		    break;
-+		  continue;
-+		case '>':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || imm_expr->X_add_number <= 0
-+		      || imm_expr->X_add_number >= 64)
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+rvc_imm_done:
-+		  s = expr_end;
-+		  imm_expr->X_op = O_absent;
-+		  continue;
-+		case '<':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_IMM (imm_expr->X_add_number)
-+		      || imm_expr->X_add_number <= 0
-+		      || imm_expr->X_add_number >= 32)
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'i':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || imm_expr->X_add_number == 0
-+		      || !VALID_RVC_SIMM3 (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_SIMM3 (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'j':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || imm_expr->X_add_number == 0
-+		      || !VALID_RVC_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'k':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_LW_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_LW_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'l':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_LD_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_LD_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'm':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_LWSP_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |=
-+		    ENCODE_RVC_LWSP_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'n':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_LDSP_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |=
-+		    ENCODE_RVC_LDSP_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'K':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_ADDI4SPN_IMM (imm_expr->X_add_number)
-+		      || imm_expr->X_add_number == 0)
-+		    break;
-+		  ip->insn_opcode |=
-+		    ENCODE_RVC_ADDI4SPN_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'L':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_ADDI16SP_IMM (imm_expr->X_add_number)
-+		      || imm_expr->X_add_number == 0)
-+		    break;
-+		  ip->insn_opcode |=
-+		    ENCODE_RVC_ADDI16SP_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'M':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_SWSP_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |=
-+		    ENCODE_RVC_SWSP_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'N':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || imm_expr->X_op != O_constant
-+		      || !VALID_RVC_SDSP_IMM (imm_expr->X_add_number))
-+		    break;
-+		  ip->insn_opcode |=
-+		    ENCODE_RVC_SDSP_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'u':
-+		  p = percent_op_utype;
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p))
-+		    break;
-+rvc_lui:
-+		  if (imm_expr->X_op != O_constant
-+		      || imm_expr->X_add_number <= 0
-+		      || imm_expr->X_add_number >= RISCV_BIGIMM_REACH
-+		      || (imm_expr->X_add_number >= RISCV_RVC_IMM_REACH / 2
-+			  && imm_expr->X_add_number <
-+			      RISCV_BIGIMM_REACH - RISCV_RVC_IMM_REACH / 2))
-+		    break;
-+		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+		  goto rvc_imm_done;
-+		case 'v':
-+		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		      || (imm_expr->X_add_number & (RISCV_IMM_REACH - 1))
-+		      || (int32_t)imm_expr->X_add_number
-+			  != imm_expr->X_add_number)
-+		    break;
-+		  imm_expr->X_add_number =
-+		    ((uint32_t) imm_expr->X_add_number) >> RISCV_IMM_BITS;
-+		  goto rvc_lui;
-+		case 'p':
-+		  goto branch;
-+		case 'a':
-+		  goto jump;
-+		case 'D': /* floating-point RS2 x8-x15 */
-+		  if (!reg_lookup (&s, RCLASS_FPR, &regno)
-+		      || !(regno >= 8 && regno <= 15))
-+		    break;
-+		  INSERT_OPERAND (CRS2S, *ip, regno % 8);
-+		  continue;
-+		case 'T': /* floating-point RS2 */
-+		  if (!reg_lookup (&s, RCLASS_FPR, &regno))
-+		    break;
-+		  INSERT_OPERAND (CRS2, *ip, regno);
-+		  continue;
-+		default:
-+		  as_bad (_("bad RVC field specifier 'C%c'\n"), *args);
-+		}
-+	      break;
-+
-+	    case ',':
-+	      ++argnum;
-+	      if (*s++ == *args)
-+		continue;
-+	      s--;
-+	      break;
-+
-+	    case '(':
-+	    case ')':
-+	    case '[':
-+	    case ']':
-+	      if (*s++ == *args)
-+		continue;
-+	      break;
-+
-+	    case '<':		/* shift amount, 0 - 31 */
-+	      my_getExpression (imm_expr, s);
-+	      check_absolute_expr (ip, imm_expr);
-+	      if ((unsigned long) imm_expr->X_add_number > 31)
-+		as_warn (_("Improper shift amount (%lu)"),
-+			 (unsigned long) imm_expr->X_add_number);
-+	      INSERT_OPERAND (SHAMTW, *ip, imm_expr->X_add_number);
-+	      imm_expr->X_op = O_absent;
-+	      s = expr_end;
-+	      continue;
-+
-+	    case '>':		/* shift amount, 0 - (XLEN-1) */
-+	      my_getExpression (imm_expr, s);
-+	      check_absolute_expr (ip, imm_expr);
-+	      if ((unsigned long) imm_expr->X_add_number >= xlen)
-+		as_warn (_("Improper shift amount (%lu)"),
-+			 (unsigned long) imm_expr->X_add_number);
-+	      INSERT_OPERAND (SHAMT, *ip, imm_expr->X_add_number);
-+	      imm_expr->X_op = O_absent;
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'Z':		/* CSRRxI immediate */
-+	      my_getExpression (imm_expr, s);
-+	      check_absolute_expr (ip, imm_expr);
-+	      if ((unsigned long) imm_expr->X_add_number > 31)
-+		as_warn (_("Improper CSRxI immediate (%lu)"),
-+			 (unsigned long) imm_expr->X_add_number);
-+	      INSERT_OPERAND (RS1, *ip, imm_expr->X_add_number);
-+	      imm_expr->X_op = O_absent;
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'E':		/* Control register.  */
-+	      if (reg_lookup (&s, RCLASS_CSR, &regno))
-+		INSERT_OPERAND (CSR, *ip, regno);
-+	      else
-+		{
-+		  my_getExpression (imm_expr, s);
-+		  check_absolute_expr (ip, imm_expr);
-+		  if ((unsigned long) imm_expr->X_add_number > 0xfff)
-+		    as_warn(_("Improper CSR address (%lu)"),
-+			    (unsigned long) imm_expr->X_add_number);
-+		  INSERT_OPERAND (CSR, *ip, imm_expr->X_add_number);
-+		  imm_expr->X_op = O_absent;
-+		  s = expr_end;
-+		}
-+	      continue;
-+
-+	    case 'm':		/* rounding mode */
-+	      if (arg_lookup (&s, riscv_rm, ARRAY_SIZE (riscv_rm), &regno))
-+		{
-+		  INSERT_OPERAND (RM, *ip, regno);
-+		  continue;
-+		}
-+	      break;
-+
-+	    case 'P':
-+	    case 'Q':		/* fence predecessor/successor */
-+	      if (arg_lookup (&s, riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ),
-+			      &regno))
-+		{
-+		  if (*args == 'P')
-+		    INSERT_OPERAND (PRED, *ip, regno);
-+		  else
-+		    INSERT_OPERAND (SUCC, *ip, regno);
-+		  continue;
-+		}
-+	      break;
-+
-+	    case 'd':		/* destination register */
-+	    case 's':		/* source register */
-+	    case 't':		/* target register */
-+	      if (reg_lookup (&s, RCLASS_GPR, &regno))
-+		{
-+		  c = *args;
-+		  if (*s == ' ')
-+		    ++s;
-+
-+		  /* Now that we have assembled one operand, we use the args
-+		     string to figure out where it goes in the instruction.  */
-+		  switch (c)
-+		    {
-+		    case 's':
-+		      INSERT_OPERAND (RS1, *ip, regno);
-+		      break;
-+		    case 'd':
-+		      INSERT_OPERAND (RD, *ip, regno);
-+		      break;
-+		    case 't':
-+		      INSERT_OPERAND (RS2, *ip, regno);
-+		      break;
-+		    }
-+		  continue;
-+		}
-+	      break;
-+
-+	    case 'D':		/* floating point rd */
-+	    case 'S':		/* floating point rs1 */
-+	    case 'T':		/* floating point rs2 */
-+	    case 'U':		/* floating point rs1 and rs2 */
-+	    case 'R':		/* floating point rs3 */
-+	      if (reg_lookup (&s, RCLASS_FPR, &regno))
-+		{
-+		  c = *args;
-+		  if (*s == ' ')
-+		    ++s;
-+		  switch (c)
-+		    {
-+		    case 'D':
-+		      INSERT_OPERAND (RD, *ip, regno);
-+		      break;
-+		    case 'S':
-+		      INSERT_OPERAND (RS1, *ip, regno);
-+		      break;
-+		    case 'U':
-+		      INSERT_OPERAND (RS1, *ip, regno);
-+		      /* fallthru */
-+		    case 'T':
-+		      INSERT_OPERAND (RS2, *ip, regno);
-+		      break;
-+		    case 'R':
-+		      INSERT_OPERAND (RS3, *ip, regno);
-+		      break;
-+		    }
-+		  continue;
-+		}
-+
-+	      break;
-+
-+	    case 'I':
-+	      my_getExpression (imm_expr, s);
-+	      if (imm_expr->X_op != O_big
-+		  && imm_expr->X_op != O_constant)
-+		break;
-+	      normalize_constant_expr (imm_expr);
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'A':
-+	      my_getExpression (imm_expr, s);
-+	      normalize_constant_expr (imm_expr);
-+	      /* The 'A' format specifier must be a symbol. */
-+	      if (imm_expr->X_op != O_symbol)
-+	        break;
-+	      *imm_reloc = BFD_RELOC_32;
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'j': /* sign-extended immediate */
-+	      *imm_reloc = BFD_RELOC_RISCV_LO12_I;
-+	      p = percent_op_itype;
-+	      goto alu_op;
-+	    case 'q': /* store displacement */
-+	      p = percent_op_stype;
-+	      *imm_reloc = BFD_RELOC_RISCV_LO12_S;
-+	      goto load_store;
-+	    case 'o': /* load displacement */
-+	      p = percent_op_itype;
-+	      *imm_reloc = BFD_RELOC_RISCV_LO12_I;
-+	      goto load_store;
-+	    case '0': /* AMO "displacement," which must be zero */
-+	      p = percent_op_rtype;
-+	      *imm_reloc = BFD_RELOC_UNUSED;
-+load_store:
-+	      /* Check whether there is only a single bracketed expression
-+		 left.  If so, it must be the base register and the
-+		 constant must be zero.  */
-+	      imm_expr->X_op = O_constant;
-+	      imm_expr->X_add_number = 0;
-+	      if (*s == '(' && strchr (s + 1, '(') == 0)
-+		continue;
-+alu_op:
-+	      /* If this value won't fit into a 16 bit offset, then go
-+		 find a macro that will generate the 32 bit offset
-+		 code pattern.  */
-+	      if (!my_getSmallExpression (imm_expr, imm_reloc, s, p))
-+		{
-+		  normalize_constant_expr (imm_expr);
-+		  if (imm_expr->X_op != O_constant
-+		      || (*args == '0' && imm_expr->X_add_number != 0)
-+		      || imm_expr->X_add_number >= (signed)RISCV_IMM_REACH/2
-+		      || imm_expr->X_add_number < -(signed)RISCV_IMM_REACH/2)
-+		    break;
-+		}
-+
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'p':		/* pc relative offset */
-+branch:
-+	      *imm_reloc = BFD_RELOC_12_PCREL;
-+	      my_getExpression (imm_expr, s);
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'u':		/* upper 20 bits */
-+	      p = percent_op_utype;
-+	      if (!my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+		  && imm_expr->X_op == O_constant)
-+		{
-+		  if (imm_expr->X_add_number < 0
-+		      || imm_expr->X_add_number >= (signed)RISCV_BIGIMM_REACH)
-+		    as_bad (_("lui expression not in range 0..1048575"));
-+
-+		  *imm_reloc = BFD_RELOC_RISCV_HI20;
-+		  imm_expr->X_add_number <<= RISCV_IMM_BITS;
-+		}
-+	      s = expr_end;
-+	      continue;
-+
-+	    case 'a':		/* 26 bit address */
-+jump:
-+	      my_getExpression (imm_expr, s);
-+	      s = expr_end;
-+	      *imm_reloc = BFD_RELOC_RISCV_JMP;
-+	      continue;
-+
-+	    case 'c':
-+	      my_getExpression (imm_expr, s);
-+	      s = expr_end;
-+	      *imm_reloc = BFD_RELOC_RISCV_CALL;
-+	      if (*s == '@')
-+		*imm_reloc = BFD_RELOC_RISCV_CALL_PLT, s++;
-+	      continue;
-+
-+	    default:
-+	      as_fatal (_("internal error: bad argument type %c"), *args);
-+	    }
-+	  break;
-+	}
-+      s = argsStart;
-+      error = _("illegal operands");
-+    }
-+
-+out:
-+  /* Restore the character we might have clobbered above.  */
-+  if (save_c)
-+    *(argsStart - 1) = save_c;
-+
-+  return error;
-+}
-+
-+void
-+md_assemble (char *str)
-+{
-+  struct riscv_cl_insn insn;
-+  expressionS imm_expr;
-+  bfd_reloc_code_real_type imm_reloc = BFD_RELOC_UNUSED;
-+
-+  const char *error = riscv_ip (str, &insn, &imm_expr, &imm_reloc);
-+
-+  if (error)
-+    {
-+      as_bad ("%s `%s'", error, str);
-+      return;
-+    }
-+
-+  if (insn.insn_mo->pinfo == INSN_MACRO)
-+    macro (&insn, &imm_expr, &imm_reloc);
-+  else
-+    append_insn (&insn, &imm_expr, imm_reloc);
-+}
-+
-+char *
-+md_atof (int type, char *litP, int *sizeP)
-+{
-+  return ieee_md_atof (type, litP, sizeP, TARGET_BYTES_BIG_ENDIAN);
-+}
-+
-+void
-+md_number_to_chars (char *buf, valueT val, int n)
-+{
-+  number_to_chars_littleendian (buf, val, n);
-+}
-+
-+const char *md_shortopts = "O::g::G:";
-+
-+enum options
-+  {
-+    OPTION_M32 = OPTION_MD_BASE,
-+    OPTION_M64,
-+    OPTION_MARCH,
-+    OPTION_PIC,
-+    OPTION_NO_PIC,
-+    OPTION_MSOFT_FLOAT,
-+    OPTION_MHARD_FLOAT,
-+    OPTION_MRVC,
-+    OPTION_MNO_RVC,
-+    OPTION_END_OF_ENUM
-+  };
-+
-+struct option md_longopts[] =
-+{
-+  {"m32", no_argument, NULL, OPTION_M32},
-+  {"m64", no_argument, NULL, OPTION_M64},
-+  {"march", required_argument, NULL, OPTION_MARCH},
-+  {"fPIC", no_argument, NULL, OPTION_PIC},
-+  {"fpic", no_argument, NULL, OPTION_PIC},
-+  {"fno-pic", no_argument, NULL, OPTION_NO_PIC},
-+  {"mrvc", no_argument, NULL, OPTION_MRVC},
-+  {"mno-rvc", no_argument, NULL, OPTION_MNO_RVC},
-+  {"msoft-float", no_argument, NULL, OPTION_MSOFT_FLOAT},
-+  {"mhard-float", no_argument, NULL, OPTION_MHARD_FLOAT},
-+
-+  {NULL, no_argument, NULL, 0}
-+};
-+size_t md_longopts_size = sizeof (md_longopts);
-+
-+enum float_mode {
-+  FLOAT_MODE_DEFAULT,
-+  FLOAT_MODE_SOFT,
-+  FLOAT_MODE_HARD
-+};
-+static enum float_mode marg_float_mode = FLOAT_MODE_DEFAULT;
-+
-+int
-+md_parse_option (int c, char *arg)
-+{
-+  switch (c)
-+    {
-+    case OPTION_MRVC:
-+      riscv_set_rvc (TRUE);
-+      break;
-+
-+    case OPTION_MNO_RVC:
-+      riscv_set_rvc (FALSE);
-+      break;
-+
-+    case OPTION_MSOFT_FLOAT:
-+      marg_float_mode = FLOAT_MODE_SOFT;
-+      break;
-+
-+    case OPTION_MHARD_FLOAT:
-+      marg_float_mode = FLOAT_MODE_HARD;
-+      break;
-+
-+    case OPTION_M32:
-+      xlen = 32;
-+      break;
-+
-+    case OPTION_M64:
-+      xlen = 64;
-+      break;
-+
-+    case OPTION_MARCH:
-+      riscv_set_arch (arg);
-+      break;
-+
-+    case OPTION_NO_PIC:
-+      riscv_opts.pic = FALSE;
-+      break;
-+
-+    case OPTION_PIC:
-+      riscv_opts.pic = TRUE;
-+      break;
-+
-+    default:
-+      return 0;
-+    }
-+
-+  return 1;
-+}
-+
-+void
-+riscv_after_parse_args (void)
-+{
-+  struct riscv_subset *subset;
-+  enum float_mode isa_float_mode, elf_float_mode;
-+
-+  if (riscv_subsets == NULL)
-+    riscv_set_arch ("RVIMAFDXcustom");
-+
-+  if (xlen == 0)
-+    {
-+      if (strcmp (default_arch, "riscv32") == 0)
-+	xlen = 32;
-+      else if (strcmp (default_arch, "riscv64") == 0)
-+	xlen = 64;
-+      else
-+	as_bad ("unknown default architecture `%s'", default_arch);
-+    }
-+
-+  isa_float_mode = FLOAT_MODE_SOFT;
-+  for (subset = riscv_subsets; subset != NULL; subset = subset->next)
-+    {
-+       if (strcasecmp(subset->name, "F") == 0)
-+         isa_float_mode = FLOAT_MODE_HARD;
-+       if (strcasecmp(subset->name, "D") == 0)
-+         isa_float_mode = FLOAT_MODE_HARD;
-+    }
-+
-+  if (marg_float_mode == FLOAT_MODE_HARD && isa_float_mode == FLOAT_MODE_SOFT)
-+    as_bad ("Architecture doesn't allow hardfloat ABI");
-+
-+  elf_float_mode = (marg_float_mode == FLOAT_MODE_DEFAULT) ? isa_float_mode
-+                                                           : marg_float_mode;
-+
-+  switch (elf_float_mode) {
-+  case FLOAT_MODE_DEFAULT:
-+    as_bad("a specific float mode must be specified for an ELF");
-+    break;
-+
-+  case FLOAT_MODE_SOFT:
-+    elf_flags |= EF_RISCV_SOFT_FLOAT;
-+    break;
-+
-+  case FLOAT_MODE_HARD:
-+    elf_flags &= ~EF_RISCV_SOFT_FLOAT;
-+    break;
-+  }
-+}
-+
-+void
-+riscv_init_after_args (void)
-+{
-+  /* initialize opcodes */
-+  bfd_riscv_num_opcodes = bfd_riscv_num_builtin_opcodes;
-+  riscv_opcodes = (struct riscv_opcode *) riscv_builtin_opcodes;
-+}
-+
-+long
-+md_pcrel_from (fixS *fixP)
-+{
-+  return fixP->fx_where + fixP->fx_frag->fr_address;
-+}
-+
-+/* Apply a fixup to the object file.  */
-+
-+void
-+md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
-+{
-+  bfd_byte *buf = (bfd_byte *) (fixP->fx_frag->fr_literal + fixP->fx_where);
-+
-+  /* Remember value for tc_gen_reloc.  */
-+  fixP->fx_addnumber = *valP;
-+
-+  switch (fixP->fx_r_type)
-+    {
-+    case BFD_RELOC_RISCV_TLS_GOT_HI20:
-+    case BFD_RELOC_RISCV_TLS_GD_HI20:
-+    case BFD_RELOC_RISCV_TLS_DTPREL32:
-+    case BFD_RELOC_RISCV_TLS_DTPREL64:
-+    case BFD_RELOC_RISCV_TPREL_HI20:
-+    case BFD_RELOC_RISCV_TPREL_LO12_I:
-+    case BFD_RELOC_RISCV_TPREL_LO12_S:
-+    case BFD_RELOC_RISCV_TPREL_ADD:
-+      S_SET_THREAD_LOCAL (fixP->fx_addsy);
-+      /* fall through */
-+
-+    case BFD_RELOC_RISCV_GOT_HI20:
-+    case BFD_RELOC_RISCV_PCREL_HI20:
-+    case BFD_RELOC_RISCV_HI20:
-+    case BFD_RELOC_RISCV_LO12_I:
-+    case BFD_RELOC_RISCV_LO12_S:
-+    case BFD_RELOC_RISCV_ADD8:
-+    case BFD_RELOC_RISCV_ADD16:
-+    case BFD_RELOC_RISCV_ADD32:
-+    case BFD_RELOC_RISCV_ADD64:
-+    case BFD_RELOC_RISCV_SUB8:
-+    case BFD_RELOC_RISCV_SUB16:
-+    case BFD_RELOC_RISCV_SUB32:
-+    case BFD_RELOC_RISCV_SUB64:
-+      gas_assert (fixP->fx_addsy != NULL);
-+      /* Nothing needed to do.  The value comes from the reloc entry.  */
-+      break;
-+
-+    case BFD_RELOC_64:
-+    case BFD_RELOC_32:
-+    case BFD_RELOC_16:
-+    case BFD_RELOC_8:
-+      if (fixP->fx_addsy && fixP->fx_subsy)
-+	{
-+	  fixP->fx_next = xmemdup (fixP, sizeof (*fixP), sizeof (*fixP));
-+	  fixP->fx_next->fx_addsy = fixP->fx_subsy;
-+	  fixP->fx_next->fx_subsy = NULL;
-+	  fixP->fx_next->fx_offset = 0;
-+	  fixP->fx_subsy = NULL;
-+
-+	  if (fixP->fx_r_type == BFD_RELOC_64)
-+	    {
-+	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD64;
-+	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB64;
-+	    }
-+	  else if (fixP->fx_r_type == BFD_RELOC_32)
-+	    {
-+	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD32;
-+	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB32;
-+	    }
-+	  else if (fixP->fx_r_type == BFD_RELOC_16)
-+	    {
-+	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD16;
-+	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB16;
-+	    }
-+	  else
-+	    {
-+	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD8;
-+	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB8;
-+	    }
-+	}
-+      /* fall through */
-+
-+    case BFD_RELOC_RVA:
-+      /* If we are deleting this reloc entry, we must fill in the
-+	 value now.  This can happen if we have a .word which is not
-+	 resolved when it appears but is later defined.  */
-+      if (fixP->fx_addsy == NULL)
-+	{
-+	  gas_assert (fixP->fx_size <= sizeof (valueT));
-+	  md_number_to_chars ((char *) buf, *valP, fixP->fx_size);
-+	  fixP->fx_done = 1;
-+	}
-+      break;
-+
-+    case BFD_RELOC_RISCV_JMP:
-+      if (fixP->fx_addsy)
-+	{
-+	  /* Fill in a tentative value to improve objdump readability.  */
-+	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+	  bfd_vma delta = target - md_pcrel_from (fixP);
-+	  bfd_putl32 (bfd_getl32 (buf) | ENCODE_UJTYPE_IMM (delta), buf);
-+	}
-+      break;
-+
-+    case BFD_RELOC_12_PCREL:
-+      if (fixP->fx_addsy)
-+	{
-+	  /* Fill in a tentative value to improve objdump readability.  */
-+	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+	  bfd_vma delta = target - md_pcrel_from (fixP);
-+	  bfd_putl32 (bfd_getl32 (buf) | ENCODE_SBTYPE_IMM (delta), buf);
-+	}
-+      break;
-+
-+    case BFD_RELOC_RISCV_RVC_BRANCH:
-+      if (fixP->fx_addsy)
-+	{
-+	  /* Fill in a tentative value to improve objdump readability.  */
-+	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+	  bfd_vma delta = target - md_pcrel_from (fixP);
-+	  bfd_putl16 (bfd_getl16 (buf) | ENCODE_RVC_B_IMM (delta), buf);
-+	}
-+      break;
-+
-+    case BFD_RELOC_RISCV_RVC_JUMP:
-+      if (fixP->fx_addsy)
-+	{
-+	  /* Fill in a tentative value to improve objdump readability.  */
-+	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+	  bfd_vma delta = target - md_pcrel_from (fixP);
-+	  bfd_putl16 (bfd_getl16 (buf) | ENCODE_RVC_J_IMM (delta), buf);
-+	}
-+      break;
-+
-+    case BFD_RELOC_RISCV_PCREL_LO12_S:
-+    case BFD_RELOC_RISCV_PCREL_LO12_I:
-+    case BFD_RELOC_RISCV_CALL:
-+    case BFD_RELOC_RISCV_CALL_PLT:
-+    case BFD_RELOC_RISCV_ALIGN:
-+      break;
-+
-+    default:
-+      /* We ignore generic BFD relocations we don't know about.  */
-+      if (bfd_reloc_type_lookup (stdoutput, fixP->fx_r_type) != NULL)
-+	as_fatal (_("internal error: bad relocation #%d"), fixP->fx_r_type);
-+    }
-+}
-+
-+/* This structure is used to hold a stack of .option values.  */
-+
-+struct riscv_option_stack
-+{
-+  struct riscv_option_stack *next;
-+  struct riscv_set_options options;
-+};
-+
-+static struct riscv_option_stack *riscv_opts_stack;
-+
-+/* Handle the .option pseudo-op.  */
-+
-+static void
-+s_riscv_option (int x ATTRIBUTE_UNUSED)
-+{
-+  char *name = input_line_pointer, ch;
-+
-+  while (!is_end_of_line[(unsigned char) *input_line_pointer])
-+    ++input_line_pointer;
-+  ch = *input_line_pointer;
-+  *input_line_pointer = '\0';
-+
-+  if (strcmp (name, "rvc") == 0)
-+    riscv_set_rvc (TRUE);
-+  else if (strcmp (name, "norvc") == 0)
-+    riscv_set_rvc (FALSE);
-+  else if (strcmp (name, "push") == 0)
-+    {
-+      struct riscv_option_stack *s;
-+
-+      s = (struct riscv_option_stack *) xmalloc (sizeof *s);
-+      s->next = riscv_opts_stack;
-+      s->options = riscv_opts;
-+      riscv_opts_stack = s;
-+    }
-+  else if (strcmp (name, "pop") == 0)
-+    {
-+      struct riscv_option_stack *s;
-+
-+      s = riscv_opts_stack;
-+      if (s == NULL)
-+	as_bad (_(".option pop with no .option push"));
-+      else
-+	{
-+	  riscv_opts = s->options;
-+	  riscv_opts_stack = s->next;
-+	  free (s);
-+	}
-+    }
-+  else
-+    {
-+      as_warn (_("Unrecognized .option directive: %s\n"), name);
-+    }
-+  *input_line_pointer = ch;
-+  demand_empty_rest_of_line ();
-+}
-+
-+/* Handle the .dtprelword and .dtpreldword pseudo-ops.  They generate
-+   a 32-bit or 64-bit DTP-relative relocation (BYTES says which) for
-+   use in DWARF debug information.  */
-+
-+static void
-+s_dtprel (int bytes)
-+{
-+  expressionS ex;
-+  char *p;
-+
-+  expression (&ex);
-+
-+  if (ex.X_op != O_symbol)
-+    {
-+      as_bad (_("Unsupported use of %s"), (bytes == 8
-+					   ? ".dtpreldword"
-+					   : ".dtprelword"));
-+      ignore_rest_of_line ();
-+    }
-+
-+  p = frag_more (bytes);
-+  md_number_to_chars (p, 0, bytes);
-+  fix_new_exp (frag_now, p - frag_now->fr_literal, bytes, &ex, FALSE,
-+	       (bytes == 8
-+		? BFD_RELOC_RISCV_TLS_DTPREL64
-+		: BFD_RELOC_RISCV_TLS_DTPREL32));
-+
-+  demand_empty_rest_of_line ();
-+}
-+
-+/* Handle the .bss pseudo-op.  */
-+
-+static void
-+s_bss (int ignore ATTRIBUTE_UNUSED)
-+{
-+  subseg_set (bss_section, 0);
-+  demand_empty_rest_of_line ();
-+}
-+
-+/* Align to a given power of two.  */
-+
-+static void
-+s_align (int bytes_p)
-+{
-+  int fill_value = 0, fill_value_specified = 0;
-+  int min_text_alignment = riscv_opts.rvc ? 2 : 4;
-+  int alignment = get_absolute_expression(), bytes;
-+
-+  if (bytes_p)
-+    {
-+      bytes = alignment;
-+      if (bytes < 1 || (bytes & (bytes-1)) != 0)
-+	as_bad (_("alignment not a power of 2: %d"), bytes);
-+      for (alignment = 0; bytes > 1; bytes >>= 1)
-+	alignment++;
-+    }
-+
-+  bytes = 1 << alignment;
-+
-+  if (alignment < 0 || alignment > 31)
-+    as_bad (_("unsatisfiable alignment: %d"), alignment);
-+
-+  if (*input_line_pointer == ',')
-+    {
-+      ++input_line_pointer;
-+      fill_value = get_absolute_expression ();
-+      fill_value_specified = 1;
-+    }
-+
-+  if (!fill_value_specified
-+      && subseg_text_p (now_seg)
-+      && bytes > min_text_alignment)
-+    {
-+      /* Emit the worst-case NOP string.  The linker will delete any
-+	 unnecessary NOPs.  This allows us to support code alignment
-+	 in spite of linker relaxations.  */
-+      bfd_vma i, worst_case_bytes = bytes - min_text_alignment;
-+      char *nops = frag_more (worst_case_bytes);
-+      for (i = 0; i < worst_case_bytes - 2; i += 4)
-+	md_number_to_chars (nops + i, RISCV_NOP, 4);
-+      if (i < worst_case_bytes)
-+	md_number_to_chars (nops + i, RVC_NOP, 2);
-+
-+      expressionS ex;
-+      ex.X_op = O_constant;
-+      ex.X_add_number = worst_case_bytes;
-+
-+      fix_new_exp (frag_now, nops - frag_now->fr_literal, 0,
-+		   &ex, FALSE, BFD_RELOC_RISCV_ALIGN);
-+    }
-+  else if (alignment)
-+    frag_align (alignment, fill_value, 0);
-+
-+  record_alignment (now_seg, alignment);
-+
-+  demand_empty_rest_of_line ();
-+}
-+
-+int
-+md_estimate_size_before_relax (fragS *fragp, asection *segtype)
-+{
-+  return (fragp->fr_var = relaxed_branch_length (fragp, segtype, FALSE));
-+}
-+
-+/* Translate internal representation of relocation info to BFD target
-+   format.  */
-+
-+arelent *
-+tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
-+{
-+  arelent *reloc = (arelent *) xmalloc (sizeof (arelent));
-+
-+  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
-+  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
-+  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
-+  reloc->addend = fixp->fx_addnumber;
-+
-+  reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
-+  if (reloc->howto == NULL)
-+    {
-+      if ((fixp->fx_r_type == BFD_RELOC_16 || fixp->fx_r_type == BFD_RELOC_8)
-+	  && fixp->fx_addsy != NULL && fixp->fx_subsy != NULL)
-+	{
-+	  /* We don't have R_RISCV_8/16, but for this special case,
-+	     we can use R_RISCV_ADD8/16 with R_RISCV_SUB8/16.  */
-+	  return reloc;
-+	}
-+
-+      as_bad_where (fixp->fx_file, fixp->fx_line,
-+		    _("cannot represent %s relocation in object file"),
-+		    bfd_get_reloc_code_name (fixp->fx_r_type));
-+      return NULL;
-+    }
-+
-+  return reloc;
-+}
-+
-+int
-+riscv_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
-+{
-+  if (RELAX_BRANCH_P (fragp->fr_subtype))
-+    {
-+      offsetT old_var = fragp->fr_var;
-+      fragp->fr_var = relaxed_branch_length (fragp, sec, TRUE);
-+      return fragp->fr_var - old_var;
-+    }
-+
-+  return 0;
-+}
-+
-+/* Expand far branches to multi-instruction sequences.  */
-+
-+static void
-+md_convert_frag_branch (fragS *fragp)
-+{
-+  bfd_byte *buf;
-+  expressionS exp;
-+  fixS *fixp;
-+  insn_t insn;
-+  int rs1, reloc;
-+
-+  buf = (bfd_byte *)fragp->fr_literal + fragp->fr_fix;
-+
-+  exp.X_op = O_symbol;
-+  exp.X_add_symbol = fragp->fr_symbol;
-+  exp.X_add_number = fragp->fr_offset;
-+
-+  gas_assert (fragp->fr_var == RELAX_BRANCH_LENGTH (fragp->fr_subtype));
-+
-+  if (RELAX_BRANCH_RVC (fragp->fr_subtype))
-+    {
-+      switch (RELAX_BRANCH_LENGTH (fragp->fr_subtype))
-+	{
-+	  case 8:
-+	  case 4:
-+	    /* Expand the RVC branch into a RISC-V one.  */
-+	    insn = bfd_getl16 (buf);
-+	    rs1 = 8 + ((insn >> OP_SH_CRS1S) & OP_MASK_CRS1S);
-+	    if ((insn & MASK_C_J) == MATCH_C_J)
-+	      insn = MATCH_JAL;
-+	    else if ((insn & MASK_C_JAL) == MATCH_C_JAL)
-+	      insn = MATCH_JAL | (X_RA << OP_SH_RD);
-+	    else if ((insn & MASK_C_BEQZ) == MATCH_C_BEQZ)
-+	      insn = MATCH_BEQ | (rs1 << OP_SH_RS1);
-+	    else if ((insn & MASK_C_BNEZ) == MATCH_C_BNEZ)
-+	      insn = MATCH_BNE | (rs1 << OP_SH_RS1);
-+	    else
-+	      abort ();
-+	    bfd_putl32 (insn, buf);
-+	    break;
-+
-+	  case 6:
-+	    /* Invert the branch condition.  Branch over the jump.  */
-+	    insn = bfd_getl16 (buf);
-+	    insn ^= MATCH_C_BEQZ ^ MATCH_C_BNEZ;
-+	    insn |= ENCODE_RVC_B_IMM (6);
-+	    bfd_putl16 (insn, buf);
-+	    buf += 2;
-+	    goto jump;
-+
-+	  case 2:
-+	    /* Just keep the RVC branch.  */
-+	    reloc = RELAX_BRANCH_UNCOND (fragp->fr_subtype)
-+		    ? BFD_RELOC_RISCV_RVC_JUMP : BFD_RELOC_RISCV_RVC_BRANCH;
-+	    fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
-+				2, &exp, FALSE, reloc);
-+	    buf += 2;
-+	    goto done;
-+
-+	  default:
-+	    abort();
-+	}
-+    }
-+
-+  switch (RELAX_BRANCH_LENGTH (fragp->fr_subtype))
-+    {
-+    case 8:
-+      gas_assert (!RELAX_BRANCH_UNCOND (fragp->fr_subtype));
-+
-+      /* Invert the branch condition.  Branch over the jump.  */
-+      insn = bfd_getl32 (buf);
-+      insn ^= MATCH_BEQ ^ MATCH_BNE;
-+      insn |= ENCODE_SBTYPE_IMM (8);
-+      md_number_to_chars ((char *) buf, insn, 4);
-+      buf += 4;
-+
-+jump:
-+      /* Jump to the target.  */
-+      fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
-+			  4, &exp, FALSE, BFD_RELOC_RISCV_JMP);
-+      md_number_to_chars ((char *) buf, MATCH_JAL, 4);
-+      buf += 4;
-+      break;
-+
-+    case 4:
-+      reloc = RELAX_BRANCH_UNCOND (fragp->fr_subtype)
-+	      ? BFD_RELOC_RISCV_JMP : BFD_RELOC_12_PCREL;
-+      fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
-+			  4, &exp, FALSE, reloc);
-+      buf += 4;
-+      break;
-+
-+    default:
-+      abort ();
-+    }
-+
-+done:
-+  fixp->fx_file = fragp->fr_file;
-+  fixp->fx_line = fragp->fr_line;
-+
-+  gas_assert (buf == (bfd_byte *)fragp->fr_literal
-+	      + fragp->fr_fix + fragp->fr_var);
-+
-+  fragp->fr_fix += fragp->fr_var;
-+}
-+
-+/* Relax a machine dependent frag.  This returns the amount by which
-+   the current size of the frag should change.  */
-+
-+void
-+md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT asec ATTRIBUTE_UNUSED,
-+		 fragS *fragp)
-+{
-+  gas_assert (RELAX_BRANCH_P (fragp->fr_subtype));
-+  md_convert_frag_branch (fragp);
-+}
-+
-+void
-+md_show_usage (FILE *stream)
-+{
-+  fprintf (stream, _("\
-+RISC-V options:\n\
-+  -m32           assemble RV32 code\n\
-+  -m64           assemble RV64 code (default)\n\
-+  -fpic          generate position-independent code\n\
-+  -fno-pic       don't generate position-independent code (default)\n\
-+"));
-+}
-+
-+/* Standard calling conventions leave the CFA at SP on entry.  */
-+void
-+riscv_cfi_frame_initial_instructions (void)
-+{
-+  cfi_add_CFA_def_cfa_register (X_SP);
-+}
-+
-+int
-+tc_riscv_regname_to_dw2regnum (char *regname)
-+{
-+  int reg;
-+
-+  if ((reg = reg_lookup_internal (regname, RCLASS_GPR)) >= 0)
-+    return reg;
-+
-+  if ((reg = reg_lookup_internal (regname, RCLASS_FPR)) >= 0)
-+    return reg + 32;
-+
-+  as_bad (_("unknown register `%s'"), regname);
-+  return -1;
-+}
-+
-+void
-+riscv_elf_final_processing (void)
-+{
-+  elf_elfheader (stdoutput)->e_flags |= elf_flags;
-+}
-+
-+/* Pseudo-op table.  */
-+
-+static const pseudo_typeS riscv_pseudo_table[] =
-+{
-+  /* RISC-V-specific pseudo-ops.  */
-+  {"option", s_riscv_option, 0},
-+  {"half", cons, 2},
-+  {"word", cons, 4},
-+  {"dword", cons, 8},
-+  {"dtprelword", s_dtprel, 4},
-+  {"dtpreldword", s_dtprel, 8},
-+  {"bss", s_bss, 0},
-+  {"align", s_align, 0},
-+  {"p2align", s_align, 0},
-+  {"balign", s_align, 1},
-+
-+  /* leb128 doesn't work with relaxation; disallow it */
-+  {"uleb128", s_err, 0},
-+  {"sleb128", s_err, 0},
-+
-+  { NULL, NULL, 0 },
-+};
-+
-+void
-+riscv_pop_insert (void)
-+{
-+  extern void pop_insert (const pseudo_typeS *);
-+
-+  pop_insert (riscv_pseudo_table);
-+}
-diff -urN empty/gas/config/tc-riscv.h binutils-2.26.1/gas/config/tc-riscv.h
---- empty/gas/config/tc-riscv.h	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/gas/config/tc-riscv.h	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,102 @@
-+/* tc-riscv.h -- header file for tc-riscv.c.
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target.
-+
-+   This file is part of GAS.
-+
-+   GAS is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3, or (at your option)
-+   any later version.
-+
-+   GAS is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#ifndef TC_RISCV
-+#define TC_RISCV
-+
-+#include "opcode/riscv.h"
-+
-+struct frag;
-+struct expressionS;
-+
-+#define TARGET_BYTES_BIG_ENDIAN 0
-+
-+#define TARGET_ARCH bfd_arch_riscv
-+
-+#define WORKING_DOT_WORD	1
-+#define LOCAL_LABELS_FB 1
-+
-+/* Symbols named FAKE_LABEL_NAME are emitted when generating DWARF, so make
-+   sure FAKE_LABEL_NAME is printable.  It still must be distinct from any
-+   real label name.  So, append a space, which other labels can't contain.  */
-+#define FAKE_LABEL_NAME ".L0 "
-+
-+#define md_relax_frag(segment, fragp, stretch) \
-+  riscv_relax_frag(segment, fragp, stretch)
-+extern int riscv_relax_frag (asection *, struct frag *, long);
-+
-+#define md_section_align(seg,size)	(size)
-+#define md_undefined_symbol(name)	(0)
-+#define md_operand(x)
-+
-+/* FIXME: it is unclear if this is used, or if it is even correct.  */
-+#define MAX_MEM_FOR_RS_ALIGN_CODE  (1 + 2)
-+
-+/* The ISA of the target may change based on command-line arguments.  */
-+#define TARGET_FORMAT riscv_target_format()
-+extern const char *riscv_target_format (void);
-+
-+#define md_after_parse_args() riscv_after_parse_args()
-+extern void riscv_after_parse_args (void);
-+
-+#define tc_init_after_args() riscv_init_after_args()
-+extern void riscv_init_after_args (void);
-+
-+#define md_parse_long_option(arg) riscv_parse_long_option (arg)
-+extern int riscv_parse_long_option (const char *);
-+
-+/* Let the linker resolve all the relocs due to relaxation.  */
-+#define tc_fix_adjustable(fixp) 0
-+#define md_allow_local_subtract(l,r,s) 0
-+
-+/* Values passed to md_apply_fix don't include symbol values.  */
-+#define MD_APPLY_SYM_VALUE(FIX) 0
-+
-+/* Global syms must not be resolved, to support ELF shared libraries.  */
-+#define EXTERN_FORCE_RELOC			\
-+  (OUTPUT_FLAVOR == bfd_target_elf_flavour)
-+
-+#define TC_FORCE_RELOCATION_SUB_SAME(FIX, SEG) ((SEG)->flags & SEC_CODE)
-+#define TC_FORCE_RELOCATION_SUB_LOCAL(FIX, SEG) 1
-+#define TC_VALIDATE_FIX_SUB(FIX, SEG) 1
-+#define TC_FORCE_RELOCATION_LOCAL(FIX) 1
-+#define DIFF_EXPR_OK 1
-+
-+extern void riscv_pop_insert (void);
-+#define md_pop_insert()		riscv_pop_insert()
-+
-+#define TARGET_USE_CFIPOP 1
-+
-+#define tc_cfi_frame_initial_instructions riscv_cfi_frame_initial_instructions
-+extern void riscv_cfi_frame_initial_instructions (void);
-+
-+#define tc_regname_to_dw2regnum tc_riscv_regname_to_dw2regnum
-+extern int tc_riscv_regname_to_dw2regnum (char *regname);
-+
-+extern unsigned xlen;
-+#define DWARF2_DEFAULT_RETURN_COLUMN X_RA
-+#define DWARF2_CIE_DATA_ALIGNMENT (-(int) (xlen / 8))
-+
-+#define elf_tc_final_processing riscv_elf_final_processing
-+extern void riscv_elf_final_processing (void);
-+
-+#endif /* TC_RISCV */
-diff -urN empty/include/elf/riscv.h binutils-2.26.1/include/elf/riscv.h
---- empty/include/elf/riscv.h	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/include/elf/riscv.h	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,92 @@
-+/* RISC-V ELF support for BFD.
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrw Waterman <waterman at cs.berkeley.edu> at UC Berkeley.
-+   Based on MIPS ELF support for BFD, by Ian Lance Taylor.
-+
-+   This file is part of BFD, the Binary File Descriptor library.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3 of the License, or
-+   (at your option) any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+/* This file holds definitions specific to the RISCV ELF ABI.  Note
-+   that most of this is not actually implemented by BFD.  */
-+
-+#ifndef _ELF_RISCV_H
-+#define _ELF_RISCV_H
-+
-+#include "elf/reloc-macros.h"
-+#include "libiberty.h"
-+
-+/* Relocation types.  */
-+START_RELOC_NUMBERS (elf_riscv_reloc_type)
-+  /* Relocation types used by the dynamic linker.  */
-+  RELOC_NUMBER (R_RISCV_NONE, 0)
-+  RELOC_NUMBER (R_RISCV_32, 1)
-+  RELOC_NUMBER (R_RISCV_64, 2)
-+  RELOC_NUMBER (R_RISCV_RELATIVE, 3)
-+  RELOC_NUMBER (R_RISCV_COPY, 4)
-+  RELOC_NUMBER (R_RISCV_JUMP_SLOT, 5)
-+  RELOC_NUMBER (R_RISCV_TLS_DTPMOD32, 6)
-+  RELOC_NUMBER (R_RISCV_TLS_DTPMOD64, 7)
-+  RELOC_NUMBER (R_RISCV_TLS_DTPREL32, 8)
-+  RELOC_NUMBER (R_RISCV_TLS_DTPREL64, 9)
-+  RELOC_NUMBER (R_RISCV_TLS_TPREL32, 10)
-+  RELOC_NUMBER (R_RISCV_TLS_TPREL64, 11)
-+
-+  /* Relocation types not used by the dynamic linker.  */
-+  RELOC_NUMBER (R_RISCV_BRANCH, 16)
-+  RELOC_NUMBER (R_RISCV_JAL, 17)
-+  RELOC_NUMBER (R_RISCV_CALL, 18)
-+  RELOC_NUMBER (R_RISCV_CALL_PLT, 19)
-+  RELOC_NUMBER (R_RISCV_GOT_HI20, 20)
-+  RELOC_NUMBER (R_RISCV_TLS_GOT_HI20, 21)
-+  RELOC_NUMBER (R_RISCV_TLS_GD_HI20, 22)
-+  RELOC_NUMBER (R_RISCV_PCREL_HI20, 23)
-+  RELOC_NUMBER (R_RISCV_PCREL_LO12_I, 24)
-+  RELOC_NUMBER (R_RISCV_PCREL_LO12_S, 25)
-+  RELOC_NUMBER (R_RISCV_HI20, 26)
-+  RELOC_NUMBER (R_RISCV_LO12_I, 27)
-+  RELOC_NUMBER (R_RISCV_LO12_S, 28)
-+  RELOC_NUMBER (R_RISCV_TPREL_HI20, 29)
-+  RELOC_NUMBER (R_RISCV_TPREL_LO12_I, 30)
-+  RELOC_NUMBER (R_RISCV_TPREL_LO12_S, 31)
-+  RELOC_NUMBER (R_RISCV_TPREL_ADD, 32)
-+  RELOC_NUMBER (R_RISCV_ADD8, 33)
-+  RELOC_NUMBER (R_RISCV_ADD16, 34)
-+  RELOC_NUMBER (R_RISCV_ADD32, 35)
-+  RELOC_NUMBER (R_RISCV_ADD64, 36)
-+  RELOC_NUMBER (R_RISCV_SUB8, 37)
-+  RELOC_NUMBER (R_RISCV_SUB16, 38)
-+  RELOC_NUMBER (R_RISCV_SUB32, 39)
-+  RELOC_NUMBER (R_RISCV_SUB64, 40)
-+  RELOC_NUMBER (R_RISCV_GNU_VTINHERIT, 41)
-+  RELOC_NUMBER (R_RISCV_GNU_VTENTRY, 42)
-+  RELOC_NUMBER (R_RISCV_ALIGN, 43)
-+  RELOC_NUMBER (R_RISCV_RVC_BRANCH, 44)
-+  RELOC_NUMBER (R_RISCV_RVC_JUMP, 45)
-+  RELOC_NUMBER (R_RISCV_RVC_LUI, 46)
-+  RELOC_NUMBER (R_RISCV_GPREL_I, 47)
-+  RELOC_NUMBER (R_RISCV_GPREL_S, 48)
-+END_RELOC_NUMBERS (R_RISCV_max)
-+
-+/* Processor specific flags for the ELF header e_flags field.  */
-+
-+/* File may contain compressed instructions.  */
-+#define EF_RISCV_RVC 0x0001
-+
-+/* File uses the soft-float calling convention.  */
-+#define EF_RISCV_SOFT_FLOAT 0x0002
-+
-+#endif /* _ELF_RISCV_H */
-diff -urN empty/include/opcode/riscv.h binutils-2.26.1/include/opcode/riscv.h
---- empty/include/opcode/riscv.h	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/include/opcode/riscv.h	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,344 @@
-+/* riscv.h.  RISC-V opcode list for GDB, the GNU debugger.
-+   Copyright 2011
-+   Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman
-+
-+This file is part of GDB, GAS, and the GNU binutils.
-+
-+GDB, GAS, and the GNU binutils are free software; you can redistribute
-+them and/or modify them under the terms of the GNU General Public
-+License as published by the Free Software Foundation; either version
-+1, or (at your option) any later version.
-+
-+GDB, GAS, and the GNU binutils are distributed in the hope that they
-+will be useful, but WITHOUT ANY WARRANTY; without even the implied
-+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
-+the GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with this file; see the file COPYING.  If not, write to the Free
-+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
-+
-+#ifndef _RISCV_H_
-+#define _RISCV_H_
-+
-+#include "riscv-opc.h"
-+#include <stdlib.h>
-+#include <stdint.h>
-+
-+typedef uint64_t insn_t;
-+
-+static inline unsigned int riscv_insn_length (insn_t insn)
-+{
-+  if ((insn & 0x3) != 0x3) /* RVC.  */
-+    return 2;
-+  if ((insn & 0x1f) != 0x1f) /* Base ISA and extensions in 32-bit space.  */
-+    return 4;
-+  if ((insn & 0x3f) == 0x1f) /* 48-bit extensions.  */
-+    return 6;
-+  if ((insn & 0x7f) == 0x3f) /* 64-bit extensions.  */
-+    return 8;
-+  /* Longer instructions not supported at the moment.  */
-+  return 2;
-+}
-+
-+static const char * const riscv_rm[8] = {
-+  "rne", "rtz", "rdn", "rup", "rmm", 0, 0, "dyn"
-+};
-+static const char * const riscv_pred_succ[16] = {
-+  0,   "w",  "r",  "rw",  "o",  "ow",  "or",  "orw",
-+  "i", "iw", "ir", "irw", "io", "iow", "ior", "iorw",
-+};
-+
-+#define RVC_JUMP_BITS 11
-+#define RVC_JUMP_REACH ((1ULL << RVC_JUMP_BITS) * RISCV_JUMP_ALIGN)
-+
-+#define RVC_BRANCH_BITS 8
-+#define RVC_BRANCH_REACH ((1ULL << RVC_BRANCH_BITS) * RISCV_BRANCH_ALIGN)
-+
-+#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
-+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
-+
-+#define EXTRACT_ITYPE_IMM(x) \
-+  (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
-+#define EXTRACT_STYPE_IMM(x) \
-+  (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
-+#define EXTRACT_SBTYPE_IMM(x) \
-+  ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
-+#define EXTRACT_UTYPE_IMM(x) \
-+  ((RV_X(x, 12, 20) << 12) | (RV_IMM_SIGN(x) << 32))
-+#define EXTRACT_UJTYPE_IMM(x) \
-+  ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
-+#define EXTRACT_RVC_IMM(x) \
-+  (RV_X(x, 2, 5) | (-RV_X(x, 12, 1) << 5))
-+#define EXTRACT_RVC_LUI_IMM(x) \
-+  (EXTRACT_RVC_IMM (x) << RISCV_IMM_BITS)
-+#define EXTRACT_RVC_SIMM3(x) \
-+  (RV_X(x, 10, 2) | (-RV_X(x, 12, 1) << 2))
-+#define EXTRACT_RVC_ADDI4SPN_IMM(x) \
-+  ((RV_X(x, 6, 1) << 2) | (RV_X(x, 5, 1) << 3) | (RV_X(x, 11, 2) << 4) | (RV_X(x, 7, 4) << 6))
-+#define EXTRACT_RVC_ADDI16SP_IMM(x) \
-+  ((RV_X(x, 6, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 5, 1) << 6) | (RV_X(x, 3, 2) << 7) | (-RV_X(x, 12, 1) << 9))
-+#define EXTRACT_RVC_LW_IMM(x) \
-+  ((RV_X(x, 6, 1) << 2) | (RV_X(x, 10, 3) << 3) | (RV_X(x, 5, 1) << 6))
-+#define EXTRACT_RVC_LD_IMM(x) \
-+  ((RV_X(x, 10, 3) << 3) | (RV_X(x, 5, 2) << 6))
-+#define EXTRACT_RVC_LWSP_IMM(x) \
-+  ((RV_X(x, 4, 3) << 2) | (RV_X(x, 12, 1) << 5) | (RV_X(x, 2, 2) << 6))
-+#define EXTRACT_RVC_LDSP_IMM(x) \
-+  ((RV_X(x, 5, 2) << 3) | (RV_X(x, 12, 1) << 5) | (RV_X(x, 2, 3) << 6))
-+#define EXTRACT_RVC_SWSP_IMM(x) \
-+  ((RV_X(x, 9, 4) << 2) | (RV_X(x, 7, 2) << 6))
-+#define EXTRACT_RVC_SDSP_IMM(x) \
-+  ((RV_X(x, 10, 3) << 3) | (RV_X(x, 7, 3) << 6))
-+#define EXTRACT_RVC_B_IMM(x) \
-+  ((RV_X(x, 3, 2) << 1) | (RV_X(x, 10, 2) << 3) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 5, 2) << 6) | (-RV_X(x, 12, 1) << 8))
-+#define EXTRACT_RVC_J_IMM(x) \
-+  ((RV_X(x, 3, 3) << 1) | (RV_X(x, 11, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 9, 2) << 8) | (RV_X(x, 8, 1) << 10) | (-RV_X(x, 12, 1) << 11))
-+
-+#define ENCODE_ITYPE_IMM(x) \
-+  (RV_X(x, 0, 12) << 20)
-+#define ENCODE_STYPE_IMM(x) \
-+  ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
-+#define ENCODE_SBTYPE_IMM(x) \
-+  ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
-+#define ENCODE_UTYPE_IMM(x) \
-+  (RV_X(x, 12, 20) << 12)
-+#define ENCODE_UJTYPE_IMM(x) \
-+  ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
-+#define ENCODE_RVC_IMM(x) \
-+  ((RV_X(x, 0, 5) << 2) | (RV_X(x, 5, 1) << 12))
-+#define ENCODE_RVC_LUI_IMM(x) \
-+  ENCODE_RVC_IMM ((x) >> RISCV_IMM_BITS)
-+#define ENCODE_RVC_SIMM3(x) \
-+  (RV_X(x, 0, 3) << 10)
-+#define ENCODE_RVC_ADDI4SPN_IMM(x) \
-+  ((RV_X(x, 2, 1) << 6) | (RV_X(x, 3, 1) << 5) | (RV_X(x, 4, 2) << 11) | (RV_X(x, 6, 4) << 7))
-+#define ENCODE_RVC_ADDI16SP_IMM(x) \
-+  ((RV_X(x, 4, 1) << 6) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 1) << 5) | (RV_X(x, 7, 2) << 3) | (RV_X(x, 9, 1) << 12))
-+#define ENCODE_RVC_LW_IMM(x) \
-+  ((RV_X(x, 2, 1) << 6) | (RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 1) << 5))
-+#define ENCODE_RVC_LD_IMM(x) \
-+  ((RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 2) << 5))
-+#define ENCODE_RVC_LWSP_IMM(x) \
-+  ((RV_X(x, 2, 3) << 4) | (RV_X(x, 5, 1) << 12) | (RV_X(x, 6, 2) << 2))
-+#define ENCODE_RVC_LDSP_IMM(x) \
-+  ((RV_X(x, 3, 2) << 5) | (RV_X(x, 5, 1) << 12) | (RV_X(x, 6, 3) << 2))
-+#define ENCODE_RVC_SWSP_IMM(x) \
-+  ((RV_X(x, 2, 4) << 9) | (RV_X(x, 6, 2) << 7))
-+#define ENCODE_RVC_SDSP_IMM(x) \
-+  ((RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 3) << 7))
-+#define ENCODE_RVC_B_IMM(x) \
-+  ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12))
-+#define ENCODE_RVC_J_IMM(x) \
-+  ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12))
-+
-+#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
-+#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
-+#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
-+#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
-+#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
-+#define VALID_RVC_IMM(x) (EXTRACT_RVC_IMM(ENCODE_RVC_IMM(x)) == (x))
-+#define VALID_RVC_LUI_IMM(x) (EXTRACT_RVC_LUI_IMM(ENCODE_RVC_LUI_IMM(x)) == (x))
-+#define VALID_RVC_SIMM3(x) (EXTRACT_RVC_SIMM3(ENCODE_RVC_SIMM3(x)) == (x))
-+#define VALID_RVC_ADDI4SPN_IMM(x) (EXTRACT_RVC_ADDI4SPN_IMM(ENCODE_RVC_ADDI4SPN_IMM(x)) == (x))
-+#define VALID_RVC_ADDI16SP_IMM(x) (EXTRACT_RVC_ADDI16SP_IMM(ENCODE_RVC_ADDI16SP_IMM(x)) == (x))
-+#define VALID_RVC_LW_IMM(x) (EXTRACT_RVC_LW_IMM(ENCODE_RVC_LW_IMM(x)) == (x))
-+#define VALID_RVC_LD_IMM(x) (EXTRACT_RVC_LD_IMM(ENCODE_RVC_LD_IMM(x)) == (x))
-+#define VALID_RVC_LWSP_IMM(x) (EXTRACT_RVC_LWSP_IMM(ENCODE_RVC_LWSP_IMM(x)) == (x))
-+#define VALID_RVC_LDSP_IMM(x) (EXTRACT_RVC_LDSP_IMM(ENCODE_RVC_LDSP_IMM(x)) == (x))
-+#define VALID_RVC_SWSP_IMM(x) (EXTRACT_RVC_SWSP_IMM(ENCODE_RVC_SWSP_IMM(x)) == (x))
-+#define VALID_RVC_SDSP_IMM(x) (EXTRACT_RVC_SDSP_IMM(ENCODE_RVC_SDSP_IMM(x)) == (x))
-+#define VALID_RVC_B_IMM(x) (EXTRACT_RVC_B_IMM(ENCODE_RVC_B_IMM(x)) == (x))
-+#define VALID_RVC_J_IMM(x) (EXTRACT_RVC_J_IMM(ENCODE_RVC_J_IMM(x)) == (x))
-+
-+#define RISCV_RTYPE(insn, rd, rs1, rs2) \
-+  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
-+#define RISCV_ITYPE(insn, rd, rs1, imm) \
-+  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
-+#define RISCV_STYPE(insn, rs1, rs2, imm) \
-+  ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
-+#define RISCV_SBTYPE(insn, rs1, rs2, target) \
-+  ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
-+#define RISCV_UTYPE(insn, rd, bigimm) \
-+  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
-+#define RISCV_UJTYPE(insn, rd, target) \
-+  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
-+
-+#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
-+#define RVC_NOP MATCH_C_ADDI
-+
-+#define RISCV_CONST_HIGH_PART(VALUE) \
-+  (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
-+#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
-+#define RISCV_PCREL_HIGH_PART(VALUE, PC) RISCV_CONST_HIGH_PART((VALUE) - (PC))
-+#define RISCV_PCREL_LOW_PART(VALUE, PC) RISCV_CONST_LOW_PART((VALUE) - (PC))
-+
-+#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
-+#define RISCV_JUMP_ALIGN_BITS 1
-+#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
-+#define RISCV_JUMP_REACH ((1ULL << RISCV_JUMP_BITS) * RISCV_JUMP_ALIGN)
-+
-+#define RISCV_IMM_BITS 12
-+#define RISCV_BIGIMM_BITS (32 - RISCV_IMM_BITS)
-+#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS)
-+#define RISCV_BIGIMM_REACH (1LL << RISCV_BIGIMM_BITS)
-+#define RISCV_RVC_IMM_REACH (1LL << 6)
-+#define RISCV_BRANCH_BITS RISCV_IMM_BITS
-+#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS
-+#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS)
-+#define RISCV_BRANCH_REACH (RISCV_IMM_REACH * RISCV_BRANCH_ALIGN)
-+
-+/* RV fields.  */
-+
-+#define OP_MASK_OP		0x7f
-+#define OP_SH_OP		0
-+#define OP_MASK_RS2		0x1f
-+#define OP_SH_RS2		20
-+#define OP_MASK_RS1		0x1f
-+#define OP_SH_RS1		15
-+#define OP_MASK_RS3		0x1f
-+#define OP_SH_RS3		27
-+#define OP_MASK_RD		0x1f
-+#define OP_SH_RD		7
-+#define OP_MASK_SHAMT		0x3f
-+#define OP_SH_SHAMT		20
-+#define OP_MASK_SHAMTW		0x1f
-+#define OP_SH_SHAMTW		20
-+#define OP_MASK_RM		0x7
-+#define OP_SH_RM		12
-+#define OP_MASK_PRED		0xf
-+#define OP_SH_PRED		24
-+#define OP_MASK_SUCC		0xf
-+#define OP_SH_SUCC		20
-+#define OP_MASK_AQ		0x1
-+#define OP_SH_AQ		26
-+#define OP_MASK_RL		0x1
-+#define OP_SH_RL		25
-+
-+#define OP_MASK_CUSTOM_IMM	0x7f
-+#define OP_SH_CUSTOM_IMM	25
-+#define OP_MASK_CSR		0xfff
-+#define OP_SH_CSR		20
-+
-+/* RVC fields.  */
-+
-+#define OP_MASK_CRS2 0x1f
-+#define OP_SH_CRS2 2
-+#define OP_MASK_CRS1S 0x7
-+#define OP_SH_CRS1S 7
-+#define OP_MASK_CRS2S 0x7
-+#define OP_SH_CRS2S 2
-+
-+/* ABI names for selected x-registers.  */
-+
-+#define X_RA 1
-+#define X_SP 2
-+#define X_GP 3
-+#define X_TP 4
-+#define X_T0 5
-+#define X_T1 6
-+#define X_T2 7
-+#define X_T3 28
-+
-+#define NGPR 32
-+#define NFPR 32
-+
-+/* Replace bits MASK << SHIFT of STRUCT with the equivalent bits in
-+   VALUE << SHIFT.  VALUE is evaluated exactly once.  */
-+#define INSERT_BITS(STRUCT, VALUE, MASK, SHIFT) \
-+  (STRUCT) = (((STRUCT) & ~((insn_t)(MASK) << (SHIFT))) \
-+	      | ((insn_t)((VALUE) & (MASK)) << (SHIFT)))
-+
-+/* Extract bits MASK << SHIFT from STRUCT and shift them right
-+   SHIFT places.  */
-+#define EXTRACT_BITS(STRUCT, MASK, SHIFT) \
-+  (((STRUCT) >> (SHIFT)) & (MASK))
-+
-+/* Extract the operand given by FIELD from integer INSN.  */
-+#define EXTRACT_OPERAND(FIELD, INSN) \
-+  EXTRACT_BITS ((INSN), OP_MASK_##FIELD, OP_SH_##FIELD)
-+
-+/* This structure holds information for a particular instruction.  */
-+
-+struct riscv_opcode
-+{
-+  /* The name of the instruction.  */
-+  const char *name;
-+  /* The ISA subset name (I, M, A, F, D, Xextension).  */
-+  const char *subset;
-+  /* A string describing the arguments for this instruction.  */
-+  const char *args;
-+  /* The basic opcode for the instruction.  When assembling, this
-+     opcode is modified by the arguments to produce the actual opcode
-+     that is used.  If pinfo is INSN_MACRO, then this is 0.  */
-+  insn_t match;
-+  /* If pinfo is not INSN_MACRO, then this is a bit mask for the
-+     relevant portions of the opcode when disassembling.  If the
-+     actual opcode anded with the match field equals the opcode field,
-+     then we have found the correct instruction.  If pinfo is
-+     INSN_MACRO, then this field is the macro identifier.  */
-+  insn_t mask;
-+  /* A function to determine if a word corresponds to this instruction.
-+     Usually, this computes ((word & mask) == match).  */
-+  int (*match_func) (const struct riscv_opcode *op, insn_t word);
-+  /* For a macro, this is INSN_MACRO.  Otherwise, it is a collection
-+     of bits describing the instruction, notably any relevant hazard
-+     information.  */
-+  unsigned long pinfo;
-+};
-+
-+/* Instruction is a simple alias (e.g. "mv" for "addi").  */
-+#define	INSN_ALIAS		0x00000001
-+/* Instruction is actually a macro.  It should be ignored by the
-+   disassembler, and requires special treatment by the assembler.  */
-+#define INSN_MACRO		0xffffffff
-+
-+/* This is a list of macro expanded instructions.
-+
-+   _I appended means immediate
-+   _A appended means address
-+   _AB appended means address with base register
-+   _D appended means 64 bit floating point constant
-+   _S appended means 32 bit floating point constant.  */
-+
-+enum
-+{
-+  M_LA,
-+  M_LLA,
-+  M_LA_TLS_GD,
-+  M_LA_TLS_IE,
-+  M_LB,
-+  M_LBU,
-+  M_LH,
-+  M_LHU,
-+  M_LW,
-+  M_LWU,
-+  M_LD,
-+  M_SB,
-+  M_SH,
-+  M_SW,
-+  M_SD,
-+  M_FLW,
-+  M_FLD,
-+  M_FSW,
-+  M_FSD,
-+  M_CALL,
-+  M_J,
-+  M_LI,
-+  M_NUM_MACROS
-+};
-+
-+
-+extern const char * const riscv_gpr_names_numeric[NGPR];
-+extern const char * const riscv_gpr_names_abi[NGPR];
-+extern const char * const riscv_fpr_names_numeric[NFPR];
-+extern const char * const riscv_fpr_names_abi[NFPR];
-+
-+extern const struct riscv_opcode riscv_builtin_opcodes[];
-+extern const int bfd_riscv_num_builtin_opcodes;
-+extern struct riscv_opcode *riscv_opcodes;
-+extern int bfd_riscv_num_opcodes;
-+#define NUMOPCODES bfd_riscv_num_opcodes
-+
-+#endif /* _RISCV_H_ */
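
For readers skimming the header above: the OP_MASK_*/OP_SH_* pairs and the EXTRACT_OPERAND macro are the building blocks the assembler and disassembler use to pull register and immediate fields out of an instruction word. A minimal standalone sketch of that idea follows (illustrative only, not part of the patch; the encoding of "add a0, a1, a2" is assumed from the RISC-V base ISA):

    /* Sketch: extract the RD field from a 32-bit RISC-V instruction,
       mirroring the OP_MASK_*/OP_SH_* convention in the header above.  */
    #include <stdint.h>
    #include <stdio.h>

    #define OP_MASK_RD 0x1f
    #define OP_SH_RD   7

    int main (void)
    {
      uint32_t insn = 0x00c58533;                      /* add a0, a1, a2 */
      unsigned rd = (insn >> OP_SH_RD) & OP_MASK_RD;   /* EXTRACT_OPERAND (RD, insn) */
      printf ("rd = x%u\n", rd);                       /* prints "rd = x10", i.e. a0 */
      return 0;
    }
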
-diff -urN empty/include/opcode/riscv-opc.h binutils-2.26.1/include/opcode/riscv-opc.h
---- empty/include/opcode/riscv-opc.h	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/include/opcode/riscv-opc.h	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,931 @@
-+/* Automatically generated by parse-opcodes */
-+#ifndef RISCV_ENCODING_H
-+#define RISCV_ENCODING_H
-+#define MATCH_SLLI_RV32 0x1013
-+#define MASK_SLLI_RV32  0xfe00707f
-+#define MATCH_SRLI_RV32 0x5013
-+#define MASK_SRLI_RV32  0xfe00707f
-+#define MATCH_SRAI_RV32 0x40005013
-+#define MASK_SRAI_RV32  0xfe00707f
-+#define MATCH_FRFLAGS 0x102073
-+#define MASK_FRFLAGS  0xfffff07f
-+#define MATCH_FSFLAGS 0x101073
-+#define MASK_FSFLAGS  0xfff0707f
-+#define MATCH_FSFLAGSI 0x105073
-+#define MASK_FSFLAGSI  0xfff0707f
-+#define MATCH_FRRM 0x202073
-+#define MASK_FRRM  0xfffff07f
-+#define MATCH_FSRM 0x201073
-+#define MASK_FSRM  0xfff0707f
-+#define MATCH_FSRMI 0x205073
-+#define MASK_FSRMI  0xfff0707f
-+#define MATCH_FSCSR 0x301073
-+#define MASK_FSCSR  0xfff0707f
-+#define MATCH_FRCSR 0x302073
-+#define MASK_FRCSR  0xfffff07f
-+#define MATCH_RDCYCLE 0xc0002073
-+#define MASK_RDCYCLE  0xfffff07f
-+#define MATCH_RDTIME 0xc0102073
-+#define MASK_RDTIME  0xfffff07f
-+#define MATCH_RDINSTRET 0xc0202073
-+#define MASK_RDINSTRET  0xfffff07f
-+#define MATCH_RDCYCLEH 0xc8002073
-+#define MASK_RDCYCLEH  0xfffff07f
-+#define MATCH_RDTIMEH 0xc8102073
-+#define MASK_RDTIMEH  0xfffff07f
-+#define MATCH_RDINSTRETH 0xc8202073
-+#define MASK_RDINSTRETH  0xfffff07f
-+#define MATCH_ECALL 0x73
-+#define MASK_ECALL  0xffffffff
-+#define MATCH_EBREAK 0x100073
-+#define MASK_EBREAK  0xffffffff
-+#define MATCH_ERET 0x10000073
-+#define MASK_ERET  0xffffffff
-+#define MATCH_BEQ 0x63
-+#define MASK_BEQ  0x707f
-+#define MATCH_BNE 0x1063
-+#define MASK_BNE  0x707f
-+#define MATCH_BLT 0x4063
-+#define MASK_BLT  0x707f
-+#define MATCH_BGE 0x5063
-+#define MASK_BGE  0x707f
-+#define MATCH_BLTU 0x6063
-+#define MASK_BLTU  0x707f
-+#define MATCH_BGEU 0x7063
-+#define MASK_BGEU  0x707f
-+#define MATCH_JALR 0x67
-+#define MASK_JALR  0x707f
-+#define MATCH_JAL 0x6f
-+#define MASK_JAL  0x7f
-+#define MATCH_LUI 0x37
-+#define MASK_LUI  0x7f
-+#define MATCH_AUIPC 0x17
-+#define MASK_AUIPC  0x7f
-+#define MATCH_ADDI 0x13
-+#define MASK_ADDI  0x707f
-+#define MATCH_SLLI 0x1013
-+#define MASK_SLLI  0xfc00707f
-+#define MATCH_SLTI 0x2013
-+#define MASK_SLTI  0x707f
-+#define MATCH_SLTIU 0x3013
-+#define MASK_SLTIU  0x707f
-+#define MATCH_XORI 0x4013
-+#define MASK_XORI  0x707f
-+#define MATCH_SRLI 0x5013
-+#define MASK_SRLI  0xfc00707f
-+#define MATCH_SRAI 0x40005013
-+#define MASK_SRAI  0xfc00707f
-+#define MATCH_ORI 0x6013
-+#define MASK_ORI  0x707f
-+#define MATCH_ANDI 0x7013
-+#define MASK_ANDI  0x707f
-+#define MATCH_ADD 0x33
-+#define MASK_ADD  0xfe00707f
-+#define MATCH_SUB 0x40000033
-+#define MASK_SUB  0xfe00707f
-+#define MATCH_SLL 0x1033
-+#define MASK_SLL  0xfe00707f
-+#define MATCH_SLT 0x2033
-+#define MASK_SLT  0xfe00707f
-+#define MATCH_SLTU 0x3033
-+#define MASK_SLTU  0xfe00707f
-+#define MATCH_XOR 0x4033
-+#define MASK_XOR  0xfe00707f
-+#define MATCH_SRL 0x5033
-+#define MASK_SRL  0xfe00707f
-+#define MATCH_SRA 0x40005033
-+#define MASK_SRA  0xfe00707f
-+#define MATCH_OR 0x6033
-+#define MASK_OR  0xfe00707f
-+#define MATCH_AND 0x7033
-+#define MASK_AND  0xfe00707f
-+#define MATCH_ADDIW 0x1b
-+#define MASK_ADDIW  0x707f
-+#define MATCH_SLLIW 0x101b
-+#define MASK_SLLIW  0xfe00707f
-+#define MATCH_SRLIW 0x501b
-+#define MASK_SRLIW  0xfe00707f
-+#define MATCH_SRAIW 0x4000501b
-+#define MASK_SRAIW  0xfe00707f
-+#define MATCH_ADDW 0x3b
-+#define MASK_ADDW  0xfe00707f
-+#define MATCH_SUBW 0x4000003b
-+#define MASK_SUBW  0xfe00707f
-+#define MATCH_SLLW 0x103b
-+#define MASK_SLLW  0xfe00707f
-+#define MATCH_SRLW 0x503b
-+#define MASK_SRLW  0xfe00707f
-+#define MATCH_SRAW 0x4000503b
-+#define MASK_SRAW  0xfe00707f
-+#define MATCH_LB 0x3
-+#define MASK_LB  0x707f
-+#define MATCH_LH 0x1003
-+#define MASK_LH  0x707f
-+#define MATCH_LW 0x2003
-+#define MASK_LW  0x707f
-+#define MATCH_LD 0x3003
-+#define MASK_LD  0x707f
-+#define MATCH_LBU 0x4003
-+#define MASK_LBU  0x707f
-+#define MATCH_LHU 0x5003
-+#define MASK_LHU  0x707f
-+#define MATCH_LWU 0x6003
-+#define MASK_LWU  0x707f
-+#define MATCH_SB 0x23
-+#define MASK_SB  0x707f
-+#define MATCH_SH 0x1023
-+#define MASK_SH  0x707f
-+#define MATCH_SW 0x2023
-+#define MASK_SW  0x707f
-+#define MATCH_SD 0x3023
-+#define MASK_SD  0x707f
-+#define MATCH_FENCE 0xf
-+#define MASK_FENCE  0x707f
-+#define MATCH_FENCE_I 0x100f
-+#define MASK_FENCE_I  0x707f
-+#define MATCH_MUL 0x2000033
-+#define MASK_MUL  0xfe00707f
-+#define MATCH_MULH 0x2001033
-+#define MASK_MULH  0xfe00707f
-+#define MATCH_MULHSU 0x2002033
-+#define MASK_MULHSU  0xfe00707f
-+#define MATCH_MULHU 0x2003033
-+#define MASK_MULHU  0xfe00707f
-+#define MATCH_DIV 0x2004033
-+#define MASK_DIV  0xfe00707f
-+#define MATCH_DIVU 0x2005033
-+#define MASK_DIVU  0xfe00707f
-+#define MATCH_REM 0x2006033
-+#define MASK_REM  0xfe00707f
-+#define MATCH_REMU 0x2007033
-+#define MASK_REMU  0xfe00707f
-+#define MATCH_MULW 0x200003b
-+#define MASK_MULW  0xfe00707f
-+#define MATCH_DIVW 0x200403b
-+#define MASK_DIVW  0xfe00707f
-+#define MATCH_DIVUW 0x200503b
-+#define MASK_DIVUW  0xfe00707f
-+#define MATCH_REMW 0x200603b
-+#define MASK_REMW  0xfe00707f
-+#define MATCH_REMUW 0x200703b
-+#define MASK_REMUW  0xfe00707f
-+#define MATCH_AMOADD_W 0x202f
-+#define MASK_AMOADD_W  0xf800707f
-+#define MATCH_AMOXOR_W 0x2000202f
-+#define MASK_AMOXOR_W  0xf800707f
-+#define MATCH_AMOOR_W 0x4000202f
-+#define MASK_AMOOR_W  0xf800707f
-+#define MATCH_AMOAND_W 0x6000202f
-+#define MASK_AMOAND_W  0xf800707f
-+#define MATCH_AMOMIN_W 0x8000202f
-+#define MASK_AMOMIN_W  0xf800707f
-+#define MATCH_AMOMAX_W 0xa000202f
-+#define MASK_AMOMAX_W  0xf800707f
-+#define MATCH_AMOMINU_W 0xc000202f
-+#define MASK_AMOMINU_W  0xf800707f
-+#define MATCH_AMOMAXU_W 0xe000202f
-+#define MASK_AMOMAXU_W  0xf800707f
-+#define MATCH_AMOSWAP_W 0x800202f
-+#define MASK_AMOSWAP_W  0xf800707f
-+#define MATCH_LR_W 0x1000202f
-+#define MASK_LR_W  0xf9f0707f
-+#define MATCH_SC_W 0x1800202f
-+#define MASK_SC_W  0xf800707f
-+#define MATCH_AMOADD_D 0x302f
-+#define MASK_AMOADD_D  0xf800707f
-+#define MATCH_AMOXOR_D 0x2000302f
-+#define MASK_AMOXOR_D  0xf800707f
-+#define MATCH_AMOOR_D 0x4000302f
-+#define MASK_AMOOR_D  0xf800707f
-+#define MATCH_AMOAND_D 0x6000302f
-+#define MASK_AMOAND_D  0xf800707f
-+#define MATCH_AMOMIN_D 0x8000302f
-+#define MASK_AMOMIN_D  0xf800707f
-+#define MATCH_AMOMAX_D 0xa000302f
-+#define MASK_AMOMAX_D  0xf800707f
-+#define MATCH_AMOMINU_D 0xc000302f
-+#define MASK_AMOMINU_D  0xf800707f
-+#define MATCH_AMOMAXU_D 0xe000302f
-+#define MASK_AMOMAXU_D  0xf800707f
-+#define MATCH_AMOSWAP_D 0x800302f
-+#define MASK_AMOSWAP_D  0xf800707f
-+#define MATCH_LR_D 0x1000302f
-+#define MASK_LR_D  0xf9f0707f
-+#define MATCH_SC_D 0x1800302f
-+#define MASK_SC_D  0xf800707f
-+#define MATCH_SCALL 0x73
-+#define MASK_SCALL  0xffffffff
-+#define MATCH_SBREAK 0x100073
-+#define MASK_SBREAK  0xffffffff
-+#define MATCH_SRET 0x10200073
-+#define MASK_SRET  0xffffffff
-+#define MATCH_SFENCE_VM 0x10400073
-+#define MASK_SFENCE_VM  0xfff07fff
-+#define MATCH_WFI 0x10500073
-+#define MASK_WFI  0xffffffff
-+#define MATCH_CSRRW 0x1073
-+#define MASK_CSRRW  0x707f
-+#define MATCH_CSRRS 0x2073
-+#define MASK_CSRRS  0x707f
-+#define MATCH_CSRRC 0x3073
-+#define MASK_CSRRC  0x707f
-+#define MATCH_CSRRWI 0x5073
-+#define MASK_CSRRWI  0x707f
-+#define MATCH_CSRRSI 0x6073
-+#define MASK_CSRRSI  0x707f
-+#define MATCH_CSRRCI 0x7073
-+#define MASK_CSRRCI  0x707f
-+#define MATCH_FADD_S 0x53
-+#define MASK_FADD_S  0xfe00007f
-+#define MATCH_FSUB_S 0x8000053
-+#define MASK_FSUB_S  0xfe00007f
-+#define MATCH_FMUL_S 0x10000053
-+#define MASK_FMUL_S  0xfe00007f
-+#define MATCH_FDIV_S 0x18000053
-+#define MASK_FDIV_S  0xfe00007f
-+#define MATCH_FSGNJ_S 0x20000053
-+#define MASK_FSGNJ_S  0xfe00707f
-+#define MATCH_FSGNJN_S 0x20001053
-+#define MASK_FSGNJN_S  0xfe00707f
-+#define MATCH_FSGNJX_S 0x20002053
-+#define MASK_FSGNJX_S  0xfe00707f
-+#define MATCH_FMIN_S 0x28000053
-+#define MASK_FMIN_S  0xfe00707f
-+#define MATCH_FMAX_S 0x28001053
-+#define MASK_FMAX_S  0xfe00707f
-+#define MATCH_FSQRT_S 0x58000053
-+#define MASK_FSQRT_S  0xfff0007f
-+#define MATCH_FADD_D 0x2000053
-+#define MASK_FADD_D  0xfe00007f
-+#define MATCH_FSUB_D 0xa000053
-+#define MASK_FSUB_D  0xfe00007f
-+#define MATCH_FMUL_D 0x12000053
-+#define MASK_FMUL_D  0xfe00007f
-+#define MATCH_FDIV_D 0x1a000053
-+#define MASK_FDIV_D  0xfe00007f
-+#define MATCH_FSGNJ_D 0x22000053
-+#define MASK_FSGNJ_D  0xfe00707f
-+#define MATCH_FSGNJN_D 0x22001053
-+#define MASK_FSGNJN_D  0xfe00707f
-+#define MATCH_FSGNJX_D 0x22002053
-+#define MASK_FSGNJX_D  0xfe00707f
-+#define MATCH_FMIN_D 0x2a000053
-+#define MASK_FMIN_D  0xfe00707f
-+#define MATCH_FMAX_D 0x2a001053
-+#define MASK_FMAX_D  0xfe00707f
-+#define MATCH_FCVT_S_D 0x40100053
-+#define MASK_FCVT_S_D  0xfff0007f
-+#define MATCH_FCVT_D_S 0x42000053
-+#define MASK_FCVT_D_S  0xfff0007f
-+#define MATCH_FSQRT_D 0x5a000053
-+#define MASK_FSQRT_D  0xfff0007f
-+#define MATCH_FLE_S 0xa0000053
-+#define MASK_FLE_S  0xfe00707f
-+#define MATCH_FLT_S 0xa0001053
-+#define MASK_FLT_S  0xfe00707f
-+#define MATCH_FEQ_S 0xa0002053
-+#define MASK_FEQ_S  0xfe00707f
-+#define MATCH_FLE_D 0xa2000053
-+#define MASK_FLE_D  0xfe00707f
-+#define MATCH_FLT_D 0xa2001053
-+#define MASK_FLT_D  0xfe00707f
-+#define MATCH_FEQ_D 0xa2002053
-+#define MASK_FEQ_D  0xfe00707f
-+#define MATCH_FCVT_W_S 0xc0000053
-+#define MASK_FCVT_W_S  0xfff0007f
-+#define MATCH_FCVT_WU_S 0xc0100053
-+#define MASK_FCVT_WU_S  0xfff0007f
-+#define MATCH_FCVT_L_S 0xc0200053
-+#define MASK_FCVT_L_S  0xfff0007f
-+#define MATCH_FCVT_LU_S 0xc0300053
-+#define MASK_FCVT_LU_S  0xfff0007f
-+#define MATCH_FMV_X_S 0xe0000053
-+#define MASK_FMV_X_S  0xfff0707f
-+#define MATCH_FCLASS_S 0xe0001053
-+#define MASK_FCLASS_S  0xfff0707f
-+#define MATCH_FCVT_W_D 0xc2000053
-+#define MASK_FCVT_W_D  0xfff0007f
-+#define MATCH_FCVT_WU_D 0xc2100053
-+#define MASK_FCVT_WU_D  0xfff0007f
-+#define MATCH_FCVT_L_D 0xc2200053
-+#define MASK_FCVT_L_D  0xfff0007f
-+#define MATCH_FCVT_LU_D 0xc2300053
-+#define MASK_FCVT_LU_D  0xfff0007f
-+#define MATCH_FMV_X_D 0xe2000053
-+#define MASK_FMV_X_D  0xfff0707f
-+#define MATCH_FCLASS_D 0xe2001053
-+#define MASK_FCLASS_D  0xfff0707f
-+#define MATCH_FCVT_S_W 0xd0000053
-+#define MASK_FCVT_S_W  0xfff0007f
-+#define MATCH_FCVT_S_WU 0xd0100053
-+#define MASK_FCVT_S_WU  0xfff0007f
-+#define MATCH_FCVT_S_L 0xd0200053
-+#define MASK_FCVT_S_L  0xfff0007f
-+#define MATCH_FCVT_S_LU 0xd0300053
-+#define MASK_FCVT_S_LU  0xfff0007f
-+#define MATCH_FMV_S_X 0xf0000053
-+#define MASK_FMV_S_X  0xfff0707f
-+#define MATCH_FCVT_D_W 0xd2000053
-+#define MASK_FCVT_D_W  0xfff0007f
-+#define MATCH_FCVT_D_WU 0xd2100053
-+#define MASK_FCVT_D_WU  0xfff0007f
-+#define MATCH_FCVT_D_L 0xd2200053
-+#define MASK_FCVT_D_L  0xfff0007f
-+#define MATCH_FCVT_D_LU 0xd2300053
-+#define MASK_FCVT_D_LU  0xfff0007f
-+#define MATCH_FMV_D_X 0xf2000053
-+#define MASK_FMV_D_X  0xfff0707f
-+#define MATCH_FLW 0x2007
-+#define MASK_FLW  0x707f
-+#define MATCH_FLD 0x3007
-+#define MASK_FLD  0x707f
-+#define MATCH_FSW 0x2027
-+#define MASK_FSW  0x707f
-+#define MATCH_FSD 0x3027
-+#define MASK_FSD  0x707f
-+#define MATCH_FMADD_S 0x43
-+#define MASK_FMADD_S  0x600007f
-+#define MATCH_FMSUB_S 0x47
-+#define MASK_FMSUB_S  0x600007f
-+#define MATCH_FNMSUB_S 0x4b
-+#define MASK_FNMSUB_S  0x600007f
-+#define MATCH_FNMADD_S 0x4f
-+#define MASK_FNMADD_S  0x600007f
-+#define MATCH_FMADD_D 0x2000043
-+#define MASK_FMADD_D  0x600007f
-+#define MATCH_FMSUB_D 0x2000047
-+#define MASK_FMSUB_D  0x600007f
-+#define MATCH_FNMSUB_D 0x200004b
-+#define MASK_FNMSUB_D  0x600007f
-+#define MATCH_FNMADD_D 0x200004f
-+#define MASK_FNMADD_D  0x600007f
-+#define MATCH_C_ADDI4SPN 0x0
-+#define MASK_C_ADDI4SPN  0xe003
-+#define MATCH_C_FLD 0x2000
-+#define MASK_C_FLD  0xe003
-+#define MATCH_C_LW 0x4000
-+#define MASK_C_LW  0xe003
-+#define MATCH_C_FLW 0x6000
-+#define MASK_C_FLW  0xe003
-+#define MATCH_C_FSD 0xa000
-+#define MASK_C_FSD  0xe003
-+#define MATCH_C_SW 0xc000
-+#define MASK_C_SW  0xe003
-+#define MATCH_C_FSW 0xe000
-+#define MASK_C_FSW  0xe003
-+#define MATCH_C_ADDI 0x1
-+#define MASK_C_ADDI  0xe003
-+#define MATCH_C_JAL 0x2001
-+#define MASK_C_JAL  0xe003
-+#define MATCH_C_LI 0x4001
-+#define MASK_C_LI  0xe003
-+#define MATCH_C_LUI 0x6001
-+#define MASK_C_LUI  0xe003
-+#define MATCH_C_SRLI 0x8001
-+#define MASK_C_SRLI  0xec03
-+#define MATCH_C_SRAI 0x8401
-+#define MASK_C_SRAI  0xec03
-+#define MATCH_C_ANDI 0x8801
-+#define MASK_C_ANDI  0xec03
-+#define MATCH_C_SUB 0x8c01
-+#define MASK_C_SUB  0xfc63
-+#define MATCH_C_XOR 0x8c21
-+#define MASK_C_XOR  0xfc63
-+#define MATCH_C_OR 0x8c41
-+#define MASK_C_OR  0xfc63
-+#define MATCH_C_AND 0x8c61
-+#define MASK_C_AND  0xfc63
-+#define MATCH_C_SUBW 0x9c01
-+#define MASK_C_SUBW  0xfc63
-+#define MATCH_C_ADDW 0x9c21
-+#define MASK_C_ADDW  0xfc63
-+#define MATCH_C_J 0xa001
-+#define MASK_C_J  0xe003
-+#define MATCH_C_BEQZ 0xc001
-+#define MASK_C_BEQZ  0xe003
-+#define MATCH_C_BNEZ 0xe001
-+#define MASK_C_BNEZ  0xe003
-+#define MATCH_C_SLLI 0x2
-+#define MASK_C_SLLI  0xe003
-+#define MATCH_C_FLDSP 0x2002
-+#define MASK_C_FLDSP  0xe003
-+#define MATCH_C_LWSP 0x4002
-+#define MASK_C_LWSP  0xe003
-+#define MATCH_C_FLWSP 0x6002
-+#define MASK_C_FLWSP  0xe003
-+#define MATCH_C_MV 0x8002
-+#define MASK_C_MV  0xf003
-+#define MATCH_C_ADD 0x9002
-+#define MASK_C_ADD  0xf003
-+#define MATCH_C_FSDSP 0xa002
-+#define MASK_C_FSDSP  0xe003
-+#define MATCH_C_SWSP 0xc002
-+#define MASK_C_SWSP  0xe003
-+#define MATCH_C_FSWSP 0xe002
-+#define MASK_C_FSWSP  0xe003
-+#define MATCH_C_NOP 0x1
-+#define MASK_C_NOP  0xffff
-+#define MATCH_C_ADDI16SP 0x6101
-+#define MASK_C_ADDI16SP  0xef83
-+#define MATCH_C_JR 0x8002
-+#define MASK_C_JR  0xf07f
-+#define MATCH_C_JALR 0x9002
-+#define MASK_C_JALR  0xf07f
-+#define MATCH_C_EBREAK 0x9002
-+#define MASK_C_EBREAK  0xffff
-+#define MATCH_C_LD 0x6000
-+#define MASK_C_LD  0xe003
-+#define MATCH_C_SD 0xe000
-+#define MASK_C_SD  0xe003
-+#define MATCH_C_ADDIW 0x2001
-+#define MASK_C_ADDIW  0xe003
-+#define MATCH_C_LDSP 0x6002
-+#define MASK_C_LDSP  0xe003
-+#define MATCH_C_SDSP 0xe002
-+#define MASK_C_SDSP  0xe003
-+#define MATCH_CUSTOM0 0xb
-+#define MASK_CUSTOM0  0x707f
-+#define MATCH_CUSTOM0_RS1 0x200b
-+#define MASK_CUSTOM0_RS1  0x707f
-+#define MATCH_CUSTOM0_RS1_RS2 0x300b
-+#define MASK_CUSTOM0_RS1_RS2  0x707f
-+#define MATCH_CUSTOM0_RD 0x400b
-+#define MASK_CUSTOM0_RD  0x707f
-+#define MATCH_CUSTOM0_RD_RS1 0x600b
-+#define MASK_CUSTOM0_RD_RS1  0x707f
-+#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
-+#define MASK_CUSTOM0_RD_RS1_RS2  0x707f
-+#define MATCH_CUSTOM1 0x2b
-+#define MASK_CUSTOM1  0x707f
-+#define MATCH_CUSTOM1_RS1 0x202b
-+#define MASK_CUSTOM1_RS1  0x707f
-+#define MATCH_CUSTOM1_RS1_RS2 0x302b
-+#define MASK_CUSTOM1_RS1_RS2  0x707f
-+#define MATCH_CUSTOM1_RD 0x402b
-+#define MASK_CUSTOM1_RD  0x707f
-+#define MATCH_CUSTOM1_RD_RS1 0x602b
-+#define MASK_CUSTOM1_RD_RS1  0x707f
-+#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
-+#define MASK_CUSTOM1_RD_RS1_RS2  0x707f
-+#define MATCH_CUSTOM2 0x5b
-+#define MASK_CUSTOM2  0x707f
-+#define MATCH_CUSTOM2_RS1 0x205b
-+#define MASK_CUSTOM2_RS1  0x707f
-+#define MATCH_CUSTOM2_RS1_RS2 0x305b
-+#define MASK_CUSTOM2_RS1_RS2  0x707f
-+#define MATCH_CUSTOM2_RD 0x405b
-+#define MASK_CUSTOM2_RD  0x707f
-+#define MATCH_CUSTOM2_RD_RS1 0x605b
-+#define MASK_CUSTOM2_RD_RS1  0x707f
-+#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
-+#define MASK_CUSTOM2_RD_RS1_RS2  0x707f
-+#define MATCH_CUSTOM3 0x7b
-+#define MASK_CUSTOM3  0x707f
-+#define MATCH_CUSTOM3_RS1 0x207b
-+#define MASK_CUSTOM3_RS1  0x707f
-+#define MATCH_CUSTOM3_RS1_RS2 0x307b
-+#define MASK_CUSTOM3_RS1_RS2  0x707f
-+#define MATCH_CUSTOM3_RD 0x407b
-+#define MASK_CUSTOM3_RD  0x707f
-+#define MATCH_CUSTOM3_RD_RS1 0x607b
-+#define MASK_CUSTOM3_RD_RS1  0x707f
-+#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
-+#define MASK_CUSTOM3_RD_RS1_RS2  0x707f
-+#define CSR_FFLAGS 0x1
-+#define CSR_FRM 0x2
-+#define CSR_FCSR 0x3
-+#define CSR_CYCLE 0xc00
-+#define CSR_TIME 0xc01
-+#define CSR_INSTRET 0xc02
-+#define CSR_STATS 0xc0
-+#define CSR_UARCH0 0xcc0
-+#define CSR_UARCH1 0xcc1
-+#define CSR_UARCH2 0xcc2
-+#define CSR_UARCH3 0xcc3
-+#define CSR_UARCH4 0xcc4
-+#define CSR_UARCH5 0xcc5
-+#define CSR_UARCH6 0xcc6
-+#define CSR_UARCH7 0xcc7
-+#define CSR_UARCH8 0xcc8
-+#define CSR_UARCH9 0xcc9
-+#define CSR_UARCH10 0xcca
-+#define CSR_UARCH11 0xccb
-+#define CSR_UARCH12 0xccc
-+#define CSR_UARCH13 0xccd
-+#define CSR_UARCH14 0xcce
-+#define CSR_UARCH15 0xccf
-+#define CSR_SSTATUS 0x100
-+#define CSR_SIE 0x104
-+#define CSR_STVEC 0x105
-+#define CSR_SSCRATCH 0x140
-+#define CSR_SEPC 0x141
-+#define CSR_SCAUSE 0x142
-+#define CSR_SBADADDR 0x143
-+#define CSR_SIP 0x144
-+#define CSR_SPTBR 0x180
-+#define CSR_SASID 0x181
-+#define CSR_SCYCLE 0xd00
-+#define CSR_STIME 0xd01
-+#define CSR_SINSTRET 0xd02
-+#define CSR_MSTATUS 0x300
-+#define CSR_MEDELEG 0x302
-+#define CSR_MIDELEG 0x303
-+#define CSR_MIE 0x304
-+#define CSR_MTVEC 0x305
-+#define CSR_MTIMECMP 0x321
-+#define CSR_MSCRATCH 0x340
-+#define CSR_MEPC 0x341
-+#define CSR_MCAUSE 0x342
-+#define CSR_MBADADDR 0x343
-+#define CSR_MIP 0x344
-+#define CSR_MIPI 0x345
-+#define CSR_MUCOUNTEREN 0x310
-+#define CSR_MSCOUNTEREN 0x311
-+#define CSR_MUCYCLE_DELTA 0x700
-+#define CSR_MUTIME_DELTA 0x701
-+#define CSR_MUINSTRET_DELTA 0x702
-+#define CSR_MSCYCLE_DELTA 0x704
-+#define CSR_MSTIME_DELTA 0x705
-+#define CSR_MSINSTRET_DELTA 0x706
-+#define CSR_MCYCLE 0xf00
-+#define CSR_MTIME 0xf01
-+#define CSR_MINSTRET 0xf02
-+#define CSR_MISA 0xf10
-+#define CSR_MVENDORID 0xf11
-+#define CSR_MARCHID 0xf12
-+#define CSR_MIMPID 0xf13
-+#define CSR_MCFGADDR 0xf14
-+#define CSR_MHARTID 0xf15
-+#define CSR_MTOHOST 0x7c0
-+#define CSR_MFROMHOST 0x7c1
-+#define CSR_MRESET 0x7c2
-+#define CSR_CYCLEH 0xc80
-+#define CSR_TIMEH 0xc81
-+#define CSR_INSTRETH 0xc82
-+#define CSR_MTIMECMPH 0x361
-+#define CSR_MUCYCLE_DELTAH 0x780
-+#define CSR_MUTIME_DELTAH 0x781
-+#define CSR_MUINSTRET_DELTAH 0x782
-+#define CSR_MSCYCLE_DELTAH 0x784
-+#define CSR_MSTIME_DELTAH 0x785
-+#define CSR_MSINSTRET_DELTAH 0x786
-+#define CSR_MCYCLEH 0xf80
-+#define CSR_MTIMEH 0xf81
-+#define CSR_MINSTRETH 0xf82
-+#define CAUSE_MISALIGNED_FETCH 0x0
-+#define CAUSE_FAULT_FETCH 0x1
-+#define CAUSE_ILLEGAL_INSTRUCTION 0x2
-+#define CAUSE_BREAKPOINT 0x3
-+#define CAUSE_MISALIGNED_LOAD 0x4
-+#define CAUSE_FAULT_LOAD 0x5
-+#define CAUSE_MISALIGNED_STORE 0x6
-+#define CAUSE_FAULT_STORE 0x7
-+#define CAUSE_USER_ECALL 0x8
-+#define CAUSE_SUPERVISOR_ECALL 0x9
-+#define CAUSE_HYPERVISOR_ECALL 0xa
-+#define CAUSE_MACHINE_ECALL 0xb
-+#endif
-+#ifdef DECLARE_INSN
-+DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
-+DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
-+DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
-+DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
-+DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
-+DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
-+DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
-+DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
-+DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
-+DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
-+DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
-+DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
-+DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
-+DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
-+DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
-+DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
-+DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
-+DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
-+DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
-+DECLARE_INSN(eret, MATCH_ERET, MASK_ERET)
-+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
-+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
-+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
-+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
-+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
-+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
-+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
-+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
-+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
-+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
-+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
-+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
-+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
-+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
-+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
-+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
-+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
-+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
-+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
-+DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
-+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
-+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
-+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
-+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
-+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
-+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
-+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
-+DECLARE_INSN(or, MATCH_OR, MASK_OR)
-+DECLARE_INSN(and, MATCH_AND, MASK_AND)
-+DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
-+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
-+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
-+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
-+DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
-+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
-+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
-+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
-+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
-+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
-+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
-+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
-+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
-+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
-+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
-+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
-+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
-+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
-+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
-+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
-+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
-+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
-+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
-+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
-+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
-+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
-+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
-+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
-+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
-+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
-+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
-+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
-+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
-+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
-+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
-+DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
-+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
-+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
-+DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
-+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
-+DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
-+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
-+DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
-+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
-+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
-+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
-+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
-+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
-+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
-+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
-+DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
-+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
-+DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
-+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
-+DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
-+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
-+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
-+DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
-+DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
-+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
-+DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM)
-+DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
-+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
-+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
-+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
-+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
-+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
-+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
-+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
-+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
-+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
-+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
-+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
-+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
-+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
-+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
-+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
-+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
-+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
-+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
-+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
-+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
-+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
-+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
-+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
-+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
-+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
-+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
-+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
-+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
-+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
-+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
-+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
-+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
-+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
-+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
-+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
-+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
-+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
-+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
-+DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
-+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
-+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
-+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
-+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
-+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
-+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
-+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
-+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
-+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
-+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
-+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
-+DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
-+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
-+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
-+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
-+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
-+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
-+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
-+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
-+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
-+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
-+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
-+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
-+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
-+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
-+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
-+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
-+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
-+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
-+DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
-+DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
-+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
-+DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
-+DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
-+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
-+DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
-+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
-+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
-+DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
-+DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
-+DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
-+DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
-+DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
-+DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
-+DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
-+DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
-+DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
-+DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
-+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
-+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
-+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
-+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
-+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
-+DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
-+DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
-+DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
-+DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
-+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
-+DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
-+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
-+DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
-+DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
-+DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
-+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
-+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
-+DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
-+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
-+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
-+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
-+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
-+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
-+DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
-+DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
-+DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
-+DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
-+DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
-+DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
-+DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
-+DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
-+DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
-+DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
-+DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
-+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
-+DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
-+DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
-+DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
-+DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
-+DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
-+DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
-+DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
-+DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
-+DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
-+DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
-+DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
-+DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
-+#endif
-+#ifdef DECLARE_CSR
-+DECLARE_CSR(fflags, CSR_FFLAGS)
-+DECLARE_CSR(frm, CSR_FRM)
-+DECLARE_CSR(fcsr, CSR_FCSR)
-+DECLARE_CSR(cycle, CSR_CYCLE)
-+DECLARE_CSR(time, CSR_TIME)
-+DECLARE_CSR(instret, CSR_INSTRET)
-+DECLARE_CSR(stats, CSR_STATS)
-+DECLARE_CSR(uarch0, CSR_UARCH0)
-+DECLARE_CSR(uarch1, CSR_UARCH1)
-+DECLARE_CSR(uarch2, CSR_UARCH2)
-+DECLARE_CSR(uarch3, CSR_UARCH3)
-+DECLARE_CSR(uarch4, CSR_UARCH4)
-+DECLARE_CSR(uarch5, CSR_UARCH5)
-+DECLARE_CSR(uarch6, CSR_UARCH6)
-+DECLARE_CSR(uarch7, CSR_UARCH7)
-+DECLARE_CSR(uarch8, CSR_UARCH8)
-+DECLARE_CSR(uarch9, CSR_UARCH9)
-+DECLARE_CSR(uarch10, CSR_UARCH10)
-+DECLARE_CSR(uarch11, CSR_UARCH11)
-+DECLARE_CSR(uarch12, CSR_UARCH12)
-+DECLARE_CSR(uarch13, CSR_UARCH13)
-+DECLARE_CSR(uarch14, CSR_UARCH14)
-+DECLARE_CSR(uarch15, CSR_UARCH15)
-+DECLARE_CSR(sstatus, CSR_SSTATUS)
-+DECLARE_CSR(sie, CSR_SIE)
-+DECLARE_CSR(stvec, CSR_STVEC)
-+DECLARE_CSR(sscratch, CSR_SSCRATCH)
-+DECLARE_CSR(sepc, CSR_SEPC)
-+DECLARE_CSR(scause, CSR_SCAUSE)
-+DECLARE_CSR(sbadaddr, CSR_SBADADDR)
-+DECLARE_CSR(sip, CSR_SIP)
-+DECLARE_CSR(sptbr, CSR_SPTBR)
-+DECLARE_CSR(sasid, CSR_SASID)
-+DECLARE_CSR(scycle, CSR_SCYCLE)
-+DECLARE_CSR(stime, CSR_STIME)
-+DECLARE_CSR(sinstret, CSR_SINSTRET)
-+DECLARE_CSR(mstatus, CSR_MSTATUS)
-+DECLARE_CSR(medeleg, CSR_MEDELEG)
-+DECLARE_CSR(mideleg, CSR_MIDELEG)
-+DECLARE_CSR(mie, CSR_MIE)
-+DECLARE_CSR(mtvec, CSR_MTVEC)
-+DECLARE_CSR(mtimecmp, CSR_MTIMECMP)
-+DECLARE_CSR(mscratch, CSR_MSCRATCH)
-+DECLARE_CSR(mepc, CSR_MEPC)
-+DECLARE_CSR(mcause, CSR_MCAUSE)
-+DECLARE_CSR(mbadaddr, CSR_MBADADDR)
-+DECLARE_CSR(mip, CSR_MIP)
-+DECLARE_CSR(mipi, CSR_MIPI)
-+DECLARE_CSR(mucounteren, CSR_MUCOUNTEREN)
-+DECLARE_CSR(mscounteren, CSR_MSCOUNTEREN)
-+DECLARE_CSR(mucycle_delta, CSR_MUCYCLE_DELTA)
-+DECLARE_CSR(mutime_delta, CSR_MUTIME_DELTA)
-+DECLARE_CSR(muinstret_delta, CSR_MUINSTRET_DELTA)
-+DECLARE_CSR(mscycle_delta, CSR_MSCYCLE_DELTA)
-+DECLARE_CSR(mstime_delta, CSR_MSTIME_DELTA)
-+DECLARE_CSR(msinstret_delta, CSR_MSINSTRET_DELTA)
-+DECLARE_CSR(mcycle, CSR_MCYCLE)
-+DECLARE_CSR(mtime, CSR_MTIME)
-+DECLARE_CSR(minstret, CSR_MINSTRET)
-+DECLARE_CSR(misa, CSR_MISA)
-+DECLARE_CSR(mvendorid, CSR_MVENDORID)
-+DECLARE_CSR(marchid, CSR_MARCHID)
-+DECLARE_CSR(mimpid, CSR_MIMPID)
-+DECLARE_CSR(mcfgaddr, CSR_MCFGADDR)
-+DECLARE_CSR(mhartid, CSR_MHARTID)
-+DECLARE_CSR(mtohost, CSR_MTOHOST)
-+DECLARE_CSR(mfromhost, CSR_MFROMHOST)
-+DECLARE_CSR(mreset, CSR_MRESET)
-+DECLARE_CSR(cycleh, CSR_CYCLEH)
-+DECLARE_CSR(timeh, CSR_TIMEH)
-+DECLARE_CSR(instreth, CSR_INSTRETH)
-+DECLARE_CSR(mtimecmph, CSR_MTIMECMPH)
-+DECLARE_CSR(mucycle_deltah, CSR_MUCYCLE_DELTAH)
-+DECLARE_CSR(mutime_deltah, CSR_MUTIME_DELTAH)
-+DECLARE_CSR(muinstret_deltah, CSR_MUINSTRET_DELTAH)
-+DECLARE_CSR(mscycle_deltah, CSR_MSCYCLE_DELTAH)
-+DECLARE_CSR(mstime_deltah, CSR_MSTIME_DELTAH)
-+DECLARE_CSR(msinstret_deltah, CSR_MSINSTRET_DELTAH)
-+DECLARE_CSR(mcycleh, CSR_MCYCLEH)
-+DECLARE_CSR(mtimeh, CSR_MTIMEH)
-+DECLARE_CSR(minstreth, CSR_MINSTRETH)
-+#endif
-+#ifdef DECLARE_CAUSE
-+DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH)
-+DECLARE_CAUSE("fault fetch", CAUSE_FAULT_FETCH)
-+DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION)
-+DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT)
-+DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD)
-+DECLARE_CAUSE("fault load", CAUSE_FAULT_LOAD)
-+DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE)
-+DECLARE_CAUSE("fault store", CAUSE_FAULT_STORE)
-+DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL)
-+DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL)
-+DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL)
-+DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL)
-+#endif
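
As orientation for the long MATCH_*/MASK_* list above: an instruction word is recognized as a given opcode when (word & MASK) == MATCH. A minimal standalone sketch of that check (the two constants are copied from the header; the encoding of "addi a0, a0, 1" is assumed, not taken from the patch):

    /* Sketch: classify a 32-bit word with a MATCH/MASK pair, the same
       test the patched disassembler performs for each opcode table entry.  */
    #include <stdint.h>
    #include <stdio.h>

    #define MATCH_ADDI 0x13
    #define MASK_ADDI  0x707f

    int main (void)
    {
      uint32_t word = 0x00150513;                      /* addi a0, a0, 1 */
      if ((word & MASK_ADDI) == MATCH_ADDI)
        printf ("addi instruction\n");
      return 0;
    }
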
-diff -urN empty/ld/emulparams/elf32lriscv-defs.sh binutils-2.26.1/ld/emulparams/elf32lriscv-defs.sh
---- empty/ld/emulparams/elf32lriscv-defs.sh	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/ld/emulparams/elf32lriscv-defs.sh	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,48 @@
-+# This is an ELF platform.
-+SCRIPT_NAME=elf
-+ARCH=riscv
-+OUTPUT_FORMAT="elf32-littleriscv"
-+NO_REL_RELOCS=yes
-+
-+TEMPLATE_NAME=elf32
-+EXTRA_EM_FILE=riscvelf
-+
-+case x"$EMULATION_NAME" in
-+xelf32*) ELFSIZE=32; LIBPATH_SUFFIX=32 ;;
-+xelf64*) ELFSIZE=64; LIBPATH_SUFFIX=   ;;
-+x) ;;
-+*) echo $0: unhandled emulation $EMULATION_NAME >&2; exit 1 ;;
-+esac
-+
-+if test `echo "$host" | sed -e s/64//` = `echo "$target" | sed -e s/64//`; then
-+  case " $EMULATION_LIBPATH " in
-+    *" ${EMULATION_NAME} "*)
-+      NATIVE=yes
-+      ;;
-+  esac
-+fi
-+
-+GENERATE_SHLIB_SCRIPT=yes
-+GENERATE_PIE_SCRIPT=yes
-+
-+TEXT_START_ADDR=0x10000
-+MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
-+COMMONPAGESIZE="CONSTANT (COMMONPAGESIZE)"
-+
-+SDATA_START_SYMBOLS="_gp = . + 0x800;
-+    *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2) *(.srodata .srodata.*)"
-+
-+# Place the data section before text section.  This enables more compact
-+# global variable access for RVC code via linker relaxation.
-+INITIAL_READONLY_SECTIONS="
-+  .data           : { *(.data) *(.data.*) *(.gnu.linkonce.d.*) }
-+  .rodata         : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r.*) }
-+  .srodata        : { ${SDATA_START_SYMBOLS} }
-+  .sdata          : { *(.sdata .sdata.* .gnu.linkonce.s.*) }
-+  .sbss           : { *(.dynsbss) *(.sbss .sbss.* .gnu.linkonce.sb.*) }
-+  .bss            : { *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(COMMON) }
-+  . = ALIGN(${SEGMENT_SIZE}) + (. & (${MAXPAGESIZE} - 1));"
-+INITIAL_READONLY_SECTIONS=".interp         : { *(.interp) } ${CREATE_PIE-${INITIAL_READONLY_SECTIONS}}"
-+INITIAL_READONLY_SECTIONS="${RELOCATING+${CREATE_SHLIB-${INITIAL_READONLY_SECTIONS}}}"
-+
-+SDATA_START_SYMBOLS="${CREATE_PIE+${SDATA_START_SYMBOLS}}"
-diff -urN empty/ld/emulparams/elf32lriscv.sh binutils-2.26.1/ld/emulparams/elf32lriscv.sh
---- empty/ld/emulparams/elf32lriscv.sh	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/ld/emulparams/elf32lriscv.sh	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,2 @@
-+. ${srcdir}/emulparams/elf32lriscv-defs.sh
-+OUTPUT_FORMAT="elf32-littleriscv"
-diff -urN empty/ld/emulparams/elf64lriscv-defs.sh binutils-2.26.1/ld/emulparams/elf64lriscv-defs.sh
---- empty/ld/emulparams/elf64lriscv-defs.sh	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/ld/emulparams/elf64lriscv-defs.sh	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1 @@
-+. ${srcdir}/emulparams/elf32lriscv-defs.sh
-diff -urN empty/ld/emulparams/elf64lriscv.sh binutils-2.26.1/ld/emulparams/elf64lriscv.sh
---- empty/ld/emulparams/elf64lriscv.sh	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/ld/emulparams/elf64lriscv.sh	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,2 @@
-+. ${srcdir}/emulparams/elf64lriscv-defs.sh
-+OUTPUT_FORMAT="elf64-littleriscv"
-diff -urN empty/ld/emultempl/riscvelf.em binutils-2.26.1/ld/emultempl/riscvelf.em
---- empty/ld/emultempl/riscvelf.em	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/ld/emultempl/riscvelf.em	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,68 @@
-+# This shell script emits a C file. -*- C -*-
-+#   Copyright 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
-+#
-+# This file is part of the GNU Binutils.
-+#
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program; if not, write to the Free Software
-+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
-+# MA 02110-1301, USA.
-+
-+fragment <<EOF
-+
-+#include "ldmain.h"
-+#include "ldctor.h"
-+#include "elf/riscv.h"
-+#include "elfxx-riscv.h"
-+
-+static void
-+riscv_elf_before_allocation (void)
-+{
-+  gld${EMULATION_NAME}_before_allocation ();
-+
-+  if (link_info.discard == discard_sec_merge)
-+    link_info.discard = discard_l;
-+
-+  /* We always need at least some relaxation to handle code alignment.  */
-+  if (RELAXATION_DISABLED_BY_USER)
-+    TARGET_ENABLE_RELAXATION;
-+  else
-+    ENABLE_RELAXATION;
-+
-+  link_info.relax_pass = 2;
-+}
-+
-+static void
-+gld${EMULATION_NAME}_after_allocation (void)
-+{
-+  int need_layout = 0;
-+
-+  /* Don't attempt to discard unused .eh_frame sections until the final link,
-+     as we can't reliably tell if they're used until after relaxation.  */
-+  if (!bfd_link_relocatable (&link_info))
-+    {
-+      need_layout = bfd_elf_discard_info (link_info.output_bfd, &link_info);
-+      if (need_layout < 0)
-+	{
-+	  einfo ("%X%P: .eh_frame/.stab edit: %E\n");
-+	  return;
-+	}
-+    }
-+
-+  gld${EMULATION_NAME}_map_segments (need_layout);
-+}
-+
-+EOF
-+
-+LDEMUL_BEFORE_ALLOCATION=riscv_elf_before_allocation
-+LDEMUL_AFTER_ALLOCATION=gld${EMULATION_NAME}_after_allocation
-diff -urN empty/opcodes/riscv-dis.c binutils-2.26.1/opcodes/riscv-dis.c
---- empty/opcodes/riscv-dis.c	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/opcodes/riscv-dis.c	2016-04-03 10:33:12.065459702 +0800
-@@ -0,0 +1,521 @@
-+/* RISC-V disassembler
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target.
-+
-+   This file is part of the GNU opcodes library.
-+
-+   This library is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3, or (at your option)
-+   any later version.
-+
-+   It is distributed in the hope that it will be useful, but WITHOUT
-+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
-+   License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#include "sysdep.h"
-+#include "dis-asm.h"
-+#include "libiberty.h"
-+#include "opcode/riscv.h"
-+#include "opintl.h"
-+#include "elf-bfd.h"
-+#include "elf/riscv.h"
-+
-+#include <stdint.h>
-+#include <ctype.h>
-+
-+struct riscv_private_data
-+{
-+  bfd_vma gp;
-+  bfd_vma print_addr;
-+  bfd_vma hi_addr[OP_MASK_RD + 1];
-+};
-+
-+static const char * const *riscv_gpr_names;
-+static const char * const *riscv_fpr_names;
-+
-+/* Other options */
-+static int no_aliases;	/* If set disassemble as most general inst.  */
-+
-+static void
-+set_default_riscv_dis_options (void)
-+{
-+  riscv_gpr_names = riscv_gpr_names_abi;
-+  riscv_fpr_names = riscv_fpr_names_abi;
-+  no_aliases = 0;
-+}
-+
-+static void
-+parse_riscv_dis_option (const char *option)
-+{
-+  if (CONST_STRNEQ (option, "no-aliases"))
-+    no_aliases = 1;
-+  else if (CONST_STRNEQ (option, "numeric"))
-+    {
-+      riscv_gpr_names = riscv_gpr_names_numeric;
-+      riscv_fpr_names = riscv_fpr_names_numeric;
-+    }
-+  else
-+    {
-+      /* Invalid option.  */
-+      fprintf (stderr, _("Unrecognized disassembler option: %s\n"), option);
-+    }
-+}
-+
-+static void
-+parse_riscv_dis_options (const char *opts_in)
-+{
-+  char *opts = xstrdup (opts_in), *opt = opts, *opt_end = opts;
-+
-+  set_default_riscv_dis_options ();
-+
-+  for ( ; opt_end != NULL; opt = opt_end + 1)
-+    {
-+      if ((opt_end = strchr (opt, ',')) != NULL)
-+	*opt_end = 0;
-+      parse_riscv_dis_option (opt);
-+    }
-+
-+  free (opts);
-+}
-+
-+/* Print one argument from an array.  */
-+
-+static void
-+arg_print (struct disassemble_info *info, unsigned long val,
-+	   const char* const* array, size_t size)
-+{
-+  const char *s = val >= size || array[val] == NULL ? "unknown" : array[val];
-+  (*info->fprintf_func) (info->stream, "%s", s);
-+}
-+
-+static void
-+maybe_print_address (struct riscv_private_data *pd, int base_reg, int offset)
-+{
-+  if (pd->hi_addr[base_reg] != (bfd_vma)-1)
-+    {
-+      pd->print_addr = pd->hi_addr[base_reg] + offset;
-+      pd->hi_addr[base_reg] = -1;
-+    }
-+  else if (base_reg == X_GP && pd->gp != (bfd_vma)-1)
-+    pd->print_addr = pd->gp + offset;
-+  else if (base_reg == X_TP || base_reg == 0)
-+    pd->print_addr = offset;
-+}
-+
-+/* Print insn arguments for 32/64-bit code.  */
-+
-+static void
-+print_insn_args (const char *d, insn_t l, bfd_vma pc, disassemble_info *info)
-+{
-+  struct riscv_private_data *pd = info->private_data;
-+  int rs1 = (l >> OP_SH_RS1) & OP_MASK_RS1;
-+  int rd = (l >> OP_SH_RD) & OP_MASK_RD;
-+  fprintf_ftype print = info->fprintf_func;
-+
-+  if (*d != '\0')
-+    print (info->stream, "\t");
-+
-+  for (; *d != '\0'; d++)
-+    {
-+      switch (*d)
-+	{
-+	/* Xcustom */
-+	case '^':
-+	  switch (*++d)
-+	    {
-+	    case 'd':
-+	      print (info->stream, "%d", rd);
-+	      break;
-+	    case 's':
-+	      print (info->stream, "%d", rs1);
-+	      break;
-+	    case 't':
-+	      print (info->stream, "%d", (int) EXTRACT_OPERAND (RS2, l));
-+	      break;
-+	    case 'j':
-+	      print (info->stream, "%d", (int) EXTRACT_OPERAND (CUSTOM_IMM, l));
-+	      break;
-+	    }
-+	  break;
-+
-+	case 'C': /* RVC */
-+	  switch (*++d)
-+	    {
-+	    case 's': /* RS1 x8-x15 */
-+	    case 'w': /* RS1 x8-x15 */
-+	      print (info->stream, "%s",
-+		     riscv_gpr_names[EXTRACT_OPERAND (CRS1S, l) + 8]);
-+	      break;
-+	    case 't': /* RS2 x8-x15 */
-+	    case 'x': /* RS2 x8-x15 */
-+	      print (info->stream, "%s",
-+		     riscv_gpr_names[EXTRACT_OPERAND (CRS2S, l) + 8]);
-+	      break;
-+	    case 'U': /* RS1, constrained to equal RD */
-+	      print (info->stream, "%s", riscv_gpr_names[rd]);
-+	      break;
-+	    case 'c': /* RS1, constrained to equal sp */
-+	      print (info->stream, "%s", riscv_gpr_names[X_SP]);
-+	      break;
-+	    case 'V': /* RS2 */
-+	      print (info->stream, "%s",
-+		     riscv_gpr_names[EXTRACT_OPERAND (CRS2, l)]);
-+	      break;
-+	    case 'i':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_SIMM3 (l));
-+	      break;
-+	    case 'j':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_IMM (l));
-+	      break;
-+	    case 'k':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_LW_IMM (l));
-+	      break;
-+	    case 'l':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_LD_IMM (l));
-+	      break;
-+	    case 'm':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_LWSP_IMM (l));
-+	      break;
-+	    case 'n':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_LDSP_IMM (l));
-+	      break;
-+	    case 'K':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_ADDI4SPN_IMM (l));
-+	      break;
-+	    case 'L':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_ADDI16SP_IMM (l));
-+	      break;
-+	    case 'M':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_SWSP_IMM (l));
-+	      break;
-+	    case 'N':
-+	      print (info->stream, "%d", (int)EXTRACT_RVC_SDSP_IMM (l));
-+	      break;
-+	    case 'p':
-+	      info->target = EXTRACT_RVC_B_IMM (l) + pc;
-+	      (*info->print_address_func) (info->target, info);
-+	      break;
-+	    case 'a':
-+	      info->target = EXTRACT_RVC_J_IMM (l) + pc;
-+	      (*info->print_address_func) (info->target, info);
-+	      break;
-+	    case 'u':
-+	      print (info->stream, "0x%x",
-+		     (int) (EXTRACT_RVC_IMM (l) & (RISCV_BIGIMM_REACH-1)));
-+	      break;
-+	    case '>':
-+	      print (info->stream, "0x%x", (int) EXTRACT_RVC_IMM (l) & 0x3f);
-+	      break;
-+	    case '<':
-+	      print (info->stream, "0x%x", (int) EXTRACT_RVC_IMM (l) & 0x1f);
-+	      break;
-+	    case 'T': /* floating-point RS2 */
-+	      print (info->stream, "%s",
-+		     riscv_fpr_names[EXTRACT_OPERAND (CRS2, l)]);
-+	      break;
-+	    case 'D': /* floating-point RS2 x8-x15 */
-+	      print (info->stream, "%s",
-+		     riscv_fpr_names[EXTRACT_OPERAND (CRS2S, l) + 8]);
-+	      break;
-+	    }
-+	  break;
-+
-+	case ',':
-+	case '(':
-+	case ')':
-+	case '[':
-+	case ']':
-+	  print (info->stream, "%c", *d);
-+	  break;
-+
-+	case '0':
-+	  /* Only print constant 0 if it is the last argument */
-+	  if (!d[1])
-+	    print (info->stream, "0");
-+	  break;
-+
-+	case 'b':
-+	case 's':
-+	  print (info->stream, "%s", riscv_gpr_names[rs1]);
-+	  break;
-+
-+	case 't':
-+	  print (info->stream, "%s",
-+		 riscv_gpr_names[EXTRACT_OPERAND (RS2, l)]);
-+	  break;
-+
-+	case 'u':
-+	  print (info->stream, "0x%x",
-+		 (unsigned) EXTRACT_UTYPE_IMM (l) >> RISCV_IMM_BITS);
-+	  break;
-+
-+	case 'm':
-+	  arg_print (info, EXTRACT_OPERAND (RM, l),
-+		     riscv_rm, ARRAY_SIZE (riscv_rm));
-+	  break;
-+
-+	case 'P':
-+	  arg_print (info, EXTRACT_OPERAND (PRED, l),
-+		     riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ));
-+	  break;
-+
-+	case 'Q':
-+	  arg_print (info, EXTRACT_OPERAND (SUCC, l),
-+		     riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ));
-+	  break;
-+
-+	case 'o':
-+	  maybe_print_address (pd, rs1, EXTRACT_ITYPE_IMM (l));
-+	case 'j':
-+	  if (((l & MASK_ADDI) == MATCH_ADDI && rs1 != 0)
-+	      || (l & MASK_JALR) == MATCH_JALR)
-+	    maybe_print_address (pd, rs1, EXTRACT_ITYPE_IMM (l));
-+	  print (info->stream, "%d", (int) EXTRACT_ITYPE_IMM (l));
-+	  break;
-+
-+	case 'q':
-+	  maybe_print_address (pd, rs1, EXTRACT_STYPE_IMM (l));
-+	  print (info->stream, "%d", (int) EXTRACT_STYPE_IMM (l));
-+	  break;
-+
-+	case 'a':
-+	  info->target = EXTRACT_UJTYPE_IMM (l) + pc;
-+	  (*info->print_address_func) (info->target, info);
-+	  break;
-+
-+	case 'p':
-+	  info->target = EXTRACT_SBTYPE_IMM (l) + pc;
-+	  (*info->print_address_func) (info->target, info);
-+	  break;
-+
-+	case 'd':
-+	  if ((l & MASK_AUIPC) == MATCH_AUIPC)
-+	    pd->hi_addr[rd] = pc + EXTRACT_UTYPE_IMM (l);
-+	  else if ((l & MASK_LUI) == MATCH_LUI)
-+	    pd->hi_addr[rd] = EXTRACT_UTYPE_IMM (l);
-+	  else if ((l & MASK_C_LUI) == MATCH_C_LUI)
-+	    pd->hi_addr[rd] = EXTRACT_RVC_LUI_IMM (l);
-+	  print (info->stream, "%s", riscv_gpr_names[rd]);
-+	  break;
-+
-+	case 'z':
-+	  print (info->stream, "%s", riscv_gpr_names[0]);
-+	  break;
-+
-+	case '>':
-+	  print (info->stream, "0x%x", (int) EXTRACT_OPERAND (SHAMT, l));
-+	  break;
-+
-+	case '<':
-+	  print (info->stream, "0x%x", (int) EXTRACT_OPERAND (SHAMTW, l));
-+	  break;
-+
-+	case 'S':
-+	case 'U':
-+	  print (info->stream, "%s", riscv_fpr_names[rs1]);
-+	  break;
-+
-+	case 'T':
-+	  print (info->stream, "%s", riscv_fpr_names[EXTRACT_OPERAND (RS2, l)]);
-+	  break;
-+
-+	case 'D':
-+	  print (info->stream, "%s", riscv_fpr_names[rd]);
-+	  break;
-+
-+	case 'R':
-+	  print (info->stream, "%s", riscv_fpr_names[EXTRACT_OPERAND (RS3, l)]);
-+	  break;
-+
-+	case 'E':
-+	  {
-+	    const char* csr_name = NULL;
-+	    unsigned int csr = EXTRACT_OPERAND (CSR, l);
-+	    switch (csr)
-+	      {
-+	      #define DECLARE_CSR(name, num) case num: csr_name = #name; break;
-+	      #include "opcode/riscv-opc.h"
-+	      #undef DECLARE_CSR
-+	      }
-+	    if (csr_name)
-+	      print (info->stream, "%s", csr_name);
-+	    else
-+	      print (info->stream, "0x%x", csr);
-+	    break;
-+	  }
-+
-+	case 'Z':
-+	  print (info->stream, "%d", rs1);
-+	  break;
-+
-+	default:
-+	  /* xgettext:c-format */
-+	  print (info->stream, _("# internal error, undefined modifier (%c)"),
-+		 *d);
-+	  return;
-+	}
-+    }
-+}
-+
-+/* Print the RISC-V instruction at address MEMADDR in debugged memory,
-+   on using INFO.  Returns length of the instruction, in bytes.
-+   BIGENDIAN must be 1 if this is big-endian code, 0 if
-+   this is little-endian code.  */
-+
-+static int
-+riscv_disassemble_insn (bfd_vma memaddr, insn_t word, disassemble_info *info)
-+{
-+  const struct riscv_opcode *op;
-+  static bfd_boolean init = 0;
-+  static const struct riscv_opcode *riscv_hash[OP_MASK_OP + 1];
-+  struct riscv_private_data *pd;
-+  int insnlen;
-+
-+#define OP_HASH_IDX(i) ((i) & (riscv_insn_length (i) == 2 ? 0x3 : OP_MASK_OP))
-+
-+  /* Build a hash table to shorten the search time.  */
-+  if (! init)
-+    {
-+      for (op = riscv_opcodes; op < &riscv_opcodes[NUMOPCODES]; op++)
-+	if (!riscv_hash[OP_HASH_IDX (op->match)])
-+	  riscv_hash[OP_HASH_IDX (op->match)] = op;
-+
-+      init = 1;
-+    }
-+
-+  if (info->private_data == NULL)
-+    {
-+      int i;
-+
-+      pd = info->private_data = xcalloc (1, sizeof (struct riscv_private_data));
-+      pd->gp = -1;
-+      pd->print_addr = -1;
-+      for (i = 0; i < (int) ARRAY_SIZE (pd->hi_addr); i++)
-+	pd->hi_addr[i] = -1;
-+
-+      for (i = 0; i < info->symtab_size; i++)
-+	if (strcmp (bfd_asymbol_name (info->symtab[i]), "_gp") == 0)
-+	  pd->gp = bfd_asymbol_value (info->symtab[i]);
-+    }
-+  else
-+    pd = info->private_data;
-+
-+  insnlen = riscv_insn_length (word);
-+
-+  info->bytes_per_chunk = insnlen % 4 == 0 ? 4 : 2;
-+  info->bytes_per_line = 8;
-+  info->display_endian = info->endian;
-+  info->insn_info_valid = 1;
-+  info->branch_delay_insns = 0;
-+  info->data_size = 0;
-+  info->insn_type = dis_nonbranch;
-+  info->target = 0;
-+  info->target2 = 0;
-+
-+  op = riscv_hash[OP_HASH_IDX (word)];
-+  if (op != NULL)
-+    {
-+      int xlen = 0;
-+
-+      /* The incoming section might not always be complete.  */
-+      if (info->section != NULL)
-+	{
-+	  Elf_Internal_Ehdr *ehdr = elf_elfheader (info->section->owner);
-+	  xlen = ehdr->e_ident[EI_CLASS] == ELFCLASS64 ? 64 : 32;
-+	}
-+
-+      for (; op < &riscv_opcodes[NUMOPCODES]; op++)
-+	{
-+	  /* Does the opcode match?  */
-+	  if (! (op->match_func) (op, word))
-+	    continue;
-+	  /* Is this a pseudo-instruction and may we print it as such?  */
-+	  if (no_aliases && (op->pinfo & INSN_ALIAS))
-+	    continue;
-+	  /* Is this instruction restricted to a certain value of XLEN?  */
-+	  if (isdigit (op->subset[0]) && atoi (op->subset) != xlen)
-+	    continue;
-+
-+	  /* It's a match.  */
-+	  (*info->fprintf_func) (info->stream, "%s", op->name);
-+	  print_insn_args (op->args, word, memaddr, info);
-+
-+	  /* Try to disassemble multi-instruction addressing sequences.  */
-+	  if (pd->print_addr != (bfd_vma)-1)
-+	    {
-+	      info->target = pd->print_addr;
-+	      (*info->fprintf_func) (info->stream, " # ");
-+	      (*info->print_address_func) (info->target, info);
-+	      pd->print_addr = -1;
-+	    }
-+
-+	  return insnlen;
-+	}
-+    }
-+
-+  /* We did not find a match, so just print the instruction bits.  */
-+  info->insn_type = dis_noninsn;
-+  (*info->fprintf_func) (info->stream, "0x%llx", (unsigned long long)word);
-+  return insnlen;
-+}
-+
-+int
-+print_insn_riscv (bfd_vma memaddr, struct disassemble_info *info)
-+{
-+  bfd_byte packet[2];
-+  insn_t insn = 0;
-+  bfd_vma n;
-+  int status;
-+
-+  if (info->disassembler_options != NULL)
-+    {
-+      parse_riscv_dis_options (info->disassembler_options);
-+      /* Avoid repeatedly parsing the options.  */
-+      info->disassembler_options = NULL;
-+    }
-+  else if (riscv_gpr_names == NULL)
-+    set_default_riscv_dis_options ();
-+
-+  /* Instructions are a sequence of 2-byte packets in little-endian order.  */
-+  for (n = 0; n < sizeof (insn) && n < riscv_insn_length (insn); n += 2)
-+    {
-+      status = (*info->read_memory_func) (memaddr + n, packet, 2, info);
-+      if (status != 0)
-+	{
-+	  /* Don't fail just because we fell off the end.  */
-+	  if (n > 0)
-+	    break;
-+	  (*info->memory_error_func) (status, memaddr, info);
-+	  return status;
-+	}
-+
-+      insn |= ((insn_t) bfd_getl16 (packet)) << (8 * n);
-+    }
-+
-+  return riscv_disassemble_insn (memaddr, insn, info);
-+}
-+
-+void
-+print_riscv_disassembler_options (FILE *stream)
-+{
-+  fprintf (stream, _("\n\
-+The following RISC-V-specific disassembler options are supported for use\n\
-+with the -M switch (multiple options should be separated by commas):\n"));
-+
-+  fprintf (stream, _("\n\
-+  numeric       Print numeric register names, rather than ABI names.\n"));
-+
-+  fprintf (stream, _("\n\
-+  no-aliases    Disassemble only into canonical instructions, rather\n\
-+                than into pseudoinstructions.\n"));
-+
-+  fprintf (stream, _("\n"));
-+}
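
The disassembler above fetches each instruction as little-endian 2-byte packets and derives the
total length from the low bits of the first packet before looking the word up in the opcode table.
The following is a minimal standalone sketch of that length-encoding convention; the helper name
and the demo values are illustrative only and are not part of this patch:

/* Illustrative sketch (not part of the patch): how the low bits of the
   first 2-byte packet map to a total RISC-V instruction length.  */
#include <stdint.h>
#include <stdio.h>

static unsigned int
sketch_insn_length (uint64_t insn)
{
  if ((insn & 0x3) != 0x3)     /* compressed (RVC) encoding */
    return 2;
  if ((insn & 0x1f) != 0x1f)   /* standard 32-bit encoding */
    return 4;
  if ((insn & 0x3f) == 0x1f)   /* 48-bit encoding */
    return 6;
  if ((insn & 0x7f) == 0x3f)   /* 64-bit encoding */
    return 8;
  return 2;                    /* longer encodings not handled here */
}

int
main (void)
{
  /* 0x4501 is c.li a0,0 (16-bit); 0x00000013 is nop (addi x0,x0,0, 32-bit).  */
  printf ("%u\n", sketch_insn_length (0x4501));      /* prints 2 */
  printf ("%u\n", sketch_insn_length (0x00000013));  /* prints 4 */
  return 0;
}

This is why the read loop in print_insn_riscv keeps pulling 2-byte packets until it has as many
bytes as the first packet says the instruction needs.
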
-diff -urN empty/opcodes/riscv-opc.c binutils-2.26.1/opcodes/riscv-opc.c
---- empty/opcodes/riscv-opc.c	1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/opcodes/riscv-opc.c	2016-04-16 11:38:25.314563423 +0800
-@@ -0,0 +1,647 @@
-+/* RISC-V opcode list
-+   Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target.
-+
-+   This file is part of the GNU opcodes library.
-+
-+   This library is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3, or (at your option)
-+   any later version.
-+
-+   It is distributed in the hope that it will be useful, but WITHOUT
-+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
-+   License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; see the file COPYING3. If not,
-+   see <http://www.gnu.org/licenses/>.  */
-+
-+#include "sysdep.h"
-+#include "opcode/riscv.h"
-+#include <stdio.h>
-+
-+/* Register names used by gas and objdump.  */
-+
-+const char * const riscv_gpr_names_numeric[32] =
-+{
-+  "x0",   "x1",   "x2",   "x3",   "x4",   "x5",   "x6",   "x7",
-+  "x8",   "x9",   "x10",  "x11",  "x12",  "x13",  "x14",  "x15",
-+  "x16",  "x17",  "x18",  "x19",  "x20",  "x21",  "x22",  "x23",
-+  "x24",  "x25",  "x26",  "x27",  "x28",  "x29",  "x30",  "x31"
-+};
-+
-+const char * const riscv_gpr_names_abi[32] = {
-+  "zero", "ra", "sp",  "gp",  "tp", "t0",  "t1",  "t2",
-+  "s0",   "s1", "a0",  "a1",  "a2", "a3",  "a4",  "a5",
-+  "a6",   "a7", "s2",  "s3",  "s4", "s5",  "s6",  "s7",
-+  "s8",   "s9", "s10", "s11", "t3", "t4",  "t5",  "t6"
-+};
-+
-+const char * const riscv_fpr_names_numeric[32] =
-+{
-+  "f0",   "f1",   "f2",   "f3",   "f4",   "f5",   "f6",   "f7",
-+  "f8",   "f9",   "f10",  "f11",  "f12",  "f13",  "f14",  "f15",
-+  "f16",  "f17",  "f18",  "f19",  "f20",  "f21",  "f22",  "f23",
-+  "f24",  "f25",  "f26",  "f27",  "f28",  "f29",  "f30",  "f31"
-+};
-+
-+const char * const riscv_fpr_names_abi[32] = {
-+  "ft0", "ft1", "ft2",  "ft3",  "ft4", "ft5", "ft6",  "ft7",
-+  "fs0", "fs1", "fa0",  "fa1",  "fa2", "fa3", "fa4",  "fa5",
-+  "fa6", "fa7", "fs2",  "fs3",  "fs4", "fs5", "fs6",  "fs7",
-+  "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"
-+};
-+
-+/* The order of overloaded instructions matters.  Label arguments and
-+   register arguments look the same.  Instructions that can take either
-+   as arguments must appear in the correct order in this table for the
-+   assembler to pick the right one.  In other words, entries with
-+   immediate operands must appear after the same instruction with
-+   registers.
-+
-+   Because of the lookup algorithm used, entries with the same opcode
-+   name must be contiguous.  */
-+
-+#define MASK_RS1 (OP_MASK_RS1 << OP_SH_RS1)
-+#define MASK_RS2 (OP_MASK_RS2 << OP_SH_RS2)
-+#define MASK_RD (OP_MASK_RD << OP_SH_RD)
-+#define MASK_CRS2 (OP_MASK_CRS2 << OP_SH_CRS2)
-+#define MASK_IMM ENCODE_ITYPE_IMM(-1U)
-+#define MASK_RVC_IMM ENCODE_RVC_IMM(-1U)
-+#define MASK_UIMM ENCODE_UTYPE_IMM(-1U)
-+#define MASK_RM (OP_MASK_RM << OP_SH_RM)
-+#define MASK_PRED (OP_MASK_PRED << OP_SH_PRED)
-+#define MASK_SUCC (OP_MASK_SUCC << OP_SH_SUCC)
-+#define MASK_AQ (OP_MASK_AQ << OP_SH_AQ)
-+#define MASK_RL (OP_MASK_RL << OP_SH_RL)
-+#define MASK_AQRL (MASK_AQ | MASK_RL)
-+
-+static int match_opcode(const struct riscv_opcode *op, insn_t insn)
-+{
-+  return ((insn ^ op->match) & op->mask) == 0;
-+}
-+
-+static int match_never(const struct riscv_opcode *op ATTRIBUTE_UNUSED,
-+		       insn_t insn ATTRIBUTE_UNUSED)
-+{
-+  return 0;
-+}
-+
-+static int match_rs1_eq_rs2(const struct riscv_opcode *op, insn_t insn)
-+{
-+  int rs1 = (insn & MASK_RS1) >> OP_SH_RS1;
-+  int rs2 = (insn & MASK_RS2) >> OP_SH_RS2;
-+  return match_opcode (op, insn) && rs1 == rs2;
-+}
-+
-+static int match_rd_nonzero(const struct riscv_opcode *op, insn_t insn)
-+{
-+  return match_opcode (op, insn) && ((insn & MASK_RD) != 0);
-+}
-+
-+static int match_c_add(const struct riscv_opcode *op, insn_t insn)
-+{
-+  return match_rd_nonzero (op, insn) && ((insn & MASK_CRS2) != 0);
-+}
-+
-+static int match_c_lui(const struct riscv_opcode *op, insn_t insn)
-+{
-+  return match_rd_nonzero (op, insn) && (((insn & MASK_RD) >> OP_SH_RD) != 2);
-+}
-+
-+const struct riscv_opcode riscv_builtin_opcodes[] =
-+{
-+/* name,      isa,   operands, match, mask, match_func, pinfo */
-+{"unimp",     "C",   "",  0, 0xffffU,  match_opcode, 0 },
-+{"unimp",     "I",   "",  MATCH_CSRRW | (CSR_CYCLE << OP_SH_CSR), 0xffffffffU,  match_opcode, 0 }, /* csrw cycle, x0 */
-+{"ebreak",    "C",   "",  MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, INSN_ALIAS },
-+{"ebreak",    "I",   "",    MATCH_EBREAK, MASK_EBREAK, match_opcode, 0 },
-+{"sbreak",    "C",   "",  MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, INSN_ALIAS },
-+{"sbreak",    "I",   "",    MATCH_EBREAK, MASK_EBREAK, match_opcode, INSN_ALIAS },
-+{"ret",       "C",   "",  MATCH_C_JR | (X_RA << OP_SH_RD), MASK_C_JR | MASK_RD, match_opcode, INSN_ALIAS },
-+{"ret",       "I",   "",  MATCH_JALR | (X_RA << OP_SH_RS1), MASK_JALR | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"jr",        "C",   "d",  MATCH_C_JR, MASK_C_JR, match_rd_nonzero, INSN_ALIAS },
-+{"jr",        "I",   "s",  MATCH_JALR, MASK_JALR | MASK_RD | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"jr",        "I",   "s,j",  MATCH_JALR, MASK_JALR | MASK_RD, match_opcode, INSN_ALIAS },
-+{"jalr",      "C",   "d",  MATCH_C_JALR, MASK_C_JALR, match_rd_nonzero, INSN_ALIAS },
-+{"jalr",      "I",   "s",  MATCH_JALR | (X_RA << OP_SH_RD), MASK_JALR | MASK_RD | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"jalr",      "I",   "s,j",  MATCH_JALR | (X_RA << OP_SH_RD), MASK_JALR | MASK_RD, match_opcode, INSN_ALIAS },
-+{"jalr",      "I",   "d,s",  MATCH_JALR, MASK_JALR | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"jalr",      "I",   "d,s,j",  MATCH_JALR, MASK_JALR, match_opcode, 0 },
-+{"j",         "C",   "Ca",  MATCH_C_J, MASK_C_J, match_opcode, INSN_ALIAS },
-+{"j",         "I",   "a",  MATCH_JAL, MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS },
-+{"jal",       "32C", "Ca",  MATCH_C_JAL, MASK_C_JAL, match_opcode, INSN_ALIAS },
-+{"jal",       "I",   "a",  MATCH_JAL | (X_RA << OP_SH_RD), MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS },
-+{"jal",       "I",   "d,a",  MATCH_JAL, MASK_JAL, match_opcode, 0 },
-+{"call",      "I",   "c", (X_T1 << OP_SH_RS1) | (X_RA << OP_SH_RD), (int) M_CALL,  match_never, INSN_MACRO },
-+{"call",      "I",   "d,c", (X_T1 << OP_SH_RS1), (int) M_CALL,  match_never, INSN_MACRO },
-+{"tail",      "I",   "c", (X_T1 << OP_SH_RS1), (int) M_CALL,  match_never, INSN_MACRO },
-+{"jump",      "I",   "c,s", 0, (int) M_CALL,  match_never, INSN_MACRO },
-+{"nop",       "C",   "",  MATCH_C_ADDI, 0xffff, match_opcode, INSN_ALIAS },
-+{"nop",       "I",   "",         MATCH_ADDI, MASK_ADDI | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"lui",       "C",   "d,Cu",  MATCH_C_LUI, MASK_C_LUI, match_c_lui, INSN_ALIAS },
-+{"lui",       "I",   "d,u",  MATCH_LUI, MASK_LUI, match_opcode, 0 },
-+{"li",        "C",   "d,Cv",  MATCH_C_LUI, MASK_C_LUI, match_c_lui, INSN_ALIAS },
-+{"li",        "C",   "d,Cj",  MATCH_C_LI, MASK_C_LI, match_rd_nonzero, INSN_ALIAS },
-+{"li",        "C",   "d,0",  MATCH_C_LI, MASK_C_LI | MASK_RVC_IMM, match_rd_nonzero, INSN_ALIAS },
-+{"li",        "I",   "d,j",      MATCH_ADDI, MASK_ADDI | MASK_RS1, match_opcode, INSN_ALIAS }, /* addi */
-+{"li",        "I",   "d,I",  0,    (int) M_LI,  match_never, INSN_MACRO },
-+{"mv",        "C",   "d,CV",  MATCH_C_MV, MASK_C_MV, match_c_add, INSN_ALIAS },
-+{"mv",        "I",   "d,s",  MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"move",      "C",   "d,CV",  MATCH_C_MV, MASK_C_MV, match_c_add, INSN_ALIAS },
-+{"move",      "I",   "d,s",  MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"andi",      "C",   "Cs,Cw,Cj",  MATCH_C_ANDI, MASK_C_ANDI, match_opcode, INSN_ALIAS },
-+{"andi",      "I",   "d,s,j",  MATCH_ANDI, MASK_ANDI, match_opcode, 0 },
-+{"and",       "C",   "Cs,Cw,Ct",  MATCH_C_AND, MASK_C_AND, match_opcode, INSN_ALIAS },
-+{"and",       "C",   "Cs,Ct,Cw",  MATCH_C_AND, MASK_C_AND, match_opcode, INSN_ALIAS },
-+{"and",       "C",   "Cs,Cw,Cj",  MATCH_C_ANDI, MASK_C_ANDI, match_opcode, INSN_ALIAS },
-+{"and",       "I",   "d,s,t",  MATCH_AND, MASK_AND, match_opcode, 0 },
-+{"and",       "I",   "d,s,j",  MATCH_ANDI, MASK_ANDI, match_opcode, INSN_ALIAS },
-+{"beqz",      "C",   "Cs,Cp",  MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, INSN_ALIAS },
-+{"beqz",      "I",   "s,p",  MATCH_BEQ, MASK_BEQ | MASK_RS2, match_opcode, INSN_ALIAS },
-+{"beq",       "I",   "s,t,p",  MATCH_BEQ, MASK_BEQ, match_opcode, 0 },
-+{"blez",      "I",   "t,p",  MATCH_BGE, MASK_BGE | MASK_RS1, match_opcode, INSN_ALIAS },
-+{"bgez",      "I",   "s,p",  MATCH_BGE, MASK_BGE | MASK_RS2, match_opcode, INSN_ALIAS },
-+{"ble",       "I",   "t,s,p",  MATCH_BGE, MASK_BGE, match_opcode, INSN_ALIAS },
-+{"bleu",      "I",   "t,s,p",  MATCH_BGEU, MASK_BGEU, match_opcode, INSN_ALIAS },
-+{"bge",       "I",   "s,t,p",  MATCH_BGE, MASK_BGE, match_opcode, 0 },
-+{"bgeu",      "I",   "s,t,p",  MATCH_BGEU, MASK_BGEU, match_opcode, 0 },
-+{"bltz",      "I",   "s,p",  MATCH_BLT, MASK_BLT | MASK_RS2, match_opcode, INSN_ALIAS },
-+{"bgtz",      "I",   "t,p",  MATCH_BLT, MASK_BLT | MASK_RS1, match_opcode, INSN_ALIAS },
-+{"blt",       "I",   "s,t,p",  MATCH_BLT, MASK_BLT, match_opcode, 0 },
-+{"bltu",      "I",   "s,t,p",  MATCH_BLTU, MASK_BLTU, match_opcode, 0 },
-+{"bgt",       "I",   "t,s,p",  MATCH_BLT, MASK_BLT, match_opcode, INSN_ALIAS },
-+{"bgtu",      "I",   "t,s,p",  MATCH_BLTU, MASK_BLTU, match_opcode, INSN_ALIAS },
-+{"bnez",      "C",   "Cs,Cp",  MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, INSN_ALIAS },
-+{"bnez",      "I",   "s,p",  MATCH_BNE, MASK_BNE | MASK_RS2, match_opcode, INSN_ALIAS },
-+{"bne",       "I",   "s,t,p",  MATCH_BNE, MASK_BNE, match_opcode, 0 },
-+{"addi",      "C",   "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_opcode, INSN_ALIAS },
-+{"addi",      "C",   "d,CU,Cj",  MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, INSN_ALIAS },
-+{"addi",      "C",   "Cc,Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_opcode, INSN_ALIAS },
-+{"addi",      "I",   "d,s,j",  MATCH_ADDI, MASK_ADDI, match_opcode, 0 },
-+{"add",       "C",   "d,CU,CV",  MATCH_C_ADD, MASK_C_ADD, match_c_add, INSN_ALIAS },
-+{"add",       "C",   "d,CV,CU",  MATCH_C_ADD, MASK_C_ADD, match_c_add, INSN_ALIAS },
-+{"add",       "C",   "d,CU,Cj",  MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, INSN_ALIAS },
-+{"add",       "C",   "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_opcode, INSN_ALIAS },
-+{"add",       "C",   "Cc,Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_opcode, INSN_ALIAS },
-+{"add",       "I",   "d,s,t",  MATCH_ADD, MASK_ADD, match_opcode, 0 },
-+{"add",       "I",   "d,s,t,0",MATCH_ADD, MASK_ADD, match_opcode, 0 },
-+{"add",       "I",   "d,s,j",  MATCH_ADDI, MASK_ADDI, match_opcode, INSN_ALIAS },
-+{"la",        "I",   "d,A",  0,    (int) M_LA,  match_never, INSN_MACRO },
-+{"lla",       "I",   "d,A",  0,    (int) M_LLA,  match_never, INSN_MACRO },
-+{"la.tls.gd", "I",   "d,A",  0,    (int) M_LA_TLS_GD,  match_never, INSN_MACRO },
-+{"la.tls.ie", "I",   "d,A",  0,    (int) M_LA_TLS_IE,  match_never, INSN_MACRO },
-+{"neg",       "I",   "d,t",  MATCH_SUB, MASK_SUB | MASK_RS1, match_opcode, INSN_ALIAS }, /* sub 0 */
-+{"slli",      "C",   "d,CU,C>",  MATCH_C_SLLI, MASK_C_SLLI, match_rd_nonzero, INSN_ALIAS },
-+{"slli",      "I",   "d,s,>",   MATCH_SLLI, MASK_SLLI, match_opcode, 0 },
-+{"sll",       "C",   "d,CU,C>",  MATCH_C_SLLI, MASK_C_SLLI, match_rd_nonzero, INSN_ALIAS },
-+{"sll",       "I",   "d,s,t",   MATCH_SLL, MASK_SLL, match_opcode, 0 },
-+{"sll",       "I",   "d,s,>",   MATCH_SLLI, MASK_SLLI, match_opcode, INSN_ALIAS },
-+{"srli",      "C",   "Cs,Cw,C>",  MATCH_C_SRLI, MASK_C_SRLI, match_rd_nonzero, INSN_ALIAS },
-+{"srli",      "I",   "d,s,>",   MATCH_SRLI, MASK_SRLI, match_opcode, 0 },
-+{"srl",       "C",   "Cs,Cw,C>",  MATCH_C_SRLI, MASK_C_SRLI, match_rd_nonzero, INSN_ALIAS },
-+{"srl",       "I",   "d,s,t",   MATCH_SRL, MASK_SRL, match_opcode, 0 },
-+{"srl",       "I",   "d,s,>",   MATCH_SRLI, MASK_SRLI, match_opcode, INSN_ALIAS },
-+{"srai",      "C",   "Cs,Cw,C>",  MATCH_C_SRAI, MASK_C_SRAI, match_rd_nonzero, INSN_ALIAS },
-+{"srai",      "I",   "d,s,>",   MATCH_SRAI, MASK_SRAI, match_opcode, 0 },
-+{"sra",       "C",   "Cs,Cw,C>",  MATCH_C_SRAI, MASK_C_SRAI, match_rd_nonzero, INSN_ALIAS },
-+{"sra",       "I",   "d,s,t",   MATCH_SRA, MASK_SRA, match_opcode, 0 },
-+{"sra",       "I",   "d,s,>",   MATCH_SRAI, MASK_SRAI, match_opcode, INSN_ALIAS },
-+{"sub",       "C",   "Cs,Cw,Ct",  MATCH_C_SUB, MASK_C_SUB, match_opcode, INSN_ALIAS },
-+{"sub",       "I",   "d,s,t",  MATCH_SUB, MASK_SUB, match_opcode, 0 },
-+{"lb",        "I",   "d,o(s)",  MATCH_LB, MASK_LB, match_opcode, 0 },
-+{"lb",        "I",   "d,A",  0, (int) M_LB, match_never, INSN_MACRO },
-+{"lbu",       "I",   "d,o(s)",  MATCH_LBU, MASK_LBU, match_opcode, 0 },
-+{"lbu",       "I",   "d,A",  0, (int) M_LBU, match_never, INSN_MACRO },
-+{"lh",        "I",   "d,o(s)",  MATCH_LH, MASK_LH, match_opcode, 0 },
-+{"lh",        "I",   "d,A",  0, (int) M_LH, match_never, INSN_MACRO },
-+{"lhu",       "I",   "d,o(s)",  MATCH_LHU, MASK_LHU, match_opcode, 0 },
-+{"lhu",       "I",   "d,A",  0, (int) M_LHU, match_never, INSN_MACRO },
-+{"lw",        "C",   "d,Cm(Cc)",  MATCH_C_LWSP, MASK_C_LWSP, match_rd_nonzero, INSN_ALIAS },
-+{"lw",        "C",   "Ct,Ck(Cs)",  MATCH_C_LW, MASK_C_LW, match_opcode, INSN_ALIAS },
-+{"lw",        "I",   "d,o(s)",  MATCH_LW, MASK_LW, match_opcode, 0 },
-+{"lw",        "I",   "d,A",  0, (int) M_LW, match_never, INSN_MACRO },
-+{"not",       "I",   "d,s",  MATCH_XORI | MASK_IMM, MASK_XORI | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"ori",       "I",   "d,s,j",  MATCH_ORI, MASK_ORI, match_opcode, 0 },
-+{"or",       "C",   "Cs,Cw,Ct",  MATCH_C_OR, MASK_C_OR, match_opcode, INSN_ALIAS },
-+{"or",       "C",   "Cs,Ct,Cw",  MATCH_C_OR, MASK_C_OR, match_opcode, INSN_ALIAS },
-+{"or",        "I",   "d,s,t",  MATCH_OR, MASK_OR, match_opcode, 0 },
-+{"or",        "I",   "d,s,j",  MATCH_ORI, MASK_ORI, match_opcode, INSN_ALIAS },
-+{"auipc",     "I",   "d,u",  MATCH_AUIPC, MASK_AUIPC, match_opcode, 0 },
-+{"seqz",      "I",   "d,s",  MATCH_SLTIU | ENCODE_ITYPE_IMM(1), MASK_SLTIU | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"snez",      "I",   "d,t",  MATCH_SLTU, MASK_SLTU | MASK_RS1, match_opcode, INSN_ALIAS },
-+{"sltz",      "I",   "d,s",  MATCH_SLT, MASK_SLT | MASK_RS2, match_opcode, INSN_ALIAS },
-+{"sgtz",      "I",   "d,t",  MATCH_SLT, MASK_SLT | MASK_RS1, match_opcode, INSN_ALIAS },
-+{"slti",      "I",   "d,s,j",  MATCH_SLTI, MASK_SLTI, match_opcode, INSN_ALIAS },
-+{"slt",       "I",   "d,s,t",  MATCH_SLT, MASK_SLT, match_opcode, 0 },
-+{"slt",       "I",   "d,s,j",  MATCH_SLTI, MASK_SLTI, match_opcode, 0 },
-+{"sltiu",     "I",   "d,s,j",  MATCH_SLTIU, MASK_SLTIU, match_opcode, 0 },
-+{"sltu",      "I",   "d,s,t",  MATCH_SLTU, MASK_SLTU, match_opcode, 0 },
-+{"sltu",      "I",   "d,s,j",  MATCH_SLTIU, MASK_SLTIU, match_opcode, INSN_ALIAS },
-+{"sgt",       "I",   "d,t,s",  MATCH_SLT, MASK_SLT, match_opcode, INSN_ALIAS },
-+{"sgtu",      "I",   "d,t,s",  MATCH_SLTU, MASK_SLTU, match_opcode, INSN_ALIAS },
-+{"sb",        "I",   "t,q(s)",  MATCH_SB, MASK_SB, match_opcode, 0 },
-+{"sb",        "I",   "t,A,s",  0, (int) M_SB, match_never, INSN_MACRO },
-+{"sh",        "I",   "t,q(s)",  MATCH_SH, MASK_SH, match_opcode, 0 },
-+{"sh",        "I",   "t,A,s",  0, (int) M_SH, match_never, INSN_MACRO },
-+{"sw",        "C",   "CV,CM(Cc)",  MATCH_C_SWSP, MASK_C_SWSP, match_opcode, INSN_ALIAS },
-+{"sw",        "C",   "Ct,Ck(Cs)",  MATCH_C_SW, MASK_C_SW, match_opcode, INSN_ALIAS },
-+{"sw",        "I",   "t,q(s)",  MATCH_SW, MASK_SW, match_opcode, 0 },
-+{"sw",        "I",   "t,A,s",  0, (int) M_SW, match_never, INSN_MACRO },
-+{"fence",     "I",   "",  MATCH_FENCE | MASK_PRED | MASK_SUCC, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"fence",     "I",   "P,Q",  MATCH_FENCE, MASK_FENCE | MASK_RD | MASK_RS1 | (MASK_IMM & ~MASK_PRED & ~MASK_SUCC), match_opcode, 0 },
-+{"fence.i",   "I",   "",  MATCH_FENCE_I, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, 0 },
-+{"rdcycle",   "I",   "d",  MATCH_RDCYCLE, MASK_RDCYCLE, match_opcode, 0 },
-+{"rdinstret", "I",   "d",  MATCH_RDINSTRET, MASK_RDINSTRET, match_opcode, 0 },
-+{"rdtime",    "I",   "d",  MATCH_RDTIME, MASK_RDTIME, match_opcode, 0 },
-+{"rdcycleh",  "32I", "d",  MATCH_RDCYCLEH, MASK_RDCYCLEH, match_opcode, 0 },
-+{"rdinstreth","32I", "d",  MATCH_RDINSTRETH, MASK_RDINSTRETH, match_opcode, 0 },
-+{"rdtimeh",   "32I", "d",  MATCH_RDTIMEH, MASK_RDTIMEH, match_opcode, 0 },
-+{"ecall",     "I",   "",    MATCH_SCALL, MASK_SCALL, match_opcode, 0 },
-+{"scall",     "I",   "",    MATCH_SCALL, MASK_SCALL, match_opcode, 0 },
-+{"xori",      "I",   "d,s,j",  MATCH_XORI, MASK_XORI, match_opcode, 0 },
-+{"xor",       "C",   "Cs,Cw,Ct",  MATCH_C_XOR, MASK_C_XOR, match_opcode, INSN_ALIAS },
-+{"xor",       "C",   "Cs,Ct,Cw",  MATCH_C_XOR, MASK_C_XOR, match_opcode, INSN_ALIAS },
-+{"xor",       "I",   "d,s,t",  MATCH_XOR, MASK_XOR, match_opcode, 0 },
-+{"xor",       "I",   "d,s,j",  MATCH_XORI, MASK_XORI, match_opcode, INSN_ALIAS },
-+{"lwu",       "64I", "d,o(s)",  MATCH_LWU, MASK_LWU, match_opcode, 0 },
-+{"lwu",       "64I", "d,A",  0, (int) M_LWU, match_never, INSN_MACRO },
-+{"ld",        "64C", "d,Cn(Cc)",  MATCH_C_LDSP, MASK_C_LDSP, match_rd_nonzero, INSN_ALIAS },
-+{"ld",        "64C", "Ct,Cl(Cs)",  MATCH_C_LD, MASK_C_LD, match_opcode, INSN_ALIAS },
-+{"ld",        "64I", "d,o(s)", MATCH_LD, MASK_LD, match_opcode, 0 },
-+{"ld",        "64I", "d,A",  0, (int) M_LD, match_never, INSN_MACRO },
-+{"sd",        "64C", "CV,CN(Cc)",  MATCH_C_SDSP, MASK_C_SDSP, match_opcode, INSN_ALIAS },
-+{"sd",        "64C", "Ct,Cl(Cs)",  MATCH_C_SD, MASK_C_SD, match_opcode, INSN_ALIAS },
-+{"sd",        "64I", "t,q(s)",  MATCH_SD, MASK_SD, match_opcode, 0 },
-+{"sd",        "64I", "t,A,s",  0, (int) M_SD, match_never, INSN_MACRO },
-+{"sext.w",    "64C", "d,CU",  MATCH_C_ADDIW, MASK_C_ADDIW | MASK_RVC_IMM, match_rd_nonzero, INSN_ALIAS },
-+{"sext.w",    "64I", "d,s",  MATCH_ADDIW, MASK_ADDIW | MASK_IMM, match_opcode, INSN_ALIAS },
-+{"addiw",     "64C", "d,CU,Cj",  MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, INSN_ALIAS },
-+{"addiw",     "64I", "d,s,j",  MATCH_ADDIW, MASK_ADDIW, match_opcode, 0 },
-+{"addw",      "64C", "Cs,Cw,Ct",  MATCH_C_ADDW, MASK_C_ADDW, match_opcode, INSN_ALIAS },
-+{"addw",      "64C", "Cs,Ct,Cw",  MATCH_C_ADDW, MASK_C_ADDW, match_opcode, INSN_ALIAS },
-+{"addw",      "64C", "d,CU,Cj",  MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, INSN_ALIAS },
-+{"addw",      "64I", "d,s,t",  MATCH_ADDW, MASK_ADDW, match_opcode, 0 },
-+{"addw",      "64I", "d,s,j",  MATCH_ADDIW, MASK_ADDIW, match_opcode, INSN_ALIAS },
-+{"negw",      "64I", "d,t",  MATCH_SUBW, MASK_SUBW | MASK_RS1, match_opcode, INSN_ALIAS }, /* sub 0 */
-+{"slliw",     "64I", "d,s,<",   MATCH_SLLIW, MASK_SLLIW, match_opcode, 0 },
-+{"sllw",      "64I", "d,s,t",   MATCH_SLLW, MASK_SLLW, match_opcode, 0 },
-+{"sllw",      "64I", "d,s,<",   MATCH_SLLIW, MASK_SLLIW, match_opcode, INSN_ALIAS },
-+{"srliw",     "64I", "d,s,<",   MATCH_SRLIW, MASK_SRLIW, match_opcode, 0 },
-+{"srlw",      "64I", "d,s,t",   MATCH_SRLW, MASK_SRLW, match_opcode, 0 },
-+{"srlw",      "64I", "d,s,<",   MATCH_SRLIW, MASK_SRLIW, match_opcode, INSN_ALIAS },
-+{"sraiw",     "64I", "d,s,<",   MATCH_SRAIW, MASK_SRAIW, match_opcode, 0 },
-+{"sraw",      "64I", "d,s,t",   MATCH_SRAW, MASK_SRAW, match_opcode, 0 },
-+{"sraw",      "64I", "d,s,<",   MATCH_SRAIW, MASK_SRAIW, match_opcode, INSN_ALIAS },
-+{"subw",      "64C", "Cs,Cw,Ct",  MATCH_C_SUBW, MASK_C_SUBW, match_opcode, INSN_ALIAS },
-+{"subw",      "64I", "d,s,t",  MATCH_SUBW, MASK_SUBW, match_opcode, 0 },
-+
-+/* Atomic memory operation instruction subset */
-+{"lr.w",         "A",   "d,0(s)",    MATCH_LR_W, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
-+{"sc.w",         "A",   "d,t,0(s)",  MATCH_SC_W, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.w",     "A",   "d,t,0(s)",  MATCH_AMOADD_W, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.w",    "A",   "d,t,0(s)",  MATCH_AMOSWAP_W, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
-+{"amoand.w",     "A",   "d,t,0(s)",  MATCH_AMOAND_W, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
-+{"amoor.w",      "A",   "d,t,0(s)",  MATCH_AMOOR_W, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.w",     "A",   "d,t,0(s)",  MATCH_AMOXOR_W, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amomax.w",     "A",   "d,t,0(s)",  MATCH_AMOMAX_W, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.w",    "A",   "d,t,0(s)",  MATCH_AMOMAXU_W, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
-+{"amomin.w",     "A",   "d,t,0(s)",  MATCH_AMOMIN_W, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
-+{"amominu.w",    "A",   "d,t,0(s)",  MATCH_AMOMINU_W, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
-+{"lr.w.aq",      "A",   "d,0(s)",    MATCH_LR_W | MASK_AQ, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
-+{"sc.w.aq",      "A",   "d,t,0(s)",  MATCH_SC_W | MASK_AQ, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOADD_W | MASK_AQ, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.w.aq", "A",   "d,t,0(s)",  MATCH_AMOSWAP_W | MASK_AQ, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
-+{"amoand.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOAND_W | MASK_AQ, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
-+{"amoor.w.aq",   "A",   "d,t,0(s)",  MATCH_AMOOR_W | MASK_AQ, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOXOR_W | MASK_AQ, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amomax.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOMAX_W | MASK_AQ, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.w.aq", "A",   "d,t,0(s)",  MATCH_AMOMAXU_W | MASK_AQ, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
-+{"amomin.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOMIN_W | MASK_AQ, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
-+{"amominu.w.aq", "A",   "d,t,0(s)",  MATCH_AMOMINU_W | MASK_AQ, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
-+{"lr.w.rl",      "A",   "d,0(s)",    MATCH_LR_W | MASK_RL, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
-+{"sc.w.rl",      "A",   "d,t,0(s)",  MATCH_SC_W | MASK_RL, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOADD_W | MASK_RL, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.w.rl", "A",   "d,t,0(s)",  MATCH_AMOSWAP_W | MASK_RL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
-+{"amoand.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOAND_W | MASK_RL, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
-+{"amoor.w.rl",   "A",   "d,t,0(s)",  MATCH_AMOOR_W | MASK_RL, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOXOR_W | MASK_RL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amomax.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOMAX_W | MASK_RL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.w.rl", "A",   "d,t,0(s)",  MATCH_AMOMAXU_W | MASK_RL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
-+{"amomin.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOMIN_W | MASK_RL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
-+{"amominu.w.rl", "A",   "d,t,0(s)",  MATCH_AMOMINU_W | MASK_RL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
-+{"lr.w.sc",      "A",   "d,0(s)",    MATCH_LR_W | MASK_AQRL, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
-+{"sc.w.sc",      "A",   "d,t,0(s)",  MATCH_SC_W | MASK_AQRL, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOADD_W | MASK_AQRL, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.w.sc", "A",   "d,t,0(s)",  MATCH_AMOSWAP_W | MASK_AQRL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
-+{"amoand.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOAND_W | MASK_AQRL, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
-+{"amoor.w.sc",   "A",   "d,t,0(s)",  MATCH_AMOOR_W | MASK_AQRL, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOXOR_W | MASK_AQRL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
-+{"amomax.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOMAX_W | MASK_AQRL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.w.sc", "A",   "d,t,0(s)",  MATCH_AMOMAXU_W | MASK_AQRL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
-+{"amomin.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOMIN_W | MASK_AQRL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
-+{"amominu.w.sc", "A",   "d,t,0(s)",  MATCH_AMOMINU_W | MASK_AQRL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
-+{"lr.d",         "64A", "d,0(s)",    MATCH_LR_D, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
-+{"sc.d",         "64A", "d,t,0(s)",  MATCH_SC_D, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.d",     "64A", "d,t,0(s)",  MATCH_AMOADD_D, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.d",    "64A", "d,t,0(s)",  MATCH_AMOSWAP_D, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
-+{"amoand.d",     "64A", "d,t,0(s)",  MATCH_AMOAND_D, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
-+{"amoor.d",      "64A", "d,t,0(s)",  MATCH_AMOOR_D, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.d",     "64A", "d,t,0(s)",  MATCH_AMOXOR_D, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amomax.d",     "64A", "d,t,0(s)",  MATCH_AMOMAX_D, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.d",    "64A", "d,t,0(s)",  MATCH_AMOMAXU_D, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
-+{"amomin.d",     "64A", "d,t,0(s)",  MATCH_AMOMIN_D, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
-+{"amominu.d",    "64A", "d,t,0(s)",  MATCH_AMOMINU_D, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
-+{"lr.d.aq",      "64A", "d,0(s)",    MATCH_LR_D | MASK_AQ, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
-+{"sc.d.aq",      "64A", "d,t,0(s)",  MATCH_SC_D | MASK_AQ, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOADD_D | MASK_AQ, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.d.aq", "64A", "d,t,0(s)",  MATCH_AMOSWAP_D | MASK_AQ, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
-+{"amoand.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOAND_D | MASK_AQ, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
-+{"amoor.d.aq",   "64A", "d,t,0(s)",  MATCH_AMOOR_D | MASK_AQ, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOXOR_D | MASK_AQ, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amomax.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOMAX_D | MASK_AQ, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.d.aq", "64A", "d,t,0(s)",  MATCH_AMOMAXU_D | MASK_AQ, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
-+{"amomin.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOMIN_D | MASK_AQ, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
-+{"amominu.d.aq", "64A", "d,t,0(s)",  MATCH_AMOMINU_D | MASK_AQ, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
-+{"lr.d.rl",      "64A", "d,0(s)",    MATCH_LR_D | MASK_RL, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
-+{"sc.d.rl",      "64A", "d,t,0(s)",  MATCH_SC_D | MASK_RL, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOADD_D | MASK_RL, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.d.rl", "64A", "d,t,0(s)",  MATCH_AMOSWAP_D | MASK_RL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
-+{"amoand.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOAND_D | MASK_RL, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
-+{"amoor.d.rl",   "64A", "d,t,0(s)",  MATCH_AMOOR_D | MASK_RL, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOXOR_D | MASK_RL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amomax.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOMAX_D | MASK_RL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.d.rl", "64A", "d,t,0(s)",  MATCH_AMOMAXU_D | MASK_RL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
-+{"amomin.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOMIN_D | MASK_RL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
-+{"amominu.d.rl", "64A", "d,t,0(s)",  MATCH_AMOMINU_D | MASK_RL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
-+{"lr.d.sc",      "64A", "d,0(s)",    MATCH_LR_D | MASK_AQRL, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
-+{"sc.d.sc",      "64A", "d,t,0(s)",  MATCH_SC_D | MASK_AQRL, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
-+{"amoadd.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOADD_D | MASK_AQRL, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
-+{"amoswap.d.sc", "64A", "d,t,0(s)",  MATCH_AMOSWAP_D | MASK_AQRL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
-+{"amoand.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOAND_D | MASK_AQRL, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
-+{"amoor.d.sc",   "64A", "d,t,0(s)",  MATCH_AMOOR_D | MASK_AQRL, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amoxor.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOXOR_D | MASK_AQRL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
-+{"amomax.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOMAX_D | MASK_AQRL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
-+{"amomaxu.d.sc", "64A", "d,t,0(s)",  MATCH_AMOMAXU_D | MASK_AQRL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
-+{"amomin.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOMIN_D | MASK_AQRL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
-+{"amominu.d.sc", "64A", "d,t,0(s)",  MATCH_AMOMINU_D | MASK_AQRL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
-+
-+/* Multiply/Divide instruction subset */
-+{"mul",       "M",   "d,s,t",  MATCH_MUL, MASK_MUL, match_opcode, 0 },
-+{"mulh",      "M",   "d,s,t",  MATCH_MULH, MASK_MULH, match_opcode, 0 },
-+{"mulhu",     "M",   "d,s,t",  MATCH_MULHU, MASK_MULHU, match_opcode, 0 },
-+{"mulhsu",    "M",   "d,s,t",  MATCH_MULHSU, MASK_MULHSU, match_opcode, 0 },
-+{"div",       "M",   "d,s,t",  MATCH_DIV, MASK_DIV, match_opcode, 0 },
-+{"divu",      "M",   "d,s,t",  MATCH_DIVU, MASK_DIVU, match_opcode, 0 },
-+{"rem",       "M",   "d,s,t",  MATCH_REM, MASK_REM, match_opcode, 0 },
-+{"remu",      "M",   "d,s,t",  MATCH_REMU, MASK_REMU, match_opcode, 0 },
-+{"mulw",      "64M", "d,s,t",  MATCH_MULW, MASK_MULW, match_opcode, 0 },
-+{"divw",      "64M", "d,s,t",  MATCH_DIVW, MASK_DIVW, match_opcode, 0 },
-+{"divuw",     "64M", "d,s,t",  MATCH_DIVUW, MASK_DIVUW, match_opcode, 0 },
-+{"remw",      "64M", "d,s,t",  MATCH_REMW, MASK_REMW, match_opcode, 0 },
-+{"remuw",     "64M", "d,s,t",  MATCH_REMUW, MASK_REMUW, match_opcode, 0 },
-+
-+/* Single-precision floating-point instruction subset */
-+{"frsr",      "F",   "d",  MATCH_FRCSR, MASK_FRCSR, match_opcode, 0 },
-+{"fssr",      "F",   "s",  MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, 0 },
-+{"fssr",      "F",   "d,s",  MATCH_FSCSR, MASK_FSCSR, match_opcode, 0 },
-+{"frcsr",     "F",   "d",  MATCH_FRCSR, MASK_FRCSR, match_opcode, 0 },
-+{"fscsr",     "F",   "s",  MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, 0 },
-+{"fscsr",     "F",   "d,s",  MATCH_FSCSR, MASK_FSCSR, match_opcode, 0 },
-+{"frrm",      "F",   "d",  MATCH_FRRM, MASK_FRRM, match_opcode, 0 },
-+{"fsrm",      "F",   "s",  MATCH_FSRM, MASK_FSRM | MASK_RD, match_opcode, 0 },
-+{"fsrm",      "F",   "d,s",  MATCH_FSRM, MASK_FSRM, match_opcode, 0 },
-+{"frflags",   "F",   "d",  MATCH_FRFLAGS, MASK_FRFLAGS, match_opcode, 0 },
-+{"fsflags",   "F",   "s",  MATCH_FSFLAGS, MASK_FSFLAGS | MASK_RD, match_opcode, 0 },
-+{"fsflags",   "F",   "d,s",  MATCH_FSFLAGS, MASK_FSFLAGS, match_opcode, 0 },
-+{"flw",       "32C", "D,Cm(Cc)",  MATCH_C_FLWSP, MASK_C_FLWSP, match_opcode, INSN_ALIAS },
-+{"flw",       "32C", "CD,Ck(Cs)",  MATCH_C_FLW, MASK_C_FLW, match_opcode, INSN_ALIAS },
-+{"flw",       "F",   "D,o(s)",  MATCH_FLW, MASK_FLW, match_opcode, 0 },
-+{"flw",       "F",   "D,A,s",  0, (int) M_FLW, match_never, INSN_MACRO },
-+{"fsw",       "32C", "CT,CM(Cc)",  MATCH_C_FSWSP, MASK_C_FSWSP, match_opcode, INSN_ALIAS },
-+{"fsw",       "32C", "CD,Ck(Cs)",  MATCH_C_FSW, MASK_C_FSW, match_opcode, INSN_ALIAS },
-+{"fsw",       "F",   "T,q(s)",  MATCH_FSW, MASK_FSW, match_opcode, 0 },
-+{"fsw",       "F",   "T,A,s",  0, (int) M_FSW, match_never, INSN_MACRO },
-+{"fmv.x.s",   "F",   "d,S",  MATCH_FMV_X_S, MASK_FMV_X_S, match_opcode, 0 },
-+{"fmv.s.x",   "F",   "D,s",  MATCH_FMV_S_X, MASK_FMV_S_X, match_opcode, 0 },
-+{"fmv.s",     "F",   "D,U",  MATCH_FSGNJ_S, MASK_FSGNJ_S, match_rs1_eq_rs2, INSN_ALIAS },
-+{"fneg.s",    "F",   "D,U",  MATCH_FSGNJN_S, MASK_FSGNJN_S, match_rs1_eq_rs2, INSN_ALIAS },
-+{"fabs.s",    "F",   "D,U",  MATCH_FSGNJX_S, MASK_FSGNJX_S, match_rs1_eq_rs2, INSN_ALIAS },
-+{"fsgnj.s",   "F",   "D,S,T",  MATCH_FSGNJ_S, MASK_FSGNJ_S, match_opcode, 0 },
-+{"fsgnjn.s",  "F",   "D,S,T",  MATCH_FSGNJN_S, MASK_FSGNJN_S, match_opcode, 0 },
-+{"fsgnjx.s",  "F",   "D,S,T",  MATCH_FSGNJX_S, MASK_FSGNJX_S, match_opcode, 0 },
-+{"fadd.s",    "F",   "D,S,T",  MATCH_FADD_S | MASK_RM, MASK_FADD_S | MASK_RM, match_opcode, 0 },
-+{"fadd.s",    "F",   "D,S,T,m",  MATCH_FADD_S, MASK_FADD_S, match_opcode, 0 },
-+{"fsub.s",    "F",   "D,S,T",  MATCH_FSUB_S | MASK_RM, MASK_FSUB_S | MASK_RM, match_opcode, 0 },
-+{"fsub.s",    "F",   "D,S,T,m",  MATCH_FSUB_S, MASK_FSUB_S, match_opcode, 0 },
-+{"fmul.s",    "F",   "D,S,T",  MATCH_FMUL_S | MASK_RM, MASK_FMUL_S | MASK_RM, match_opcode, 0 },
-+{"fmul.s",    "F",   "D,S,T,m",  MATCH_FMUL_S, MASK_FMUL_S, match_opcode, 0 },
-+{"fdiv.s",    "F",   "D,S,T",  MATCH_FDIV_S | MASK_RM, MASK_FDIV_S | MASK_RM, match_opcode, 0 },
-+{"fdiv.s",    "F",   "D,S,T,m",  MATCH_FDIV_S, MASK_FDIV_S, match_opcode, 0 },
-+{"fsqrt.s",   "F",   "D,S",  MATCH_FSQRT_S | MASK_RM, MASK_FSQRT_S | MASK_RM, match_opcode, 0 },
-+{"fsqrt.s",   "F",   "D,S,m",  MATCH_FSQRT_S, MASK_FSQRT_S, match_opcode, 0 },
-+{"fmin.s",    "F",   "D,S,T",  MATCH_FMIN_S, MASK_FMIN_S, match_opcode, 0 },
-+{"fmax.s",    "F",   "D,S,T",  MATCH_FMAX_S, MASK_FMAX_S, match_opcode, 0 },
-+{"fmadd.s",   "F",   "D,S,T,R",  MATCH_FMADD_S | MASK_RM, MASK_FMADD_S | MASK_RM, match_opcode, 0 },
-+{"fmadd.s",   "F",   "D,S,T,R,m",  MATCH_FMADD_S, MASK_FMADD_S, match_opcode, 0 },
-+{"fnmadd.s",  "F",   "D,S,T,R",  MATCH_FNMADD_S | MASK_RM, MASK_FNMADD_S | MASK_RM, match_opcode, 0 },
-+{"fnmadd.s",  "F",   "D,S,T,R,m",  MATCH_FNMADD_S, MASK_FNMADD_S, match_opcode, 0 },
-+{"fmsub.s",   "F",   "D,S,T,R",  MATCH_FMSUB_S | MASK_RM, MASK_FMSUB_S | MASK_RM, match_opcode, 0 },
-+{"fmsub.s",   "F",   "D,S,T,R,m",  MATCH_FMSUB_S, MASK_FMSUB_S, match_opcode, 0 },
-+{"fnmsub.s",  "F",   "D,S,T,R",  MATCH_FNMSUB_S | MASK_RM, MASK_FNMSUB_S | MASK_RM, match_opcode, 0 },
-+{"fnmsub.s",  "F",   "D,S,T,R,m",  MATCH_FNMSUB_S, MASK_FNMSUB_S, match_opcode, 0 },
-+{"fcvt.w.s",  "F",   "d,S",  MATCH_FCVT_W_S | MASK_RM, MASK_FCVT_W_S | MASK_RM, match_opcode, 0 },
-+{"fcvt.w.s",  "F",   "d,S,m",  MATCH_FCVT_W_S, MASK_FCVT_W_S, match_opcode, 0 },
-+{"fcvt.wu.s", "F",   "d,S",  MATCH_FCVT_WU_S | MASK_RM, MASK_FCVT_WU_S | MASK_RM, match_opcode, 0 },
-+{"fcvt.wu.s", "F",   "d,S,m",  MATCH_FCVT_WU_S, MASK_FCVT_WU_S, match_opcode, 0 },
-+{"fcvt.s.w",  "F",   "D,s",  MATCH_FCVT_S_W | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, 0 },
-+{"fcvt.s.w",  "F",   "D,s,m",  MATCH_FCVT_S_W, MASK_FCVT_S_W, match_opcode, 0 },
-+{"fcvt.s.wu", "F",   "D,s",  MATCH_FCVT_S_WU | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, 0 },
-+{"fcvt.s.wu", "F",   "D,s,m",  MATCH_FCVT_S_WU, MASK_FCVT_S_WU, match_opcode, 0 },
-+{"fclass.s",  "F",   "d,S",  MATCH_FCLASS_S, MASK_FCLASS_S, match_opcode, 0 },
-+{"feq.s",     "F",   "d,S,T",    MATCH_FEQ_S, MASK_FEQ_S, match_opcode, 0 },
-+{"flt.s",     "F",   "d,S,T",    MATCH_FLT_S, MASK_FLT_S, match_opcode, 0 },
-+{"fle.s",     "F",   "d,S,T",    MATCH_FLE_S, MASK_FLE_S, match_opcode, 0 },
-+{"fgt.s",     "F",   "d,T,S",    MATCH_FLT_S, MASK_FLT_S, match_opcode, 0 },
-+{"fge.s",     "F",   "d,T,S",    MATCH_FLE_S, MASK_FLE_S, match_opcode, 0 },
-+{"fcvt.l.s",  "64F", "d,S",  MATCH_FCVT_L_S | MASK_RM, MASK_FCVT_L_S | MASK_RM, match_opcode, 0 },
-+{"fcvt.l.s",  "64F", "d,S,m",  MATCH_FCVT_L_S, MASK_FCVT_L_S, match_opcode, 0 },
-+{"fcvt.lu.s", "64F", "d,S",  MATCH_FCVT_LU_S | MASK_RM, MASK_FCVT_LU_S | MASK_RM, match_opcode, 0 },
-+{"fcvt.lu.s", "64F", "d,S,m",  MATCH_FCVT_LU_S, MASK_FCVT_LU_S, match_opcode, 0 },
-+{"fcvt.s.l",  "64F", "D,s",  MATCH_FCVT_S_L | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, 0 },
-+{"fcvt.s.l",  "64F", "D,s,m",  MATCH_FCVT_S_L, MASK_FCVT_S_L, match_opcode, 0 },
-+{"fcvt.s.lu", "64F", "D,s",  MATCH_FCVT_S_LU | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, 0 },
-+{"fcvt.s.lu", "64F", "D,s,m",  MATCH_FCVT_S_LU, MASK_FCVT_S_LU, match_opcode, 0 },
-+
-+/* Double-precision floating-point instruction subset */
-+{"fld",       "C",   "D,Cn(Cc)",  MATCH_C_FLDSP, MASK_C_FLDSP, match_opcode, INSN_ALIAS },
-+{"fld",       "C",   "CD,Cl(Cs)",  MATCH_C_FLD, MASK_C_FLD, match_opcode, INSN_ALIAS },
-+{"fld",       "D",   "D,o(s)",  MATCH_FLD, MASK_FLD, match_opcode, 0 },
-+{"fld",       "D",   "D,A,s",  0, (int) M_FLD, match_never, INSN_MACRO },
-+{"fsd",       "C",   "CT,CN(Cc)",  MATCH_C_FSDSP, MASK_C_FSDSP, match_opcode, INSN_ALIAS },
-+{"fsd",       "C",   "CD,Cl(Cs)",  MATCH_C_FSD, MASK_C_FSD, match_opcode, INSN_ALIAS },
-+{"fsd",       "D",   "T,q(s)",  MATCH_FSD, MASK_FSD, match_opcode, 0 },
-+{"fsd",       "D",   "T,A,s",  0, (int) M_FSD, match_never, INSN_MACRO },
-+{"fmv.d",     "D",   "D,U",  MATCH_FSGNJ_D, MASK_FSGNJ_D, match_rs1_eq_rs2, INSN_ALIAS },
-+{"fneg.d",    "D",   "D,U",  MATCH_FSGNJN_D, MASK_FSGNJN_D, match_rs1_eq_rs2, INSN_ALIAS },
-+{"fabs.d",    "D",   "D,U",  MATCH_FSGNJX_D, MASK_FSGNJX_D, match_rs1_eq_rs2, INSN_ALIAS },
-+{"fsgnj.d",   "D",   "D,S,T",  MATCH_FSGNJ_D, MASK_FSGNJ_D, match_opcode, 0 },
-+{"fsgnjn.d",  "D",   "D,S,T",  MATCH_FSGNJN_D, MASK_FSGNJN_D, match_opcode, 0 },
-+{"fsgnjx.d",  "D",   "D,S,T",  MATCH_FSGNJX_D, MASK_FSGNJX_D, match_opcode, 0 },
-+{"fadd.d",    "D",   "D,S,T",  MATCH_FADD_D | MASK_RM, MASK_FADD_D | MASK_RM, match_opcode, 0 },
-+{"fadd.d",    "D",   "D,S,T,m",  MATCH_FADD_D, MASK_FADD_D, match_opcode, 0 },
-+{"fsub.d",    "D",   "D,S,T",  MATCH_FSUB_D | MASK_RM, MASK_FSUB_D | MASK_RM, match_opcode, 0 },
-+{"fsub.d",    "D",   "D,S,T,m",  MATCH_FSUB_D, MASK_FSUB_D, match_opcode, 0 },
-+{"fmul.d",    "D",   "D,S,T",  MATCH_FMUL_D | MASK_RM, MASK_FMUL_D | MASK_RM, match_opcode, 0 },
-+{"fmul.d",    "D",   "D,S,T,m",  MATCH_FMUL_D, MASK_FMUL_D, match_opcode, 0 },
-+{"fdiv.d",    "D",   "D,S,T",  MATCH_FDIV_D | MASK_RM, MASK_FDIV_D | MASK_RM, match_opcode, 0 },
-+{"fdiv.d",    "D",   "D,S,T,m",  MATCH_FDIV_D, MASK_FDIV_D, match_opcode, 0 },
-+{"fsqrt.d",   "D",   "D,S",  MATCH_FSQRT_D | MASK_RM, MASK_FSQRT_D | MASK_RM, match_opcode, 0 },
-+{"fsqrt.d",   "D",   "D,S,m",  MATCH_FSQRT_D, MASK_FSQRT_D, match_opcode, 0 },
-+{"fmin.d",    "D",   "D,S,T",  MATCH_FMIN_D, MASK_FMIN_D, match_opcode, 0 },
-+{"fmax.d",    "D",   "D,S,T",  MATCH_FMAX_D, MASK_FMAX_D, match_opcode, 0 },
-+{"fmadd.d",   "D",   "D,S,T,R",  MATCH_FMADD_D | MASK_RM, MASK_FMADD_D | MASK_RM, match_opcode, 0 },
-+{"fmadd.d",   "D",   "D,S,T,R,m",  MATCH_FMADD_D, MASK_FMADD_D, match_opcode, 0 },
-+{"fnmadd.d",  "D",   "D,S,T,R",  MATCH_FNMADD_D | MASK_RM, MASK_FNMADD_D | MASK_RM, match_opcode, 0 },
-+{"fnmadd.d",  "D",   "D,S,T,R,m",  MATCH_FNMADD_D, MASK_FNMADD_D, match_opcode, 0 },
-+{"fmsub.d",   "D",   "D,S,T,R",  MATCH_FMSUB_D | MASK_RM, MASK_FMSUB_D | MASK_RM, match_opcode, 0 },
-+{"fmsub.d",   "D",   "D,S,T,R,m",  MATCH_FMSUB_D, MASK_FMSUB_D, match_opcode, 0 },
-+{"fnmsub.d",  "D",   "D,S,T,R",  MATCH_FNMSUB_D | MASK_RM, MASK_FNMSUB_D | MASK_RM, match_opcode, 0 },
-+{"fnmsub.d",  "D",   "D,S,T,R,m",  MATCH_FNMSUB_D, MASK_FNMSUB_D, match_opcode, 0 },
-+{"fcvt.w.d",  "D",   "d,S",  MATCH_FCVT_W_D | MASK_RM, MASK_FCVT_W_D | MASK_RM, match_opcode, 0 },
-+{"fcvt.w.d",  "D",   "d,S,m",  MATCH_FCVT_W_D, MASK_FCVT_W_D, match_opcode, 0 },
-+{"fcvt.wu.d", "D",   "d,S",  MATCH_FCVT_WU_D | MASK_RM, MASK_FCVT_WU_D | MASK_RM, match_opcode, 0 },
-+{"fcvt.wu.d", "D",   "d,S,m",  MATCH_FCVT_WU_D, MASK_FCVT_WU_D, match_opcode, 0 },
-+{"fcvt.d.w",  "D",   "D,s",  MATCH_FCVT_D_W, MASK_FCVT_D_W | MASK_RM, match_opcode, 0 },
-+{"fcvt.d.wu", "D",   "D,s",  MATCH_FCVT_D_WU, MASK_FCVT_D_WU | MASK_RM, match_opcode, 0 },
-+{"fcvt.d.s",  "D",   "D,S",  MATCH_FCVT_D_S, MASK_FCVT_D_S | MASK_RM, match_opcode, 0 },
-+{"fcvt.s.d",  "D",   "D,S",  MATCH_FCVT_S_D | MASK_RM, MASK_FCVT_S_D | MASK_RM, match_opcode, 0 },
-+{"fcvt.s.d",  "D",   "D,S,m",  MATCH_FCVT_S_D, MASK_FCVT_S_D, match_opcode, 0 },
-+{"fclass.d",  "D",   "d,S",  MATCH_FCLASS_D, MASK_FCLASS_D, match_opcode, 0 },
-+{"feq.d",     "D",   "d,S,T",    MATCH_FEQ_D, MASK_FEQ_D, match_opcode, 0 },
-+{"flt.d",     "D",   "d,S,T",    MATCH_FLT_D, MASK_FLT_D, match_opcode, 0 },
-+{"fle.d",     "D",   "d,S,T",    MATCH_FLE_D, MASK_FLE_D, match_opcode, 0 },
-+{"fgt.d",     "D",   "d,T,S",    MATCH_FLT_D, MASK_FLT_D, match_opcode, 0 },
-+{"fge.d",     "D",   "d,T,S",    MATCH_FLE_D, MASK_FLE_D, match_opcode, 0 },
-+{"fmv.x.d",   "64D", "d,S",  MATCH_FMV_X_D, MASK_FMV_X_D, match_opcode, 0 },
-+{"fmv.d.x",   "64D", "D,s",  MATCH_FMV_D_X, MASK_FMV_D_X, match_opcode, 0 },
-+{"fcvt.l.d",  "64D", "d,S",  MATCH_FCVT_L_D | MASK_RM, MASK_FCVT_L_D | MASK_RM, match_opcode, 0 },
-+{"fcvt.l.d",  "64D", "d,S,m",  MATCH_FCVT_L_D, MASK_FCVT_L_D, match_opcode, 0 },
-+{"fcvt.lu.d", "64D", "d,S",  MATCH_FCVT_LU_D | MASK_RM, MASK_FCVT_LU_D | MASK_RM, match_opcode, 0 },
-+{"fcvt.lu.d", "64D", "d,S,m",  MATCH_FCVT_LU_D, MASK_FCVT_LU_D, match_opcode, 0 },
-+{"fcvt.d.l",  "64D", "D,s",  MATCH_FCVT_D_L | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, 0 },
-+{"fcvt.d.l",  "64D", "D,s,m",  MATCH_FCVT_D_L, MASK_FCVT_D_L, match_opcode, 0 },
-+{"fcvt.d.lu", "64D", "D,s",  MATCH_FCVT_D_LU | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, 0 },
-+{"fcvt.d.lu", "64D", "D,s,m",  MATCH_FCVT_D_LU, MASK_FCVT_D_LU, match_opcode, 0 },
-+
-+/* Compressed instructions */
-+{"c.ebreak",  "C",   "",  MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, 0 },
-+{"c.jr",      "C",   "d",  MATCH_C_JR, MASK_C_JR, match_rd_nonzero, 0 },
-+{"c.jalr",    "C",   "d",  MATCH_C_JALR, MASK_C_JALR, match_rd_nonzero, 0 },
-+{"c.j",       "C",   "Ca",  MATCH_C_J, MASK_C_J, match_opcode, 0 },
-+{"c.jal",     "32C", "Ca",  MATCH_C_JAL, MASK_C_JAL, match_opcode, 0 },
-+{"c.beqz",    "C",   "Cs,Cp",  MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, 0 },
-+{"c.bnez",    "C",   "Cs,Cp",  MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, 0 },
-+{"c.lwsp",    "C",   "d,Cm(Cc)",  MATCH_C_LWSP, MASK_C_LWSP, match_rd_nonzero, 0 },
-+{"c.lw",      "C",   "Ct,Ck(Cs)",  MATCH_C_LW, MASK_C_LW, match_opcode, 0 },
-+{"c.swsp",    "C",   "CV,CM(Cc)",  MATCH_C_SWSP, MASK_C_SWSP, match_opcode, 0 },
-+{"c.sw",      "C",   "Ct,Ck(Cs)",  MATCH_C_SW, MASK_C_SW, match_opcode, 0 },
-+{"c.nop",     "C",   "",  MATCH_C_ADDI, 0xffff, match_opcode, 0 },
-+{"c.mv",      "C",   "d,CV",  MATCH_C_MV, MASK_C_MV, match_c_add, 0 },
-+{"c.lui",     "C",   "d,Cu",  MATCH_C_LUI, MASK_C_LUI, match_c_lui, 0 },
-+{"c.li",      "C",   "d,Cj",  MATCH_C_LI, MASK_C_LI, match_rd_nonzero, 0 },
-+{"c.addi4spn","C",   "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_opcode, 0 },
-+{"c.addi16sp","C",   "Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_opcode, 0 },
-+{"c.addi",    "C",   "d,Cj",  MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, 0 },
-+{"c.add",     "C",   "d,CV",  MATCH_C_ADD, MASK_C_ADD, match_c_add, 0 },
-+{"c.sub",     "C",   "Cs,Ct",  MATCH_C_SUB, MASK_C_SUB, match_opcode, 0 },
-+{"c.and",     "C",   "Cs,Ct",  MATCH_C_AND, MASK_C_AND, match_opcode, 0 },
-+{"c.or",      "C",   "Cs,Ct",  MATCH_C_OR, MASK_C_OR, match_opcode, 0 },
-+{"c.xor",     "C",   "Cs,Ct",  MATCH_C_XOR, MASK_C_XOR, match_opcode, 0 },
-+{"c.slli",    "C",   "d,C>",  MATCH_C_SLLI, MASK_C_SLLI, match_rd_nonzero, 0 },
-+{"c.srli",    "C",   "Cs,C>",  MATCH_C_SRLI, MASK_C_SRLI, match_opcode, 0 },
-+{"c.srai",    "C",   "Cs,C>",  MATCH_C_SRAI, MASK_C_SRAI, match_opcode, 0 },
-+{"c.andi",    "C",   "Cs,Cj",  MATCH_C_ANDI, MASK_C_ANDI, match_opcode, 0 },
-+{"c.addiw",   "64C", "d,Cj",  MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, 0 },
-+{"c.addw",    "64C", "Cs,Ct",  MATCH_C_ADDW, MASK_C_ADDW, match_opcode, 0 },
-+{"c.subw",    "64C", "Cs,Ct",  MATCH_C_SUBW, MASK_C_SUBW, match_opcode, 0 },
-+{"c.ldsp",    "64C", "d,Cn(Cc)",  MATCH_C_LDSP, MASK_C_LDSP, match_rd_nonzero, 0 },
-+{"c.ld",      "64C", "Ct,Cl(Cs)",  MATCH_C_LD, MASK_C_LD, match_opcode, 0 },
-+{"c.sdsp",    "64C", "CV,CN(Cc)",  MATCH_C_SDSP, MASK_C_SDSP, match_opcode, 0 },
-+{"c.sd",      "64C", "Ct,Cl(Cs)",  MATCH_C_SD, MASK_C_SD, match_opcode, 0 },
-+{"c.fldsp",   "C",   "D,Cn(Cc)",  MATCH_C_FLDSP, MASK_C_FLDSP, match_opcode, 0 },
-+{"c.fld",     "C",   "CD,Cl(Cs)",  MATCH_C_FLD, MASK_C_FLD, match_opcode, 0 },
-+{"c.fsdsp",   "C",   "CT,CN(Cc)",  MATCH_C_FSDSP, MASK_C_FSDSP, match_opcode, 0 },
-+{"c.fsd",     "C",   "CD,Cl(Cs)",  MATCH_C_FSD, MASK_C_FSD, match_opcode, 0 },
-+{"c.flwsp",   "32C", "D,Cm(Cc)",  MATCH_C_FLWSP, MASK_C_FLWSP, match_opcode, 0 },
-+{"c.flw",     "32C", "CD,Ck(Cs)",  MATCH_C_FLW, MASK_C_FLW, match_opcode, 0 },
-+{"c.fswsp",   "32C", "CT,CM(Cc)",  MATCH_C_FSWSP, MASK_C_FSWSP, match_opcode, 0 },
-+{"c.fsw",     "32C", "CD,Ck(Cs)",  MATCH_C_FSW, MASK_C_FSW, match_opcode, 0 },
-+
-+/* Supervisor instructions */
-+{"csrr",      "I",   "d,E",  MATCH_CSRRS, MASK_CSRRS | MASK_RS1, match_opcode, 0 },
-+{"csrwi",     "I",   "E,Z",  MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, 0 },
-+{"csrw",      "I",   "E,s",  MATCH_CSRRW, MASK_CSRRW | MASK_RD, match_opcode, 0 },
-+{"csrw",      "I",   "E,Z",  MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, 0 },
-+{"csrsi",     "I",   "E,Z",  MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, 0 },
-+{"csrs",      "I",   "E,s",  MATCH_CSRRS, MASK_CSRRS | MASK_RD, match_opcode, 0 },
-+{"csrs",      "I",   "E,Z",  MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, 0 },
-+{"csrci",     "I",   "E,Z",  MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, 0 },
-+{"csrc",      "I",   "E,s",  MATCH_CSRRC, MASK_CSRRC | MASK_RD, match_opcode, 0 },
-+{"csrc",      "I",   "E,Z",  MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, 0 },
-+{"csrrw",     "I",   "d,E,s",  MATCH_CSRRW, MASK_CSRRW, match_opcode, 0 },
-+{"csrrw",     "I",   "d,E,Z",  MATCH_CSRRWI, MASK_CSRRWI, match_opcode, 0 },
-+{"csrrs",     "I",   "d,E,s",  MATCH_CSRRS, MASK_CSRRS, match_opcode, 0 },
-+{"csrrs",     "I",   "d,E,Z",  MATCH_CSRRSI, MASK_CSRRSI, match_opcode, 0 },
-+{"csrrc",     "I",   "d,E,s",  MATCH_CSRRC, MASK_CSRRC, match_opcode, 0 },
-+{"csrrc",     "I",   "d,E,Z",  MATCH_CSRRCI, MASK_CSRRCI, match_opcode, 0 },
-+{"csrrwi",    "I",   "d,E,Z",  MATCH_CSRRWI, MASK_CSRRWI, match_opcode, 0 },
-+{"csrrsi",    "I",   "d,E,Z",  MATCH_CSRRSI, MASK_CSRRSI, match_opcode, 0 },
-+{"csrrci",    "I",   "d,E,Z",  MATCH_CSRRCI, MASK_CSRRCI, match_opcode, 0 },
-+{"eret",      "I",   "",     MATCH_SRET, MASK_SRET, match_opcode, 0 },
-+{"sret",      "I",   "",     MATCH_SRET, MASK_SRET, match_opcode, 0 },
-+{"sfence.vm", "I",   "",     MATCH_SFENCE_VM, MASK_SFENCE_VM | MASK_RS1, match_opcode, 0 },
-+{"sfence.vm", "I",   "s",    MATCH_SFENCE_VM, MASK_SFENCE_VM, match_opcode, 0 },
-+{"wfi",       "I",   "",     MATCH_WFI, MASK_WFI, match_opcode, 0 },
-+
-+/* Rocket Custom Coprocessor extension */
-+{"custom0",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2, match_opcode, 0},
-+{"custom0",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1, match_opcode, 0},
-+{"custom0",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD, match_opcode, 0},
-+{"custom0",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2, match_opcode, 0},
-+{"custom0",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1, match_opcode, 0},
-+{"custom0",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM0, MASK_CUSTOM0, match_opcode, 0},
-+{"custom1",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2, match_opcode, 0},
-+{"custom1",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1, match_opcode, 0},
-+{"custom1",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD, match_opcode, 0},
-+{"custom1",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2, match_opcode, 0},
-+{"custom1",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1, match_opcode, 0},
-+{"custom1",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM1, MASK_CUSTOM1, match_opcode, 0},
-+{"custom2",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2, match_opcode, 0},
-+{"custom2",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1, match_opcode, 0},
-+{"custom2",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD, match_opcode, 0},
-+{"custom2",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2, match_opcode, 0},
-+{"custom2",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1, match_opcode, 0},
-+{"custom2",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM2, MASK_CUSTOM2, match_opcode, 0},
-+{"custom3",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2, match_opcode, 0},
-+{"custom3",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1, match_opcode, 0},
-+{"custom3",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD, match_opcode, 0},
-+{"custom3",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2, match_opcode, 0},
-+{"custom3",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1, match_opcode, 0},
-+{"custom3",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM3, MASK_CUSTOM3, match_opcode, 0},
-+};
-+
-+#define RISCV_NUM_OPCODES \
-+  ((sizeof riscv_builtin_opcodes) / (sizeof (riscv_builtin_opcodes[0])))
-+const int bfd_riscv_num_builtin_opcodes = RISCV_NUM_OPCODES;
-+
-+/* Removed const from the following to allow for dynamic extensions to the
-+   built-in instruction set.  */
-+struct riscv_opcode *riscv_opcodes =
-+  (struct riscv_opcode *) riscv_builtin_opcodes;
-+int bfd_riscv_num_opcodes = RISCV_NUM_OPCODES;
-+#undef RISCV_NUM_OPCODES
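
Each entry in riscv_builtin_opcodes is selected by applying its match_func to the instruction
word, which in the common case (match_opcode) reduces to a mask/match test; because the scan is
linear over contiguous same-name entries, register forms must precede immediate forms, as the
table comment notes.  Below is a toy sketch of that lookup using simplified stand-in structures:
the ADDI/ADD mask and match values are the real base-ISA encodings, but the struct, table and
function names are hypothetical and only meant to show the idea.

/* Illustrative sketch (not from the patch): the ((insn ^ match) & mask) == 0
   test applied to a toy two-entry opcode table.  */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct toy_opcode
{
  const char *name;
  uint32_t match;   /* bit pattern the masked instruction must equal */
  uint32_t mask;    /* bits that participate in the comparison */
};

static const struct toy_opcode toy_table[] =
{
  { "addi", 0x00000013, 0x0000707f },  /* opcode 0010011, funct3 000 */
  { "add",  0x00000033, 0xfe00707f },  /* opcode 0110011, funct3 000, funct7 0000000 */
};

static const char *
toy_lookup (uint32_t insn)
{
  for (size_t i = 0; i < sizeof toy_table / sizeof toy_table[0]; i++)
    if (((insn ^ toy_table[i].match) & toy_table[i].mask) == 0)
      return toy_table[i].name;
  return "<unknown>";
}

int
main (void)
{
  printf ("%s\n", toy_lookup (0x00a50533));  /* add a0,a0,a0  -> "add"  */
  printf ("%s\n", toy_lookup (0x00150513));  /* addi a0,a0,1  -> "addi" */
  return 0;
}

The real table additionally carries an operand-format string and the INSN_ALIAS/INSN_MACRO flags,
which is what lets the disassembler skip pseudo-instructions when the no-aliases option is set.
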
---- original-binutils/bfd/archures.c
-+++ binutils-2.26.1/bfd/archures.c
-@@ -612,6 +612,7 @@ extern const bfd_arch_info_type bfd_pj_a
- extern const bfd_arch_info_type bfd_plugin_arch;
- extern const bfd_arch_info_type bfd_powerpc_archs[];
- #define bfd_powerpc_arch bfd_powerpc_archs[0]
-+extern const bfd_arch_info_type bfd_riscv_arch;
- extern const bfd_arch_info_type bfd_rs6000_arch;
- extern const bfd_arch_info_type bfd_rl78_arch;
- extern const bfd_arch_info_type bfd_rx_arch;
-@@ -701,6 +702,7 @@ static const bfd_arch_info_type * const
-     &bfd_or1k_arch,
-     &bfd_pdp11_arch,
-     &bfd_powerpc_arch,
-+    &bfd_riscv_arch,
-     &bfd_rs6000_arch,
-     &bfd_rl78_arch,
-     &bfd_rx_arch,
---- original-binutils/bfd/bfd-in2.h
-+++ binutils-2.26.1/bfd/bfd-in2.h
-@@ -2073,6 +2073,9 @@ enum bfd_architecture
- #define bfd_mach_ppc_e6500     5007
- #define bfd_mach_ppc_titan     83
- #define bfd_mach_ppc_vle       84
-+  bfd_arch_riscv,     /* RISC-V */
-+#define bfd_mach_riscv32       132
-+#define bfd_mach_riscv64       164
-   bfd_arch_rs6000,    /* IBM RS/6000 */
- #define bfd_mach_rs6k          6000
- #define bfd_mach_rs6k_rs1      6001
-@@ -5652,6 +5655,46 @@ relative offset from _GLOBAL_OFFSET_TABL
- value in a word.  The relocation is relative offset from  */
-   BFD_RELOC_MICROBLAZE_32_GOTOFF,
- 
-+/* RISC-V relocations.  */
-+  BFD_RELOC_RISCV_HI20,
-+  BFD_RELOC_RISCV_PCREL_HI20,
-+  BFD_RELOC_RISCV_PCREL_LO12_I,
-+  BFD_RELOC_RISCV_PCREL_LO12_S,
-+  BFD_RELOC_RISCV_LO12_I,
-+  BFD_RELOC_RISCV_LO12_S,
-+  BFD_RELOC_RISCV_GPREL12_I,
-+  BFD_RELOC_RISCV_GPREL12_S,
-+  BFD_RELOC_RISCV_TPREL_HI20,
-+  BFD_RELOC_RISCV_TPREL_LO12_I,
-+  BFD_RELOC_RISCV_TPREL_LO12_S,
-+  BFD_RELOC_RISCV_TPREL_ADD,
-+  BFD_RELOC_RISCV_CALL,
-+  BFD_RELOC_RISCV_CALL_PLT,
-+  BFD_RELOC_RISCV_ADD8,
-+  BFD_RELOC_RISCV_ADD16,
-+  BFD_RELOC_RISCV_ADD32,
-+  BFD_RELOC_RISCV_ADD64,
-+  BFD_RELOC_RISCV_SUB8,
-+  BFD_RELOC_RISCV_SUB16,
-+  BFD_RELOC_RISCV_SUB32,
-+  BFD_RELOC_RISCV_SUB64,
-+  BFD_RELOC_RISCV_GOT_HI20,
-+  BFD_RELOC_RISCV_TLS_GOT_HI20,
-+  BFD_RELOC_RISCV_TLS_GD_HI20,
-+  BFD_RELOC_RISCV_JMP,
-+  BFD_RELOC_RISCV_TLS_DTPMOD32,
-+  BFD_RELOC_RISCV_TLS_DTPREL32,
-+  BFD_RELOC_RISCV_TLS_DTPMOD64,
-+  BFD_RELOC_RISCV_TLS_DTPREL64,
-+  BFD_RELOC_RISCV_TLS_TPREL32,
-+  BFD_RELOC_RISCV_TLS_TPREL64,
-+  BFD_RELOC_RISCV_ALIGN,
-+  BFD_RELOC_RISCV_RVC_BRANCH,
-+  BFD_RELOC_RISCV_RVC_JUMP,
-+  BFD_RELOC_RISCV_RVC_LUI,
-+  BFD_RELOC_RISCV_GPREL_I,
-+  BFD_RELOC_RISCV_GPREL_S,
-+
- /* This is used to tell the dynamic linker to copy the value out of
- the dynamic object into the runtime process image.  */
-   BFD_RELOC_MICROBLAZE_COPY,
---- original-binutils/bfd/config.bfd
-+++ binutils-2.26.1/bfd/config.bfd
-@@ -120,6 +120,7 @@ or1k*|or1knd*)	 targ_archs=bfd_or1k_arch
- pdp11*)		 targ_archs=bfd_pdp11_arch ;;
- pj*)		 targ_archs="bfd_pj_arch bfd_i386_arch";;
- powerpc*)	 targ_archs="bfd_rs6000_arch bfd_powerpc_arch" ;;
-+riscv*)		 targ_archs=bfd_riscv_arch ;;
- rs6000)		 targ_archs="bfd_rs6000_arch bfd_powerpc_arch" ;;
- s390*)		 targ_archs=bfd_s390_arch ;;
- sh*)		 targ_archs=bfd_sh_arch ;;
-@@ -1344,6 +1345,18 @@ case "${targ}" in
-     targ_defvec=rl78_elf32_vec
-     ;;
- 
-+  riscv32-*-*)
-+    targ_defvec=riscv_elf32_vec
-+    targ_selvecs="riscv_elf32_vec"
-+    want64=true
-+    ;;
-+
-+  riscv64-*-*)
-+    targ_defvec=riscv_elf64_vec
-+    targ_selvecs="riscv_elf32_vec riscv_elf64_vec"
-+    want64=true
-+    ;;
-+
-   rx-*-elf)
-     targ_defvec=rx_elf32_le_vec
-     targ_selvecs="rx_elf32_be_vec rx_elf32_le_vec rx_elf32_be_ns_vec"
---- original-binutils/bfd/configure
-+++ binutils-2.26.1/bfd/configure
-@@ -15472,6 +15472,8 @@ do
-     powerpc_pei_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
-     powerpc_pei_le_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
-     powerpc_xcoff_vec)		 tb="$tb coff-rs6000.lo $xcoff" ;;
-+    riscv_elf32_vec)		 tb="$tb elf32-riscv.lo elfxx-riscv.lo elf32.lo $elf" ;;
-+    riscv_elf64_vec)		 tb="$tb elf64-riscv.lo elf64.lo elfxx-riscv.lo elf32.lo $elf"; target_size=64 ;;
-     rl78_elf32_vec)		 tb="$tb elf32-rl78.lo elf32.lo $elf" ;;
-     rs6000_xcoff64_vec)		 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
-     rs6000_xcoff64_aix_vec)	 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
---- original-binutils/bfd/configure.ac
-+++ binutils-2.26.1/bfd/configure.ac
-@@ -918,6 +918,8 @@ do
-     powerpc_pei_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
-     powerpc_pei_le_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
-     powerpc_xcoff_vec)		 tb="$tb coff-rs6000.lo $xcoff" ;;
-+    riscv_elf32_vec)		 tb="$tb elf32-riscv.lo elfxx-riscv.lo elf32.lo $elf" ;;
-+    riscv_elf64_vec)		 tb="$tb elf64-riscv.lo elf64.lo elfxx-riscv.lo elf32.lo $elf"; target_size=64 ;;
-     rl78_elf32_vec)		 tb="$tb elf32-rl78.lo elf32.lo $elf" ;;
-     rs6000_xcoff64_vec)		 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
-     rs6000_xcoff64_aix_vec)	 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
---- original-binutils/bfd/elf-bfd.h
-+++ binutils-2.26.1/bfd/elf-bfd.h
-@@ -475,6 +475,7 @@ enum elf_target_id
-   XGATE_ELF_DATA,
-   TILEGX_ELF_DATA,
-   TILEPRO_ELF_DATA,
-+  RISCV_ELF_DATA,
-   GENERIC_ELF_DATA
- };
- 
---- original-binutils/bfd/Makefile.am
-+++ binutils-2.26.1/bfd/Makefile.am
-@@ -949,6 +949,18 @@ elf64-ia64.c : elfnn-ia64.c
- 	$(SED) -e s/NN/64/g < $(srcdir)/elfnn-ia64.c > elf64-ia64.new
- 	mv -f elf64-ia64.new elf64-ia64.c
- 
-+elf32-riscv.c : elfnn-riscv.c
-+	rm -f elf32-riscv.c
-+	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf32-riscv.new
-+	sed -e s/NN/32/g < $(srcdir)/elfnn-riscv.c >> elf32-riscv.new
-+	mv -f elf32-riscv.new elf32-riscv.c
-+
-+elf64-riscv.c : elfnn-riscv.c
-+	rm -f elf64-riscv.c
-+	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf64-riscv.new
-+	sed -e s/NN/64/g < $(srcdir)/elfnn-riscv.c >> elf64-riscv.new
-+	mv -f elf64-riscv.new elf64-riscv.c
-+
- peigen.c : peXXigen.c
- 	rm -f peigen.c
- 	$(SED) -e s/XX/pe/g < $(srcdir)/peXXigen.c > peigen.new
---- original-binutils/bfd/Makefile.in
-+++ binutils-2.26.1/bfd/Makefile.in
-@@ -450,6 +450,7 @@ ALL_MACHINES = \
- 	cpu-pj.lo \
- 	cpu-plugin.lo \
- 	cpu-powerpc.lo \
-+	cpu-riscv.lo \
- 	cpu-rs6000.lo \
- 	cpu-rl78.lo \
- 	cpu-rx.lo \
-@@ -537,6 +538,7 @@ ALL_MACHINES_CFILES = \
- 	cpu-pj.c \
- 	cpu-plugin.c \
- 	cpu-powerpc.c \
-+	cpu-riscv.c \
- 	cpu-rs6000.c \
- 	cpu-rl78.c \
- 	cpu-rx.c \
-@@ -2035,6 +2037,18 @@ elf64-ia64.c : elfnn-ia64.c
- 	$(SED) -e s/NN/64/g < $(srcdir)/elfnn-ia64.c > elf64-ia64.new
- 	mv -f elf64-ia64.new elf64-ia64.c
- 
-+elf32-riscv.c : elfnn-riscv.c
-+	rm -f elf32-riscv.c
-+	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf32-riscv.new
-+	sed -e s/NN/32/g < $(srcdir)/elfnn-riscv.c >> elf32-riscv.new
-+	mv -f elf32-riscv.new elf32-riscv.c
-+
-+elf64-riscv.c : elfnn-riscv.c
-+	rm -f elf64-riscv.c
-+	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf64-riscv.new
-+	sed -e s/NN/64/g < $(srcdir)/elfnn-riscv.c >> elf64-riscv.new
-+	mv -f elf64-riscv.new elf64-riscv.c
-+
- peigen.c : peXXigen.c
- 	rm -f peigen.c
- 	$(SED) -e s/XX/pe/g < $(srcdir)/peXXigen.c > peigen.new
---- original-binutils/bfd/targets.c
-+++ binutils-2.26.1/bfd/targets.c
-@@ -793,6 +793,8 @@ extern const bfd_target powerpc_pe_le_ve
- extern const bfd_target powerpc_pei_vec;
- extern const bfd_target powerpc_pei_le_vec;
- extern const bfd_target powerpc_xcoff_vec;
-+extern const bfd_target riscv_elf32_vec;
-+extern const bfd_target riscv_elf64_vec;
- extern const bfd_target rl78_elf32_vec;
- extern const bfd_target rs6000_xcoff64_vec;
- extern const bfd_target rs6000_xcoff64_aix_vec;
---- original-binutils/binutils/readelf.c
-+++ binutils-2.26.1/binutils/readelf.c
-@@ -124,6 +124,7 @@
- #include "elf/metag.h"
- #include "elf/microblaze.h"
- #include "elf/mips.h"
-+#include "elf/riscv.h"
- #include "elf/mmix.h"
- #include "elf/mn10200.h"
- #include "elf/mn10300.h"
-@@ -771,6 +772,7 @@ guess_is_rela (unsigned int e_machine)
-     case EM_OR1K:
-     case EM_PPC64:
-     case EM_PPC:
-+    case EM_RISCV:
-     case EM_RL78:
-     case EM_RX:
-     case EM_S390:
-@@ -1309,6 +1311,10 @@ dump_relocations (FILE * file,
- 	  rtype = elf_mips_reloc_type (type);
- 	  break;
- 
-+	case EM_RISCV:
-+	  rtype = elf_riscv_reloc_type (type);
-+	  break;
-+
- 	case EM_ALPHA:
- 	  rtype = elf_alpha_reloc_type (type);
- 	  break;
-@@ -2250,6 +2256,7 @@ get_machine_name (unsigned e_machine)
-     case EM_CR16:
-     case EM_MICROBLAZE:
-     case EM_MICROBLAZE_OLD:	return "Xilinx MicroBlaze";
-+    case EM_RISCV:		return "RISC-V";
-     case EM_RL78:		return "Renesas RL78";
-     case EM_RX:			return "Renesas RX";
-     case EM_METAG:		return "Imagination Technologies Meta processor architecture";
-@@ -3193,6 +3200,13 @@ get_machine_flags (unsigned e_flags, uns
- 	  decode_NDS32_machine_flags (e_flags, buf, sizeof buf);
- 	  break;
- 
-+	case EM_RISCV:
-+	  if (e_flags & EF_RISCV_RVC)
-+	    strcat (buf, ", RVC");
-+	  if (e_flags & EF_RISCV_SOFT_FLOAT)
-+	    strcat (buf, ", soft-float ABI");
-+	  break;
-+
- 	case EM_SH:
- 	  switch ((e_flags & EF_SH_MACH_MASK))
- 	    {
-@@ -11430,6 +11444,8 @@ is_32bit_abs_reloc (unsigned int reloc_t
-       return reloc_type == 1; /* R_PPC64_ADDR32.  */
-     case EM_PPC:
-       return reloc_type == 1; /* R_PPC_ADDR32.  */
-+    case EM_RISCV:
-+      return reloc_type == 1; /* R_RISCV_32.  */
-     case EM_RL78:
-       return reloc_type == 1; /* R_RL78_DIR32.  */
-     case EM_RX:
-@@ -11576,6 +11592,8 @@ is_64bit_abs_reloc (unsigned int reloc_t
-       return reloc_type == 80; /* R_PARISC_DIR64.  */
-     case EM_PPC64:
-       return reloc_type == 38; /* R_PPC64_ADDR64.  */
-+    case EM_RISCV:
-+      return reloc_type == 2; /* R_RISCV_64.  */
-     case EM_SPARC32PLUS:
-     case EM_SPARCV9:
-     case EM_SPARC:
-@@ -11730,6 +11748,7 @@ is_none_reloc (unsigned int reloc_type)
-     case EM_ADAPTEVA_EPIPHANY:
-     case EM_PPC:     /* R_PPC_NONE.  */
-     case EM_PPC64:   /* R_PPC64_NONE.  */
-+    case EM_RISCV:   /* R_RISCV_NONE.  */
-     case EM_ARC:     /* R_ARC_NONE.  */
-     case EM_ARC_COMPACT: /* R_ARC_NONE.  */
-     case EM_ARC_COMPACT2: /* R_ARC_NONE.  */
---- original-binutils/gas/configure
-+++ binutils-2.26.1/gas/configure
-@@ -12418,7 +12418,7 @@ $as_echo "#define NDS32_DEFAULT_AUDIO_EX
- $as_echo "$enable_audio_ext" >&6; }
- 	;;
- 
--      i386 | s390 | sparc)
-+      i386 | riscv | s390 | sparc)
- 	if test $this_target = $target ; then
- 
- cat >>confdefs.h <<_ACEOF
---- original-binutils/gas/configure.ac
-+++ binutils-2.26.1/gas/configure.ac
-@@ -466,7 +466,7 @@ changequote([,])dnl
- 	AC_MSG_RESULT($enable_audio_ext)
- 	;;
- 
--      i386 | s390 | sparc)
-+      i386 | riscv | s390 | sparc)
- 	if test $this_target = $target ; then
- 	  AC_DEFINE_UNQUOTED(DEFAULT_ARCH, "${arch}", [Default architecture.])
- 	fi
---- original-binutils/gas/configure.tgt
-+++ binutils-2.26.1/gas/configure.tgt
-@@ -87,6 +87,8 @@ case ${cpu} in
-   pj*)			cpu_type=pj endian=big ;;
-   powerpc*le*)		cpu_type=ppc endian=little ;;
-   powerpc*)		cpu_type=ppc endian=big ;;
-+  riscv32*)		cpu_type=riscv endian=little arch=riscv32 ;;
-+  riscv64*)		cpu_type=riscv endian=little arch=riscv64 ;;
-   rs6000*)		cpu_type=ppc ;;
-   rl78*)		cpu_type=rl78 ;;
-   rx)			cpu_type=rx ;;
-@@ -391,6 +393,8 @@ case ${generic_target} in
-   ppc-*-kaos*)				fmt=elf ;;
-   ppc-*-lynxos*)			fmt=elf em=lynx ;;
- 
-+  riscv*-*-*)			fmt=elf endian=little em=linux ;;
-+
-   s390-*-linux-*)			fmt=elf em=linux ;;
-   s390-*-tpf*)				fmt=elf ;;
- 
-@@ -488,7 +492,7 @@ case ${generic_target} in
- esac
- 
- case ${cpu_type} in
--  aarch64 | alpha | arm | i386 | ia64 | microblaze | mips | ns32k | or1k | or1knd | pdp11 | ppc | sparc | z80 | z8k)
-+  aarch64 | alpha | arm | i386 | ia64 | microblaze | mips | ns32k | or1k | or1knd | pdp11 | ppc | riscv | sparc | z80 | z8k)
-     bfd_gas=yes
-     ;;
- esac
---- original-binutils/gas/Makefile.am
-+++ binutils-2.26.1/gas/Makefile.am
-@@ -177,6 +177,7 @@ TARGET_CPU_CFILES = \
- 	config/tc-pdp11.c \
- 	config/tc-pj.c \
- 	config/tc-ppc.c \
-+	config/tc-riscv.c \
- 	config/tc-rl78.c \
- 	config/tc-rx.c \
- 	config/tc-s390.c \
-@@ -250,6 +251,7 @@ TARGET_CPU_HFILES = \
- 	config/tc-pdp11.h \
- 	config/tc-pj.h \
- 	config/tc-ppc.h \
-+	config/tc-riscv.h \
- 	config/tc-rl78.h \
- 	config/tc-rx.h \
- 	config/tc-s390.h \
---- original-binutils/gas/Makefile.in
-+++ binutils-2.26.1/gas/Makefile.in
-@@ -448,6 +448,7 @@ TARGET_CPU_CFILES = \
- 	config/tc-pdp11.c \
- 	config/tc-pj.c \
- 	config/tc-ppc.c \
-+	config/tc-riscv.c \
- 	config/tc-rl78.c \
- 	config/tc-rx.c \
- 	config/tc-s390.c \
-@@ -521,6 +522,7 @@ TARGET_CPU_HFILES = \
- 	config/tc-pdp11.h \
- 	config/tc-pj.h \
- 	config/tc-ppc.h \
-+	config/tc-riscv.h \
- 	config/tc-rl78.h \
- 	config/tc-rx.h \
- 	config/tc-s390.h \
-@@ -878,6 +880,7 @@ distclean-compile:
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-pdp11.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-pj.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-ppc.Po@am__quote@
-+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-riscv.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-rl78.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-rx.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-s390.Po@am__quote@
-@@ -1598,6 +1601,20 @@ tc-ppc.obj: config/tc-ppc.c
- @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- @am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-ppc.obj `if test -f 'config/tc-ppc.c'; then $(CYGPATH_W) 'config/tc-ppc.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-ppc.c'; fi`
- 
-+tc-riscv.o: config/tc-riscv.c
-+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-riscv.o -MD -MP -MF $(DEPDIR)/tc-riscv.Tpo -c -o tc-riscv.o `test -f 'config/tc-riscv.c' || echo '$(srcdir)/'`config/tc-riscv.c
-+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/tc-riscv.Tpo $(DEPDIR)/tc-riscv.Po
-+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='config/tc-riscv.c' object='tc-riscv.o' libtool=no @AMDEPBACKSLASH@
-+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-riscv.o `test -f 'config/tc-riscv.c' || echo '$(srcdir)/'`config/tc-riscv.c
-+
-+tc-riscv.obj: config/tc-riscv.c
-+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-riscv.obj -MD -MP -MF $(DEPDIR)/tc-riscv.Tpo -c -o tc-riscv.obj `if test -f 'config/tc-riscv.c'; then $(CYGPATH_W) 'config/tc-riscv.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-riscv.c'; fi`
-+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/tc-riscv.Tpo $(DEPDIR)/tc-riscv.Po
-+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='config/tc-riscv.c' object='tc-riscv.obj' libtool=no @AMDEPBACKSLASH@
-+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-riscv.obj `if test -f 'config/tc-riscv.c'; then $(CYGPATH_W) 'config/tc-riscv.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-riscv.c'; fi`
-+
- tc-rl78.o: config/tc-rl78.c
- @am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-rl78.o -MD -MP -MF $(DEPDIR)/tc-rl78.Tpo -c -o tc-rl78.o `test -f 'config/tc-rl78.c' || echo '$(srcdir)/'`config/tc-rl78.c
- @am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/tc-rl78.Tpo $(DEPDIR)/tc-rl78.Po
---- original-binutils/include/dis-asm.h
-+++ binutils-2.26.1/include/dis-asm.h
-@@ -263,6 +263,7 @@ extern int print_insn_little_arm	(bfd_vm
- extern int print_insn_little_mips	(bfd_vma, disassemble_info *);
- extern int print_insn_little_nios2	(bfd_vma, disassemble_info *);
- extern int print_insn_little_powerpc	(bfd_vma, disassemble_info *);
-+extern int print_insn_riscv		(bfd_vma, disassemble_info *);
- extern int print_insn_little_score      (bfd_vma, disassemble_info *); 
- extern int print_insn_lm32		(bfd_vma, disassemble_info *);
- extern int print_insn_m32c	        (bfd_vma, disassemble_info *);
-@@ -327,6 +328,7 @@ extern void print_aarch64_disassembler_o
- extern void print_i386_disassembler_options (FILE *);
- extern void print_mips_disassembler_options (FILE *);
- extern void print_ppc_disassembler_options (FILE *);
-+extern void print_riscv_disassembler_options (FILE *);
- extern void print_arm_disassembler_options (FILE *);
- extern void parse_arm_disassembler_option (char *);
- extern void print_s390_disassembler_options (FILE *);
---- original-binutils/include/elf/common.h
-+++ binutils-2.26.1/include/elf/common.h
-@@ -306,6 +306,7 @@
- #define EM_VISIUM	221	/* Controls and Data Services VISIUMcore processor */
- #define EM_FT32         222     /* FTDI Chip FT32 high performance 32-bit RISC architecture */
- #define EM_MOXIE        223     /* Moxie processor family */
-+#define EM_RISCV	243	/* RISC-V */
- 
- /* If it is necessary to assign new unofficial EM_* values, please pick large
-    random numbers (0x8523, 0xa7f2, etc.) to minimize the chances of collision
---- original-binutils/ld/configure.tgt
-+++ binutils-2.26.1/ld/configure.tgt
-@@ -638,6 +638,12 @@ powerpc-*-aix*)		targ_emul=aixppc ;;
- powerpc-*-beos*)	targ_emul=aixppc ;;
- powerpc-*-windiss*)	targ_emul=elf32ppcwindiss ;;
- powerpc-*-lynxos*)	targ_emul=ppclynx ;;
-+riscv32*-*-*)		targ_emul=elf32lriscv
-+			targ_extra_emuls="elf64lriscv"
-+			targ_extra_libpath=$targ_extra_emuls ;;
-+riscv64*-*-*)		targ_emul=elf64lriscv
-+			targ_extra_emuls="elf32lriscv"
-+			targ_extra_libpath=$targ_extra_emuls ;;
- rs6000-*-aix[5-9]*)	targ_emul=aix5rs6 ;;
- rs6000-*-aix*)		targ_emul=aixrs6
- 			;;
---- original-binutils/ld/Makefile.am
-+++ binutils-2.26.1/ld/Makefile.am
-@@ -267,6 +267,7 @@ ALL_EMULATION_SOURCES = \
- 	eelf32ppcsim.c \
- 	eelf32ppcvxworks.c \
- 	eelf32ppcwindiss.c \
-+	eelf32lriscv.c \
- 	eelf32rl78.c \
- 	eelf32rx.c \
- 	eelf32tilegx.c \
-@@ -483,6 +484,7 @@ ALL_64_EMULATION_SOURCES = \
- 	eelf64btsmip_fbsd.c \
- 	eelf64hppa.c \
- 	eelf64lppc.c \
-+	eelf64lriscv.c \
- 	eelf64ltsmip.c \
- 	eelf64ltsmip_fbsd.c \
- 	eelf64mmix.c \
-@@ -1144,6 +1146,11 @@ eelf32lppcsim.c: $(srcdir)/emulparams/el
-   $(srcdir)/emultempl/ppc32elf.em ldemul-list.h \
-   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
- 
-+eelf32lriscv.c: $(srcdir)/emulparams/elf32lriscv.sh \
-+  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
-+  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
-+  ${GEN_DEPENDS}
-+
- eelf32lsmip.c: $(srcdir)/emulparams/elf32lsmip.sh \
-   $(srcdir)/emulparams/elf32lmip.sh $(srcdir)/emulparams/elf32bmip.sh \
-   $(ELF_DEPS) $(srcdir)/emultempl/mipself.em $(srcdir)/scripttempl/elf.sc \
-@@ -1937,6 +1944,12 @@ eelf64lppc.c: $(srcdir)/emulparams/elf64
-   ldemul-list.h \
-   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
- 
-+eelf64lriscv.c: $(srcdir)/emulparams/elf64lriscv.sh \
-+  $(srcdir)/emulparams/elf64lriscv-defs.sh \
-+  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
-+  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
-+  ${GEN_DEPENDS}
-+
- eelf64ltsmip.c: $(srcdir)/emulparams/elf64ltsmip.sh \
-   $(srcdir)/emulparams/elf64btsmip.sh $(srcdir)/emulparams/elf64bmip-defs.sh \
-   $(srcdir)/emulparams/elf32bmipn32-defs.sh $(ELF_DEPS) \
---- original-binutils/ld/Makefile.in
-+++ binutils-2.26.1/ld/Makefile.in
-@@ -577,6 +577,7 @@ ALL_EMULATION_SOURCES = \
- 	eelf32lppclinux.c \
- 	eelf32lppcnto.c \
- 	eelf32lppcsim.c \
-+	eelf32lriscv.c \
- 	eelf32m32c.c \
- 	eelf32mb_linux.c \
- 	eelf32mbel_linux.c \
-@@ -812,6 +813,7 @@ ALL_64_EMULATION_SOURCES = \
- 	eelf64btsmip_fbsd.c \
- 	eelf64hppa.c \
- 	eelf64lppc.c \
-+	eelf64lriscv.c \
- 	eelf64ltsmip.c \
- 	eelf64ltsmip_fbsd.c \
- 	eelf64mmix.c \
-@@ -1219,6 +1221,7 @@ distclean-compile:
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppclinux.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppcnto.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppcsim.Po@am__quote@
-+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lriscv.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lr5900.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lr5900n32.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lsmip.Po@am__quote@
-@@ -1274,6 +1277,7 @@ distclean-compile:
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64btsmip_fbsd.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64hppa.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64lppc.Po@am__quote@
-+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64lriscv.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64ltsmip.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64ltsmip_fbsd.Po@am__quote@
- @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64mmix.Po@am__quote@
-@@ -2650,6 +2654,11 @@ eelf32lppcsim.c: $(srcdir)/emulparams/el
-   $(srcdir)/emultempl/ppc32elf.em ldemul-list.h \
-   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
- 
-+eelf32lriscv.c: $(srcdir)/emulparams/elf32lriscv.sh \
-+  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
-+  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
-+  ${GEN_DEPENDS}
-+
- eelf32lsmip.c: $(srcdir)/emulparams/elf32lsmip.sh \
-   $(srcdir)/emulparams/elf32lmip.sh $(srcdir)/emulparams/elf32bmip.sh \
-   $(ELF_DEPS) $(srcdir)/emultempl/mipself.em $(srcdir)/scripttempl/elf.sc \
-@@ -3443,6 +3452,12 @@ eelf64lppc.c: $(srcdir)/emulparams/elf64
-   ldemul-list.h \
-   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
- 
-+eelf64lriscv.c: $(srcdir)/emulparams/elf64lriscv.sh \
-+  $(srcdir)/emulparams/elf64lriscv-defs.sh \
-+  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
-+  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
-+  ${GEN_DEPENDS}
-+
- eelf64ltsmip.c: $(srcdir)/emulparams/elf64ltsmip.sh \
-   $(srcdir)/emulparams/elf64btsmip.sh $(srcdir)/emulparams/elf64bmip-defs.sh \
-   $(srcdir)/emulparams/elf32bmipn32-defs.sh $(ELF_DEPS) \
---- original-binutils/opcodes/configure
-+++ binutils-2.26.1/opcodes/configure
-@@ -12603,6 +12603,7 @@ if test x${all_targets} = xfalse ; then
- 	bfd_powerpc_arch)	ta="$ta ppc-dis.lo ppc-opc.lo" ;;
- 	bfd_powerpc_64_arch)	ta="$ta ppc-dis.lo ppc-opc.lo" ;;
- 	bfd_pyramid_arch)	;;
-+	bfd_riscv_arch)		ta="$ta riscv-dis.lo riscv-opc.lo" ;;
- 	bfd_romp_arch)		;;
- 	bfd_rs6000_arch)	ta="$ta ppc-dis.lo ppc-opc.lo" ;;
- 	bfd_rl78_arch)		ta="$ta rl78-dis.lo rl78-decode.lo";;
---- original-binutils/opcodes/disassemble.c
-+++ binutils-2.26.1/opcodes/disassemble.c
-@@ -376,6 +376,11 @@ disassembler (abfd)
- 	disassemble = print_insn_little_powerpc;
-       break;
- #endif
-+#ifdef ARCH_riscv
-+    case bfd_arch_riscv:
-+      disassemble = print_insn_riscv;
-+      break;
-+#endif
- #ifdef ARCH_rs6000
-     case bfd_arch_rs6000:
-       if (bfd_get_mach (abfd) == bfd_mach_ppc_620)
-@@ -558,6 +563,9 @@ disassembler_usage (stream)
- #ifdef ARCH_powerpc
-   print_ppc_disassembler_options (stream);
- #endif
-+#ifdef ARCH_riscv
-+  print_riscv_disassembler_options (stream);
-+#endif
- #ifdef ARCH_i386
-   print_i386_disassembler_options (stream);
- #endif
diff --git a/util/crossgcc/patches/binutils-2.27_aarch.patch b/util/crossgcc/patches/binutils-2.27_aarch.patch
new file mode 100644
index 0000000..4a04418
--- /dev/null
+++ b/util/crossgcc/patches/binutils-2.27_aarch.patch
@@ -0,0 +1,92 @@
+diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
+index 2d491f6..e221ef4 100644
+--- a/gas/config/tc-aarch64.c
++++ b/gas/config/tc-aarch64.c
+@@ -1736,13 +1736,13 @@ s_ltorg (int ignored ATTRIBUTE_UNUSED)
+       if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
+ 	continue;
+ 
+-      mapping_state (MAP_DATA);
+-
+       /* Align pool as you have word accesses.
+          Only make a frag if we have to.  */
+       if (!need_pass_2)
+ 	frag_align (align, 0, 0);
+ 
++      mapping_state (MAP_DATA);
++
+       record_alignment (now_seg, align);
+ 
+       sprintf (sym_name, "$$lit_\002%x", pool->id);
+@@ -6373,11 +6373,15 @@ aarch64_init_frag (fragS * fragP, int max_chars)
+ 
+   switch (fragP->fr_type)
+     {
+-    case rs_align:
+     case rs_align_test:
+     case rs_fill:
+       mapping_state_2 (MAP_DATA, max_chars);
+       break;
++    case rs_align:
++      /* PR 20364: We can get alignment frags in code sections,
++	 so do not just assume that we should use the MAP_DATA state.  */
++      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
++      break;
+     case rs_align_code:
+       mapping_state_2 (MAP_INSN, max_chars);
+       break;
+diff --git a/gas/testsuite/gas/aarch64/pr20364.d b/gas/testsuite/gas/aarch64/pr20364.d
+new file mode 100644
+index 0000000..babcff1
+--- /dev/null
++++ b/gas/testsuite/gas/aarch64/pr20364.d
+@@ -0,0 +1,13 @@
++# Check that ".align <size>, <fill>" does not set the mapping state to DATA, causing unnecessary frag generation.
++#name: PR20364 
++#objdump: -d
++
++.*:     file format .*
++
++Disassembly of section \.vectors:
++
++0+000 <.*>:
++   0:	d2800000 	mov	x0, #0x0                   	// #0
++   4:	94000000 	bl	0 <plat_report_exception>
++   8:	17fffffe 	b	0 <bl1_exceptions>
++
+diff --git a/gas/testsuite/gas/aarch64/pr20364.s b/gas/testsuite/gas/aarch64/pr20364.s
+new file mode 100644
+index 0000000..594ad7c
+--- /dev/null
++++ b/gas/testsuite/gas/aarch64/pr20364.s
+@@ -0,0 +1,28 @@
++ .macro vector_base label
++ .section .vectors, "ax"
++ .align 11, 0
++ \label:
++ .endm
++
++ .macro vector_entry label
++ .section .vectors, "ax"
++ .align 7, 0
++ \label:
++ .endm
++
++ .macro check_vector_size since
++   .if (. - \since) > (32 * 4)
++     .error "Vector exceeds 32 instructions"
++   .endif
++ .endm
++
++ .globl bl1_exceptions
++
++vector_base bl1_exceptions
++
++vector_entry SynchronousExceptionSP0
++ mov x0, #0x0
++ bl plat_report_exception
++ b SynchronousExceptionSP0
++ check_vector_size SynchronousExceptionSP0
++
+-- 
+1.7.1
diff --git a/util/crossgcc/patches/binutils-2.27_mips-gold.patch b/util/crossgcc/patches/binutils-2.27_mips-gold.patch
new file mode 100644
index 0000000..d9a4021
--- /dev/null
+++ b/util/crossgcc/patches/binutils-2.27_mips-gold.patch
@@ -0,0 +1,11 @@
+diff -urN binutils-2.27.orig/gold/configure.tgt binutils-2.27/gold/configure.tgt
+--- binutils-2.27.orig/gold/configure.tgt	2016-08-03 15:36:53.000000000 +0800
++++ binutils-2.27/gold/configure.tgt	2016-10-29 19:28:56.140587026 +0800
+@@ -157,6 +157,7 @@
+  targ_obj=mips
+  targ_machine=EM_MIPS_RS3_LE
+  targ_size=32
++ targ_extra_size=64
+  targ_big_endian=false
+  targ_extra_big_endian=true
+  ;;
diff --git a/util/crossgcc/patches/binutils-2.27_no-bfd-doc.patch b/util/crossgcc/patches/binutils-2.27_no-bfd-doc.patch
new file mode 100644
index 0000000..607d479
--- /dev/null
+++ b/util/crossgcc/patches/binutils-2.27_no-bfd-doc.patch
@@ -0,0 +1,12 @@
+diff -ur binutils-2.26.1/bfd/Makefile.in binutils-2.26.1.patched/bfd/Makefile.in
+--- binutils-2.26.1/bfd/Makefile.in	2015-11-13 16:27:40.000000000 +0800
++++ binutils-2.27/bfd/Makefile.in	2016-04-02 11:05:43.398422394 +0800
+@@ -341,7 +341,7 @@
+ ACLOCAL_AMFLAGS = -I . -I .. -I ../config
+ INCDIR = $(srcdir)/../include
+ CSEARCH = -I. -I$(srcdir) -I$(INCDIR)
+-SUBDIRS = doc po
++SUBDIRS = po
+ bfddocdir = doc
+ libbfd_la_LDFLAGS = $(am__append_1) -release `cat libtool-soversion` \
+ 	@SHARED_LDFLAGS@ $(am__empty)
diff --git a/util/crossgcc/patches/binutils-2.27_riscv.patch b/util/crossgcc/patches/binutils-2.27_riscv.patch
new file mode 100644
index 0000000..899a9c7
--- /dev/null
+++ b/util/crossgcc/patches/binutils-2.27_riscv.patch
@@ -0,0 +1,10251 @@
+diff --git original-binutils/bfd/Makefile.am binutils-2_27/bfd/Makefile.am
+index 6720f86..70ec66c 100644
+--- original-binutils/bfd/Makefile.am
++++ binutils-2_27/bfd/Makefile.am
+@@ -955,6 +955,18 @@ elf64-ia64.c : elfnn-ia64.c
+ 	$(SED) -e s/NN/64/g < $(srcdir)/elfnn-ia64.c > elf64-ia64.new
+ 	mv -f elf64-ia64.new elf64-ia64.c
+ 
++elf32-riscv.c : elfnn-riscv.c
++	rm -f elf32-riscv.c
++	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf32-riscv.new
++	sed -e s/NN/32/g < $(srcdir)/elfnn-riscv.c >> elf32-riscv.new
++	mv -f elf32-riscv.new elf32-riscv.c
++
++elf64-riscv.c : elfnn-riscv.c
++	rm -f elf64-riscv.c
++	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf64-riscv.new
++	sed -e s/NN/64/g < $(srcdir)/elfnn-riscv.c >> elf64-riscv.new
++	mv -f elf64-riscv.new elf64-riscv.c
++
+ peigen.c : peXXigen.c
+ 	rm -f peigen.c
+ 	$(SED) -e s/XX/pe/g < $(srcdir)/peXXigen.c > peigen.new
+diff --git original-binutils/bfd/Makefile.in binutils-2_27/bfd/Makefile.in
+index 7283ed9..6f2383b 100644
+--- original-binutils/bfd/Makefile.in
++++ binutils-2_27/bfd/Makefile.in
+@@ -479,6 +479,7 @@ ALL_MACHINES = \
+ 	cpu-pj.lo \
+ 	cpu-plugin.lo \
+ 	cpu-powerpc.lo \
++	cpu-riscv.lo \
+ 	cpu-rs6000.lo \
+ 	cpu-rl78.lo \
+ 	cpu-rx.lo \
+@@ -566,6 +567,7 @@ ALL_MACHINES_CFILES = \
+ 	cpu-pj.c \
+ 	cpu-plugin.c \
+ 	cpu-powerpc.c \
++	cpu-riscv.c \
+ 	cpu-rs6000.c \
+ 	cpu-rl78.c \
+ 	cpu-rx.c \
+@@ -2078,6 +2080,18 @@ elf64-ia64.c : elfnn-ia64.c
+ 	$(SED) -e s/NN/64/g < $(srcdir)/elfnn-ia64.c > elf64-ia64.new
+ 	mv -f elf64-ia64.new elf64-ia64.c
+ 
++elf32-riscv.c : elfnn-riscv.c
++	rm -f elf32-riscv.c
++	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf32-riscv.new
++	sed -e s/NN/32/g < $(srcdir)/elfnn-riscv.c >> elf32-riscv.new
++	mv -f elf32-riscv.new elf32-riscv.c
++
++elf64-riscv.c : elfnn-riscv.c
++	rm -f elf64-riscv.c
++	echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf64-riscv.new
++	sed -e s/NN/64/g < $(srcdir)/elfnn-riscv.c >> elf64-riscv.new
++	mv -f elf64-riscv.new elf64-riscv.c
++
+ peigen.c : peXXigen.c
+ 	rm -f peigen.c
+ 	$(SED) -e s/XX/pe/g < $(srcdir)/peXXigen.c > peigen.new
+diff --git original-binutils/bfd/archures.c binutils-2_27/bfd/archures.c
+index 96c9109..7761cf7 100644
+--- original-binutils/bfd/archures.c
++++ binutils-2_27/bfd/archures.c
+@@ -628,6 +628,7 @@ extern const bfd_arch_info_type bfd_pj_arch;
+ extern const bfd_arch_info_type bfd_plugin_arch;
+ extern const bfd_arch_info_type bfd_powerpc_archs[];
+ #define bfd_powerpc_arch bfd_powerpc_archs[0]
++extern const bfd_arch_info_type bfd_riscv_arch;
+ extern const bfd_arch_info_type bfd_rs6000_arch;
+ extern const bfd_arch_info_type bfd_rl78_arch;
+ extern const bfd_arch_info_type bfd_rx_arch;
+@@ -717,6 +718,7 @@ static const bfd_arch_info_type * const bfd_archures_list[] =
+     &bfd_or1k_arch,
+     &bfd_pdp11_arch,
+     &bfd_powerpc_arch,
++    &bfd_riscv_arch,
+     &bfd_rs6000_arch,
+     &bfd_rl78_arch,
+     &bfd_rx_arch,
+diff --git original-binutils/bfd/bfd-in2.h binutils-2_27/bfd/bfd-in2.h
+index 30513c4..fc290dd 100644
+--- original-binutils/bfd/bfd-in2.h
++++ binutils-2_27/bfd/bfd-in2.h
+@@ -2088,6 +2088,9 @@ enum bfd_architecture
+ #define bfd_mach_ppc_e6500     5007
+ #define bfd_mach_ppc_titan     83
+ #define bfd_mach_ppc_vle       84
++  bfd_arch_riscv,     /* RISC-V */
++#define bfd_mach_riscv32       132
++#define bfd_mach_riscv64       164
+   bfd_arch_rs6000,    /* IBM RS/6000 */
+ #define bfd_mach_rs6k          6000
+ #define bfd_mach_rs6k_rs1      6001
+@@ -5689,6 +5692,46 @@ relative offset from _GLOBAL_OFFSET_TABLE_  */
+ value in a word.  The relocation is relative offset from  */
+   BFD_RELOC_MICROBLAZE_32_GOTOFF,
+ 
++/* RISC-V relocations.  */
++  BFD_RELOC_RISCV_HI20,
++  BFD_RELOC_RISCV_PCREL_HI20,
++  BFD_RELOC_RISCV_PCREL_LO12_I,
++  BFD_RELOC_RISCV_PCREL_LO12_S,
++  BFD_RELOC_RISCV_LO12_I,
++  BFD_RELOC_RISCV_LO12_S,
++  BFD_RELOC_RISCV_GPREL12_I,
++  BFD_RELOC_RISCV_GPREL12_S,
++  BFD_RELOC_RISCV_TPREL_HI20,
++  BFD_RELOC_RISCV_TPREL_LO12_I,
++  BFD_RELOC_RISCV_TPREL_LO12_S,
++  BFD_RELOC_RISCV_TPREL_ADD,
++  BFD_RELOC_RISCV_CALL,
++  BFD_RELOC_RISCV_CALL_PLT,
++  BFD_RELOC_RISCV_ADD8,
++  BFD_RELOC_RISCV_ADD16,
++  BFD_RELOC_RISCV_ADD32,
++  BFD_RELOC_RISCV_ADD64,
++  BFD_RELOC_RISCV_SUB8,
++  BFD_RELOC_RISCV_SUB16,
++  BFD_RELOC_RISCV_SUB32,
++  BFD_RELOC_RISCV_SUB64,
++  BFD_RELOC_RISCV_GOT_HI20,
++  BFD_RELOC_RISCV_TLS_GOT_HI20,
++  BFD_RELOC_RISCV_TLS_GD_HI20,
++  BFD_RELOC_RISCV_JMP,
++  BFD_RELOC_RISCV_TLS_DTPMOD32,
++  BFD_RELOC_RISCV_TLS_DTPREL32,
++  BFD_RELOC_RISCV_TLS_DTPMOD64,
++  BFD_RELOC_RISCV_TLS_DTPREL64,
++  BFD_RELOC_RISCV_TLS_TPREL32,
++  BFD_RELOC_RISCV_TLS_TPREL64,
++  BFD_RELOC_RISCV_ALIGN,
++  BFD_RELOC_RISCV_RVC_BRANCH,
++  BFD_RELOC_RISCV_RVC_JUMP,
++  BFD_RELOC_RISCV_RVC_LUI,
++  BFD_RELOC_RISCV_GPREL_I,
++  BFD_RELOC_RISCV_GPREL_S,
++
+ /* This is used to tell the dynamic linker to copy the value out of
+ the dynamic object into the runtime process image.  */
+   BFD_RELOC_MICROBLAZE_COPY,
+diff --git original-binutils/bfd/config.bfd binutils-2_27/bfd/config.bfd
+index b998830..5333b55 100644
+--- original-binutils/bfd/config.bfd
++++ binutils-2_27/bfd/config.bfd
+@@ -122,6 +122,7 @@ or1k*|or1knd*)	 targ_archs=bfd_or1k_arch ;;
+ pdp11*)		 targ_archs=bfd_pdp11_arch ;;
+ pj*)		 targ_archs="bfd_pj_arch bfd_i386_arch";;
+ powerpc*)	 targ_archs="bfd_rs6000_arch bfd_powerpc_arch" ;;
++riscv*)		 targ_archs=bfd_riscv_arch ;;
+ rs6000)		 targ_archs="bfd_rs6000_arch bfd_powerpc_arch" ;;
+ s390*)		 targ_archs=bfd_s390_arch ;;
+ sh*)		 targ_archs=bfd_sh_arch ;;
+@@ -1358,6 +1359,18 @@ case "${targ}" in
+     targ_defvec=rl78_elf32_vec
+     ;;
+ 
++  riscv32-*-*)
++    targ_defvec=riscv_elf32_vec
++    targ_selvecs="riscv_elf32_vec"
++    want64=true
++    ;;
++
++  riscv64-*-*)
++    targ_defvec=riscv_elf64_vec
++    targ_selvecs="riscv_elf32_vec riscv_elf64_vec"
++    want64=true
++    ;;
++
+   rx-*-elf)
+     targ_defvec=rx_elf32_le_vec
+     targ_selvecs="rx_elf32_be_vec rx_elf32_le_vec rx_elf32_be_ns_vec"
+diff --git original-binutils/bfd/configure binutils-2_27/bfd/configure
+index 6e6283d..b413e4f 100755
+--- original-binutils/bfd/configure
++++ binutils-2_27/bfd/configure
+@@ -14431,6 +14431,8 @@ do
+     powerpc_pei_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
+     powerpc_pei_le_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
+     powerpc_xcoff_vec)		 tb="$tb coff-rs6000.lo $xcoff" ;;
++    riscv_elf32_vec)		 tb="$tb elf32-riscv.lo elfxx-riscv.lo elf32.lo $elf" ;;
++    riscv_elf64_vec)		 tb="$tb elf64-riscv.lo elf64.lo elfxx-riscv.lo elf32.lo $elf"; target_size=64 ;;
+     rl78_elf32_vec)		 tb="$tb elf32-rl78.lo elf32.lo $elf" ;;
+     rs6000_xcoff64_vec)		 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
+     rs6000_xcoff64_aix_vec)	 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
+diff --git original-binutils/bfd/configure.ac binutils-2_27/bfd/configure.ac
+index 669cff7..6f11d29 100644
+--- original-binutils/bfd/configure.ac
++++ binutils-2_27/bfd/configure.ac
+@@ -606,6 +606,8 @@ do
+     powerpc_pei_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
+     powerpc_pei_le_vec)		 tb="$tb pei-ppc.lo peigen.lo $coff" ;;
+     powerpc_xcoff_vec)		 tb="$tb coff-rs6000.lo $xcoff" ;;
++    riscv_elf32_vec)		 tb="$tb elf32-riscv.lo elfxx-riscv.lo elf32.lo $elf" ;;
++    riscv_elf64_vec)		 tb="$tb elf64-riscv.lo elf64.lo elfxx-riscv.lo elf32.lo $elf"; target_size=64 ;;
+     rl78_elf32_vec)		 tb="$tb elf32-rl78.lo elf32.lo $elf" ;;
+     rs6000_xcoff64_vec)		 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
+     rs6000_xcoff64_aix_vec)	 tb="$tb coff64-rs6000.lo aix5ppc-core.lo $xcoff"; target_size=64 ;;
+diff --git original-binutils/bfd/cpu-riscv.c binutils-2_27/bfd/cpu-riscv.c
+new file mode 100644
+index 0000000..f1b8d5d
+--- /dev/null
++++ binutils-2_27/bfd/cpu-riscv.c
+@@ -0,0 +1,76 @@
++/* BFD backend for RISC-V
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target.
++
++   This file is part of BFD, the Binary File Descriptor library.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++
++/* This routine is provided two arch_infos and returns an arch_info
++   that is compatible with both, or NULL if none exists.  */
++
++static const bfd_arch_info_type *
++riscv_compatible (const bfd_arch_info_type *a, const bfd_arch_info_type *b)
++{
++  if (a->arch != b->arch)
++    return NULL;
++
++  /* Machine compatibility is checked in
++     _bfd_riscv_elf_merge_private_bfd_data.  */
++
++  return a;
++}
++
++#define N(BITS_WORD, BITS_ADDR, NUMBER, PRINT, DEFAULT, NEXT)		\
++  {							\
++    BITS_WORD, /*  bits in a word */			\
++    BITS_ADDR, /* bits in an address */			\
++    8,	/* 8 bits in a byte */				\
++    bfd_arch_riscv,					\
++    NUMBER,						\
++    "riscv",						\
++    PRINT,						\
++    3,							\
++    DEFAULT,						\
++    riscv_compatible,					\
++    bfd_default_scan,					\
++    bfd_arch_default_fill,				\
++    NEXT,						\
++  }
++
++enum
++{
++  I_riscv64,
++  I_riscv32
++};
++
++#define NN(index) (&arch_info_struct[(index) + 1])
++
++static const bfd_arch_info_type arch_info_struct[] =
++{
++  N (64, 64, bfd_mach_riscv64, "riscv:rv64", FALSE, NN (I_riscv64)),
++  N (32, 32, bfd_mach_riscv32, "riscv:rv32", FALSE, 0)
++};
++
++/* The default architecture is riscv:rv64.  */
++
++const bfd_arch_info_type bfd_riscv_arch =
++  N (64, 64, 0, "riscv", TRUE, &arch_info_struct[0]);
+diff --git original-binutils/bfd/elf-bfd.h binutils-2_27/bfd/elf-bfd.h
+index 163ef35..9fa3290 100644
+--- original-binutils/bfd/elf-bfd.h
++++ binutils-2_27/bfd/elf-bfd.h
+@@ -476,6 +476,7 @@ enum elf_target_id
+   XGATE_ELF_DATA,
+   TILEGX_ELF_DATA,
+   TILEPRO_ELF_DATA,
++  RISCV_ELF_DATA,
+   GENERIC_ELF_DATA
+ };
+ 
+diff --git original-binutils/bfd/elfnn-riscv.c binutils-2_27/bfd/elfnn-riscv.c
+new file mode 100644
+index 0000000..64ec0da
+--- /dev/null
++++ binutils-2_27/bfd/elfnn-riscv.c
+@@ -0,0 +1,3141 @@
++/* RISC-V-specific support for NN-bit ELF.
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on TILE-Gx and MIPS targets.
++
++   This file is part of BFD, the Binary File Descriptor library.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++/* This file handles RISC-V ELF targets.  */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++#include "bfdlink.h"
++#include "genlink.h"
++#include "elf-bfd.h"
++#include "elfxx-riscv.h"
++#include "elf/riscv.h"
++#include "opcode/riscv.h"
++
++#define ARCH_SIZE NN
++
++#define MINUS_ONE ((bfd_vma)0 - 1)
++
++#define RISCV_ELF_LOG_WORD_BYTES (ARCH_SIZE == 32 ? 2 : 3)
++
++#define RISCV_ELF_WORD_BYTES (1 << RISCV_ELF_LOG_WORD_BYTES)
++
++/* The name of the dynamic interpreter.  This is put in the .interp
++   section.  */
++
++#define ELF64_DYNAMIC_INTERPRETER "/lib/ld.so.1"
++#define ELF32_DYNAMIC_INTERPRETER "/lib32/ld.so.1"
++
++#define ELF_ARCH			bfd_arch_riscv
++#define ELF_TARGET_ID			RISCV_ELF_DATA
++#define ELF_MACHINE_CODE		EM_RISCV
++#define ELF_MAXPAGESIZE			0x1000
++#define ELF_COMMONPAGESIZE		0x1000
++
++/* The RISC-V linker needs to keep track of the number of relocs that it
++   decides to copy as dynamic relocs in check_relocs for each symbol.
++   This is so that it can later discard them if they are found to be
++   unnecessary.  We store the information in a field extending the
++   regular ELF linker hash table.  */
++
++struct riscv_elf_dyn_relocs
++{
++  struct riscv_elf_dyn_relocs *next;
++
++  /* The input section of the reloc.  */
++  asection *sec;
++
++  /* Total number of relocs copied for the input section.  */
++  bfd_size_type count;
++
++  /* Number of pc-relative relocs copied for the input section.  */
++  bfd_size_type pc_count;
++};
++
++/* RISC-V ELF linker hash entry.  */
++
++struct riscv_elf_link_hash_entry
++{
++  struct elf_link_hash_entry elf;
++
++  /* Track dynamic relocs copied for this symbol.  */
++  struct riscv_elf_dyn_relocs *dyn_relocs;
++
++#define GOT_UNKNOWN     0
++#define GOT_NORMAL      1
++#define GOT_TLS_GD      2
++#define GOT_TLS_IE      4
++#define GOT_TLS_LE      8
++  char tls_type;
++};
++
++#define riscv_elf_hash_entry(ent) \
++  ((struct riscv_elf_link_hash_entry *)(ent))
++
++struct _bfd_riscv_elf_obj_tdata
++{
++  struct elf_obj_tdata root;
++
++  /* tls_type for each local got entry.  */
++  char *local_got_tls_type;
++};
++
++#define _bfd_riscv_elf_tdata(abfd) \
++  ((struct _bfd_riscv_elf_obj_tdata *) (abfd)->tdata.any)
++
++#define _bfd_riscv_elf_local_got_tls_type(abfd) \
++  (_bfd_riscv_elf_tdata (abfd)->local_got_tls_type)
++
++#define _bfd_riscv_elf_tls_type(abfd, h, symndx)		\
++  (*((h) != NULL ? &riscv_elf_hash_entry (h)->tls_type		\
++     : &_bfd_riscv_elf_local_got_tls_type (abfd) [symndx]))
++
++#define is_riscv_elf(bfd)				\
++  (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
++   && elf_tdata (bfd) != NULL				\
++   && elf_object_id (bfd) == RISCV_ELF_DATA)
++
++#include "elf/common.h"
++#include "elf/internal.h"
++
++struct riscv_elf_link_hash_table
++{
++  struct elf_link_hash_table elf;
++
++  /* Short-cuts to get to dynamic linker sections.  */
++  asection *sdynbss;
++  asection *srelbss;
++  asection *sdyntdata;
++
++  /* Small local sym to section mapping cache.  */
++  struct sym_cache sym_cache;
++};
++
++
++/* Get the RISC-V ELF linker hash table from a link_info structure.  */
++#define riscv_elf_hash_table(p) \
++  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
++  == RISCV_ELF_DATA ? ((struct riscv_elf_link_hash_table *) ((p)->hash)) : NULL)
++
++static void
++riscv_info_to_howto_rela (bfd *abfd ATTRIBUTE_UNUSED,
++			  arelent *cache_ptr,
++			  Elf_Internal_Rela *dst)
++{
++  cache_ptr->howto = riscv_elf_rtype_to_howto (ELFNN_R_TYPE (dst->r_info));
++}
++
++static void
++riscv_elf_append_rela (bfd *abfd, asection *s, Elf_Internal_Rela *rel)
++{
++  const struct elf_backend_data *bed;
++  bfd_byte *loc;
++
++  bed = get_elf_backend_data (abfd);
++  loc = s->contents + (s->reloc_count++ * bed->s->sizeof_rela);
++  bed->s->swap_reloca_out (abfd, rel, loc);
++}
++
++/* PLT/GOT stuff */
++
++#define PLT_HEADER_INSNS 8
++#define PLT_ENTRY_INSNS 4
++#define PLT_HEADER_SIZE (PLT_HEADER_INSNS * 4)
++#define PLT_ENTRY_SIZE (PLT_ENTRY_INSNS * 4)
++
++#define GOT_ENTRY_SIZE RISCV_ELF_WORD_BYTES
++
++#define GOTPLT_HEADER_SIZE (2 * GOT_ENTRY_SIZE)
++
++#define sec_addr(sec) ((sec)->output_section->vma + (sec)->output_offset)
++
++static bfd_vma
++riscv_elf_got_plt_val (bfd_vma plt_index, struct bfd_link_info *info)
++{
++  return sec_addr (riscv_elf_hash_table (info)->elf.sgotplt)
++	 + GOTPLT_HEADER_SIZE + (plt_index * GOT_ENTRY_SIZE);
++}
++
++#if ARCH_SIZE == 32
++# define MATCH_LREG MATCH_LW
++#else
++# define MATCH_LREG MATCH_LD
++#endif
++
++/* Generate a PLT header.  */
++
++static void
++riscv_make_plt_header (bfd_vma gotplt_addr, bfd_vma addr, uint32_t *entry)
++{
++  bfd_vma gotplt_offset_high = RISCV_PCREL_HIGH_PART (gotplt_addr, addr);
++  bfd_vma gotplt_offset_low = RISCV_PCREL_LOW_PART (gotplt_addr, addr);
++
++  /* auipc  t2, %hi(.got.plt)
++     sub    t1, t1, t3               # shifted .got.plt offset + hdr size + 12
++     l[w|d] t3, %lo(.got.plt)(t2)    # _dl_runtime_resolve
++     addi   t1, t1, -(hdr size + 12) # shifted .got.plt offset
++     addi   t0, t2, %lo(.got.plt)    # &.got.plt
++     srli   t1, t1, log2(16/PTRSIZE) # .got.plt offset
++     l[w|d] t0, PTRSIZE(t0)          # link map
++     jr     t3 */
++
++  entry[0] = RISCV_UTYPE (AUIPC, X_T2, gotplt_offset_high);
++  entry[1] = RISCV_RTYPE (SUB, X_T1, X_T1, X_T3);
++  entry[2] = RISCV_ITYPE (LREG, X_T3, X_T2, gotplt_offset_low);
++  entry[3] = RISCV_ITYPE (ADDI, X_T1, X_T1, -(PLT_HEADER_SIZE + 12));
++  entry[4] = RISCV_ITYPE (ADDI, X_T0, X_T2, gotplt_offset_low);
++  entry[5] = RISCV_ITYPE (SRLI, X_T1, X_T1, 4 - RISCV_ELF_LOG_WORD_BYTES);
++  entry[6] = RISCV_ITYPE (LREG, X_T0, X_T0, RISCV_ELF_WORD_BYTES);
++  entry[7] = RISCV_ITYPE (JALR, 0, X_T3, 0);
++}
++
++/* Generate a PLT entry.  */
++
++static void
++riscv_make_plt_entry (bfd_vma got, bfd_vma addr, uint32_t *entry)
++{
++  /* auipc  t3, %hi(.got.plt entry)
++     l[w|d] t3, %lo(.got.plt entry)(t3)
++     jalr   t1, t3
++     nop */
++
++  entry[0] = RISCV_UTYPE (AUIPC, X_T3, RISCV_PCREL_HIGH_PART (got, addr));
++  entry[1] = RISCV_ITYPE (LREG,  X_T3, X_T3, RISCV_PCREL_LOW_PART(got, addr));
++  entry[2] = RISCV_ITYPE (JALR, X_T1, X_T3, 0);
++  entry[3] = RISCV_NOP;
++}
++
++/* Create an entry in an RISC-V ELF linker hash table.  */
++
++static struct bfd_hash_entry *
++link_hash_newfunc (struct bfd_hash_entry *entry,
++		   struct bfd_hash_table *table, const char *string)
++{
++  /* Allocate the structure if it has not already been allocated by a
++     subclass.  */
++  if (entry == NULL)
++    {
++      entry =
++	bfd_hash_allocate (table,
++			   sizeof (struct riscv_elf_link_hash_entry));
++      if (entry == NULL)
++	return entry;
++    }
++
++  /* Call the allocation method of the superclass.  */
++  entry = _bfd_elf_link_hash_newfunc (entry, table, string);
++  if (entry != NULL)
++    {
++      struct riscv_elf_link_hash_entry *eh;
++
++      eh = (struct riscv_elf_link_hash_entry *) entry;
++      eh->dyn_relocs = NULL;
++      eh->tls_type = GOT_UNKNOWN;
++    }
++
++  return entry;
++}
++
++/* Create a RISC-V ELF linker hash table.  */
++
++static struct bfd_link_hash_table *
++riscv_elf_link_hash_table_create (bfd *abfd)
++{
++  struct riscv_elf_link_hash_table *ret;
++  bfd_size_type amt = sizeof (struct riscv_elf_link_hash_table);
++
++  ret = (struct riscv_elf_link_hash_table *) bfd_zmalloc (amt);
++  if (ret == NULL)
++    return NULL;
++
++  if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, link_hash_newfunc,
++				      sizeof (struct riscv_elf_link_hash_entry),
++				      RISCV_ELF_DATA))
++    {
++      free (ret);
++      return NULL;
++    }
++
++  return &ret->elf.root;
++}
++
++/* Create the .got section.  */
++
++static bfd_boolean
++riscv_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
++{
++  flagword flags;
++  asection *s, *s_got;
++  struct elf_link_hash_entry *h;
++  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
++  struct elf_link_hash_table *htab = elf_hash_table (info);
++
++  /* This function may be called more than once.  */
++  s = bfd_get_linker_section (abfd, ".got");
++  if (s != NULL)
++    return TRUE;
++
++  flags = bed->dynamic_sec_flags;
++
++  s = bfd_make_section_anyway_with_flags (abfd,
++					  (bed->rela_plts_and_copies_p
++					   ? ".rela.got" : ".rel.got"),
++					  (bed->dynamic_sec_flags
++					   | SEC_READONLY));
++  if (s == NULL
++      || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
++    return FALSE;
++  htab->srelgot = s;
++
++  s = s_got = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
++  if (s == NULL
++      || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
++    return FALSE;
++  htab->sgot = s;
++
++  /* The first bit of the global offset table is the header.  */
++  s->size += bed->got_header_size;
++
++  if (bed->want_got_plt)
++    {
++      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
++      if (s == NULL
++	  || !bfd_set_section_alignment (abfd, s,
++					 bed->s->log_file_align))
++	return FALSE;
++      htab->sgotplt = s;
++
++      /* Reserve room for the header.  */
++      s->size += GOTPLT_HEADER_SIZE;
++    }
++
++  if (bed->want_got_sym)
++    {
++      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
++	 section.  We don't do this in the linker script because we don't want
++	 to define the symbol if we are not creating a global offset
++	 table.  */
++      h = _bfd_elf_define_linkage_sym (abfd, info, s_got,
++				       "_GLOBAL_OFFSET_TABLE_");
++      elf_hash_table (info)->hgot = h;
++      if (h == NULL)
++	return FALSE;
++    }
++
++  return TRUE;
++}
++
++/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
++   .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
++   hash table.  */
++
++static bfd_boolean
++riscv_elf_create_dynamic_sections (bfd *dynobj,
++				   struct bfd_link_info *info)
++{
++  struct riscv_elf_link_hash_table *htab;
++
++  htab = riscv_elf_hash_table (info);
++  BFD_ASSERT (htab != NULL);
++
++  if (!riscv_elf_create_got_section (dynobj, info))
++    return FALSE;
++
++  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
++    return FALSE;
++
++  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
++  if (!bfd_link_pic (info))
++    {
++      htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
++      htab->sdyntdata =
++	bfd_make_section_anyway_with_flags (dynobj, ".tdata.dyn",
++					    SEC_ALLOC | SEC_THREAD_LOCAL);
++    }
++
++  if (!htab->elf.splt || !htab->elf.srelplt || !htab->sdynbss
++      || (!bfd_link_pic (info) && (!htab->srelbss || !htab->sdyntdata)))
++    abort ();
++
++  return TRUE;
++}
++
++/* Copy the extra info we tack onto an elf_link_hash_entry.  */
++
++static void
++riscv_elf_copy_indirect_symbol (struct bfd_link_info *info,
++				struct elf_link_hash_entry *dir,
++				struct elf_link_hash_entry *ind)
++{
++  struct riscv_elf_link_hash_entry *edir, *eind;
++
++  edir = (struct riscv_elf_link_hash_entry *) dir;
++  eind = (struct riscv_elf_link_hash_entry *) ind;
++
++  if (eind->dyn_relocs != NULL)
++    {
++      if (edir->dyn_relocs != NULL)
++	{
++	  struct riscv_elf_dyn_relocs **pp;
++	  struct riscv_elf_dyn_relocs *p;
++
++	  /* Add reloc counts against the indirect sym to the direct sym
++	     list.  Merge any entries against the same section.  */
++	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
++	    {
++	      struct riscv_elf_dyn_relocs *q;
++
++	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
++		if (q->sec == p->sec)
++		  {
++		    q->pc_count += p->pc_count;
++		    q->count += p->count;
++		    *pp = p->next;
++		    break;
++		  }
++	      if (q == NULL)
++		pp = &p->next;
++	    }
++	  *pp = edir->dyn_relocs;
++	}
++
++      edir->dyn_relocs = eind->dyn_relocs;
++      eind->dyn_relocs = NULL;
++    }
++
++  if (ind->root.type == bfd_link_hash_indirect
++      && dir->got.refcount <= 0)
++    {
++      edir->tls_type = eind->tls_type;
++      eind->tls_type = GOT_UNKNOWN;
++    }
++  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
++}
++
++static bfd_boolean
++riscv_elf_record_tls_type (bfd *abfd, struct elf_link_hash_entry *h,
++			   unsigned long symndx, char tls_type)
++{
++  char *new_tls_type = &_bfd_riscv_elf_tls_type (abfd, h, symndx);
++  *new_tls_type |= tls_type;
++  if ((*new_tls_type & GOT_NORMAL) && (*new_tls_type & ~GOT_NORMAL))
++    {
++      (*_bfd_error_handler)
++	(_("%B: `%s' accessed both as normal and thread local symbol"),
++	 abfd, h ? h->root.root.string : "<local>");
++      return FALSE;
++    }
++  return TRUE;
++}
++
++static bfd_boolean
++riscv_elf_record_got_reference (bfd *abfd, struct bfd_link_info *info,
++				struct elf_link_hash_entry *h, long symndx)
++{
++  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
++
++  if (htab->elf.sgot == NULL)
++    {
++      if (!riscv_elf_create_got_section (htab->elf.dynobj, info))
++	return FALSE;
++    }
++
++  if (h != NULL)
++    {
++      h->got.refcount += 1;
++      return TRUE;
++    }
++
++  /* This is a global offset table entry for a local symbol.  */
++  if (elf_local_got_refcounts (abfd) == NULL)
++    {
++      bfd_size_type size = symtab_hdr->sh_info * (sizeof (bfd_vma) + 1);
++      if (!(elf_local_got_refcounts (abfd) = bfd_zalloc (abfd, size)))
++	return FALSE;
++      _bfd_riscv_elf_local_got_tls_type (abfd)
++	= (char *) (elf_local_got_refcounts (abfd) + symtab_hdr->sh_info);
++    }
++  elf_local_got_refcounts (abfd) [symndx] += 1;
++
++  return TRUE;
++}
++
++static bfd_boolean
++bad_static_reloc (bfd *abfd, unsigned r_type, struct elf_link_hash_entry *h)
++{
++  (*_bfd_error_handler)
++    (_("%B: relocation %s against `%s' can not be used when making a shared "
++       "object; recompile with -fPIC"),
++      abfd, riscv_elf_rtype_to_howto (r_type)->name,
++      h != NULL ? h->root.root.string : "a local symbol");
++  bfd_set_error (bfd_error_bad_value);
++  return FALSE;
++}
++/* Look through the relocs for a section during the first phase, and
++   allocate space in the global offset table or procedure linkage
++   table.  */
++
++static bfd_boolean
++riscv_elf_check_relocs (bfd *abfd, struct bfd_link_info *info,
++			asection *sec, const Elf_Internal_Rela *relocs)
++{
++  struct riscv_elf_link_hash_table *htab;
++  Elf_Internal_Shdr *symtab_hdr;
++  struct elf_link_hash_entry **sym_hashes;
++  const Elf_Internal_Rela *rel;
++  asection *sreloc = NULL;
++
++  if (bfd_link_relocatable (info))
++    return TRUE;
++
++  htab = riscv_elf_hash_table (info);
++  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
++  sym_hashes = elf_sym_hashes (abfd);
++
++  if (htab->elf.dynobj == NULL)
++    htab->elf.dynobj = abfd;
++
++  for (rel = relocs; rel < relocs + sec->reloc_count; rel++)
++    {
++      unsigned int r_type;
++      unsigned long r_symndx;
++      struct elf_link_hash_entry *h;
++
++      r_symndx = ELFNN_R_SYM (rel->r_info);
++      r_type = ELFNN_R_TYPE (rel->r_info);
++
++      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
++	{
++	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
++				 abfd, r_symndx);
++	  return FALSE;
++	}
++
++      if (r_symndx < symtab_hdr->sh_info)
++	h = NULL;
++      else
++	{
++	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++	  while (h->root.type == bfd_link_hash_indirect
++		 || h->root.type == bfd_link_hash_warning)
++	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
++
++	  /* PR15323, ref flags aren't set for references in the same
++	     object.  */
++	  h->root.non_ir_ref = 1;
++	}
++
++      switch (r_type)
++	{
++	case R_RISCV_TLS_GD_HI20:
++	  if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
++	      || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_GD))
++	    return FALSE;
++	  break;
++
++	case R_RISCV_TLS_GOT_HI20:
++	  if (bfd_link_pic (info))
++	    info->flags |= DF_STATIC_TLS;
++	  if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
++	      || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_IE))
++	    return FALSE;
++	  break;
++
++	case R_RISCV_GOT_HI20:
++	  if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
++	      || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_NORMAL))
++	    return FALSE;
++	  break;
++
++	case R_RISCV_CALL_PLT:
++	  /* This symbol requires a procedure linkage table entry.  We
++	     actually build the entry in adjust_dynamic_symbol,
++	     because this might be a case of linking PIC code without
++	     linking in any dynamic objects, in which case we don't
++	     need to generate a procedure linkage table after all.  */
++
++	  if (h != NULL)
++	    {
++	      h->needs_plt = 1;
++	      h->plt.refcount += 1;
++	    }
++	  break;
++
++	case R_RISCV_CALL:
++	case R_RISCV_JAL:
++	case R_RISCV_BRANCH:
++	case R_RISCV_RVC_BRANCH:
++	case R_RISCV_RVC_JUMP:
++	case R_RISCV_PCREL_HI20:
++	  /* In shared libraries, these relocs are known to bind locally.  */
++	  if (bfd_link_pic (info))
++	    break;
++	  goto static_reloc;
++
++	case R_RISCV_TPREL_HI20:
++	  if (!bfd_link_executable (info))
++	    return bad_static_reloc (abfd, r_type, h);
++	  if (h != NULL)
++	    riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_LE);
++	  goto static_reloc;
++
++	case R_RISCV_HI20:
++	  if (bfd_link_pic (info))
++	    return bad_static_reloc (abfd, r_type, h);
++	  /* Fall through.  */
++
++	case R_RISCV_COPY:
++	case R_RISCV_JUMP_SLOT:
++	case R_RISCV_RELATIVE:
++	case R_RISCV_64:
++	case R_RISCV_32:
++	  /* Fall through.  */
++
++	static_reloc:
++	  /* This reloc might not bind locally.  */
++	  if (h != NULL)
++	    h->non_got_ref = 1;
++
++	  if (h != NULL && !bfd_link_pic (info))
++	    {
++	      /* We may need a .plt entry if the function this reloc
++		 refers to is in a shared lib.  */
++	      h->plt.refcount += 1;
++	    }
++
++	  /* If we are creating a shared library, and this is a reloc
++	     against a global symbol, or a non PC relative reloc
++	     against a local symbol, then we need to copy the reloc
++	     into the shared library.  However, if we are linking with
++	     -Bsymbolic, we do not need to copy a reloc against a
++	     global symbol which is defined in an object we are
++	     including in the link (i.e., DEF_REGULAR is set).  At
++	     this point we have not seen all the input files, so it is
++	     possible that DEF_REGULAR is not set now but will be set
++	     later (it is never cleared).  In case of a weak definition,
++	     DEF_REGULAR may be cleared later by a strong definition in
++	     a shared library.  We account for that possibility below by
++	     storing information in the relocs_copied field of the hash
++	     table entry.  A similar situation occurs when creating
++	     shared libraries and symbol visibility changes render the
++	     symbol local.
++
++	     If on the other hand, we are creating an executable, we
++	     may need to keep relocations for symbols satisfied by a
++	     dynamic library if we manage to avoid copy relocs for the
++	     symbol.  */
++	  if ((bfd_link_pic (info)
++	       && (sec->flags & SEC_ALLOC) != 0
++	       && (! riscv_elf_rtype_to_howto (r_type)->pc_relative
++		   || (h != NULL
++		       && (! info->symbolic
++			   || h->root.type == bfd_link_hash_defweak
++			   || !h->def_regular))))
++	      || (!bfd_link_pic (info)
++		  && (sec->flags & SEC_ALLOC) != 0
++		  && h != NULL
++		  && (h->root.type == bfd_link_hash_defweak
++		      || !h->def_regular)))
++	    {
++	      struct riscv_elf_dyn_relocs *p;
++	      struct riscv_elf_dyn_relocs **head;
++
++	      /* When creating a shared object, we must copy these
++		 relocs into the output file.  We create a reloc
++		 section in dynobj and make room for the reloc.  */
++	      if (sreloc == NULL)
++		{
++		  sreloc = _bfd_elf_make_dynamic_reloc_section
++		    (sec, htab->elf.dynobj, RISCV_ELF_LOG_WORD_BYTES,
++		    abfd, /*rela?*/ TRUE);
++
++		  if (sreloc == NULL)
++		    return FALSE;
++		}
++
++	      /* If this is a global symbol, we count the number of
++		 relocations we need for this symbol.  */
++	      if (h != NULL)
++		head = &((struct riscv_elf_link_hash_entry *) h)->dyn_relocs;
++	      else
++		{
++		  /* Track dynamic relocs needed for local syms too.
++		     We really need local syms available to do this
++		     easily.  Oh well.  */
++
++		  asection *s;
++		  void *vpp;
++		  Elf_Internal_Sym *isym;
++
++		  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
++						abfd, r_symndx);
++		  if (isym == NULL)
++		    return FALSE;
++
++		  s = bfd_section_from_elf_index (abfd, isym->st_shndx);
++		  if (s == NULL)
++		    s = sec;
++
++		  vpp = &elf_section_data (s)->local_dynrel;
++		  head = (struct riscv_elf_dyn_relocs **) vpp;
++		}
++
++	      p = *head;
++	      if (p == NULL || p->sec != sec)
++		{
++		  bfd_size_type amt = sizeof *p;
++		  p = ((struct riscv_elf_dyn_relocs *)
++		       bfd_alloc (htab->elf.dynobj, amt));
++		  if (p == NULL)
++		    return FALSE;
++		  p->next = *head;
++		  *head = p;
++		  p->sec = sec;
++		  p->count = 0;
++		  p->pc_count = 0;
++		}
++
++	      p->count += 1;
++	      p->pc_count += riscv_elf_rtype_to_howto (r_type)->pc_relative;
++	    }
++
++	  break;
++
++	case R_RISCV_GNU_VTINHERIT:
++	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
++	    return FALSE;
++	  break;
++
++	case R_RISCV_GNU_VTENTRY:
++	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
++	    return FALSE;
++	  break;
++
++	default:
++	  break;
++	}
++    }
++
++  return TRUE;
++}
++
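++/* Return the section that should be marked against GC for a given
++   relocation.  Vtable relocs keep nothing alive here.  */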
++static asection *
++riscv_elf_gc_mark_hook (asection *sec,
++			struct bfd_link_info *info,
++			Elf_Internal_Rela *rel,
++			struct elf_link_hash_entry *h,
++			Elf_Internal_Sym *sym)
++{
++  if (h != NULL)
++    switch (ELFNN_R_TYPE (rel->r_info))
++      {
++      case R_RISCV_GNU_VTINHERIT:
++      case R_RISCV_GNU_VTENTRY:
++	return NULL;
++      }
++
++  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
++}
++
++/* Update the got entry reference counts for the section being removed.  */
++static bfd_boolean
++riscv_elf_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
++			 asection *sec, const Elf_Internal_Rela *relocs)
++{
++  const Elf_Internal_Rela *rel, *relend;
++  Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
++  struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
++  bfd_signed_vma *local_got_refcounts = elf_local_got_refcounts (abfd);
++
++  if (bfd_link_relocatable (info))
++    return TRUE;
++
++  elf_section_data (sec)->local_dynrel = NULL;
++
++  for (rel = relocs, relend = relocs + sec->reloc_count; rel < relend; rel++)
++    {
++      unsigned long r_symndx;
++      struct elf_link_hash_entry *h = NULL;
++
++      r_symndx = ELFNN_R_SYM (rel->r_info);
++      if (r_symndx >= symtab_hdr->sh_info)
++	{
++	  struct riscv_elf_link_hash_entry *eh;
++	  struct riscv_elf_dyn_relocs **pp;
++	  struct riscv_elf_dyn_relocs *p;
++
++	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++	  while (h->root.type == bfd_link_hash_indirect
++		 || h->root.type == bfd_link_hash_warning)
++	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
++	  eh = (struct riscv_elf_link_hash_entry *) h;
++	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
++	    if (p->sec == sec)
++	      {
++		/* Everything must go for SEC.  */
++		*pp = p->next;
++		break;
++	      }
++	}
++
++      switch (ELFNN_R_TYPE (rel->r_info))
++	{
++	case R_RISCV_GOT_HI20:
++	case R_RISCV_TLS_GOT_HI20:
++	case R_RISCV_TLS_GD_HI20:
++	  if (h != NULL)
++	    {
++	      if (h->got.refcount > 0)
++		h->got.refcount--;
++	    }
++	  else
++	    {
++	      if (local_got_refcounts
++		  && local_got_refcounts[r_symndx] > 0)
++		local_got_refcounts[r_symndx]--;
++	    }
++	  break;
++
++	case R_RISCV_HI20:
++	case R_RISCV_PCREL_HI20:
++	case R_RISCV_COPY:
++	case R_RISCV_JUMP_SLOT:
++	case R_RISCV_RELATIVE:
++	case R_RISCV_64:
++	case R_RISCV_32:
++	case R_RISCV_BRANCH:
++	case R_RISCV_CALL:
++	case R_RISCV_JAL:
++	case R_RISCV_RVC_BRANCH:
++	case R_RISCV_RVC_JUMP:
++	  if (bfd_link_pic (info))
++	    break;
++	  /* Fall through.  */
++
++	case R_RISCV_CALL_PLT:
++	  if (h != NULL)
++	    {
++	      if (h->plt.refcount > 0)
++		h->plt.refcount--;
++	    }
++	  break;
++
++	default:
++	  break;
++	}
++    }
++
++  return TRUE;
++}
++
++/* Adjust a symbol defined by a dynamic object and referenced by a
++   regular object.  The current definition is in some section of the
++   dynamic object, but we're not including those sections.  We have to
++   change the definition to something the rest of the link can
++   understand.  */
++
++static bfd_boolean
++riscv_elf_adjust_dynamic_symbol (struct bfd_link_info *info,
++				 struct elf_link_hash_entry *h)
++{
++  struct riscv_elf_link_hash_table *htab;
++  struct riscv_elf_link_hash_entry * eh;
++  struct riscv_elf_dyn_relocs *p;
++  bfd *dynobj;
++  asection *s;
++
++  htab = riscv_elf_hash_table (info);
++  BFD_ASSERT (htab != NULL);
++
++  dynobj = htab->elf.dynobj;
++
++  /* Make sure we know what is going on here.  */
++  BFD_ASSERT (dynobj != NULL
++	      && (h->needs_plt
++		  || h->type == STT_GNU_IFUNC
++		  || h->u.weakdef != NULL
++		  || (h->def_dynamic
++		      && h->ref_regular
++		      && !h->def_regular)));
++
++  /* If this is a function, put it in the procedure linkage table.  We
++     will fill in the contents of the procedure linkage table later
++     (although we could actually do it here).  */
++  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
++    {
++      if (h->plt.refcount <= 0
++	  || SYMBOL_CALLS_LOCAL (info, h)
++	  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
++	      && h->root.type == bfd_link_hash_undefweak))
++	{
++	  /* This case can occur if we saw a R_RISCV_CALL_PLT reloc in an
++	     input file, but the symbol was never referred to by a dynamic
++	     object, or if all references were garbage collected.  In such
++	     a case, we don't actually need to build a PLT entry.  */
++	  h->plt.offset = (bfd_vma) -1;
++	  h->needs_plt = 0;
++	}
++
++      return TRUE;
++    }
++  else
++    h->plt.offset = (bfd_vma) -1;
++
++  /* If this is a weak symbol, and there is a real definition, the
++     processor independent code will have arranged for us to see the
++     real definition first, and we can just use the same value.  */
++  if (h->u.weakdef != NULL)
++    {
++      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
++		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
++      h->root.u.def.section = h->u.weakdef->root.u.def.section;
++      h->root.u.def.value = h->u.weakdef->root.u.def.value;
++      return TRUE;
++    }
++
++  /* This is a reference to a symbol defined by a dynamic object which
++     is not a function.  */
++
++  /* If we are creating a shared library, we must presume that the
++     only references to the symbol are via the global offset table.
++     For such cases we need not do anything here; the relocations will
++     be handled correctly by relocate_section.  */
++  if (bfd_link_pic (info))
++    return TRUE;
++
++  /* If there are no references to this symbol that do not use the
++     GOT, we don't need to generate a copy reloc.  */
++  if (!h->non_got_ref)
++    return TRUE;
++
++  /* If -z nocopyreloc was given, we won't generate them either.  */
++  if (info->nocopyreloc)
++    {
++      h->non_got_ref = 0;
++      return TRUE;
++    }
++
++  eh = (struct riscv_elf_link_hash_entry *) h;
++  for (p = eh->dyn_relocs; p != NULL; p = p->next)
++    {
++      s = p->sec->output_section;
++      if (s != NULL && (s->flags & SEC_READONLY) != 0)
++	break;
++    }
++
++  /* If we didn't find any dynamic relocs in read-only sections, then
++     we'll be keeping the dynamic relocs and avoiding the copy reloc.  */
++  if (p == NULL)
++    {
++      h->non_got_ref = 0;
++      return TRUE;
++    }
++
++  /* We must allocate the symbol in our .dynbss section, which will
++     become part of the .bss section of the executable.  There will be
++     an entry for this symbol in the .dynsym section.  The dynamic
++     object will contain position independent code, so all references
++     from the dynamic object to this symbol will go through the global
++     offset table.  The dynamic linker will use the .dynsym entry to
++     determine the address it must put in the global offset table, so
++     both the dynamic object and the regular object will refer to the
++     same memory location for the variable.  */
++
++  /* We must generate a R_RISCV_COPY reloc to tell the dynamic linker
++     to copy the initial value out of the dynamic object and into the
++     runtime process image.  We need to remember the offset into the
++     .rel.bss section we are going to use.  */
++  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
++    {
++      htab->srelbss->size += sizeof (ElfNN_External_Rela);
++      h->needs_copy = 1;
++    }
++
++  if (eh->tls_type & ~GOT_NORMAL)
++    return _bfd_elf_adjust_dynamic_copy (info, h, htab->sdyntdata);
++
++  return _bfd_elf_adjust_dynamic_copy (info, h, htab->sdynbss);
++}
++
++/* Allocate space in .plt, .got and associated reloc sections for
++   dynamic relocs.  */
++
++static bfd_boolean
++allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
++{
++  struct bfd_link_info *info;
++  struct riscv_elf_link_hash_table *htab;
++  struct riscv_elf_link_hash_entry *eh;
++  struct riscv_elf_dyn_relocs *p;
++
++  if (h->root.type == bfd_link_hash_indirect)
++    return TRUE;
++
++  info = (struct bfd_link_info *) inf;
++  htab = riscv_elf_hash_table (info);
++  BFD_ASSERT (htab != NULL);
++
++  if (htab->elf.dynamic_sections_created
++      && h->plt.refcount > 0)
++    {
++      /* Make sure this symbol is output as a dynamic symbol.
++	 Undefined weak syms won't yet be marked as dynamic.  */
++      if (h->dynindx == -1
++	  && !h->forced_local)
++	{
++	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
++	    return FALSE;
++	}
++
++      if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, bfd_link_pic (info), h))
++	{
++	  asection *s = htab->elf.splt;
++
++	  if (s->size == 0)
++	    s->size = PLT_HEADER_SIZE;
++
++	  h->plt.offset = s->size;
++
++	  /* Make room for this entry.  */
++	  s->size += PLT_ENTRY_SIZE;
++
++	  /* We also need to make an entry in the .got.plt section.  */
++	  htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
++
++	  /* We also need to make an entry in the .rela.plt section.  */
++	  htab->elf.srelplt->size += sizeof (ElfNN_External_Rela);
++
++	  /* If this symbol is not defined in a regular file, and we are
++	     not generating a shared library, then set the symbol to this
++	     location in the .plt.  This is required to make function
++	     pointers compare as equal between the normal executable and
++	     the shared library.  */
++	  if (! bfd_link_pic (info)
++	      && !h->def_regular)
++	    {
++	      h->root.u.def.section = s;
++	      h->root.u.def.value = h->plt.offset;
++	    }
++	}
++      else
++	{
++	  h->plt.offset = (bfd_vma) -1;
++	  h->needs_plt = 0;
++	}
++    }
++  else
++    {
++      h->plt.offset = (bfd_vma) -1;
++      h->needs_plt = 0;
++    }
++
++  if (h->got.refcount > 0)
++    {
++      asection *s;
++      bfd_boolean dyn;
++      int tls_type = riscv_elf_hash_entry (h)->tls_type;
++
++      /* Make sure this symbol is output as a dynamic symbol.
++	 Undefined weak syms won't yet be marked as dynamic.  */
++      if (h->dynindx == -1
++	  && !h->forced_local)
++	{
++	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
++	    return FALSE;
++	}
++
++      s = htab->elf.sgot;
++      h->got.offset = s->size;
++      dyn = htab->elf.dynamic_sections_created;
++      if (tls_type & (GOT_TLS_GD | GOT_TLS_IE))
++	{
++	  /* TLS_GD needs two dynamic relocs and two GOT slots.  */
++	  if (tls_type & GOT_TLS_GD)
++	    {
++	      s->size += 2 * RISCV_ELF_WORD_BYTES;
++	      htab->elf.srelgot->size += 2 * sizeof (ElfNN_External_Rela);
++	    }
++
++	  /* TLS_IE needs one dynamic reloc and one GOT slot.  */
++	  if (tls_type & GOT_TLS_IE)
++	    {
++	      s->size += RISCV_ELF_WORD_BYTES;
++	      htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
++	    }
++	}
++      else
++	{
++	  s->size += RISCV_ELF_WORD_BYTES;
++	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h))
++	    htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
++	}
++    }
++  else
++    h->got.offset = (bfd_vma) -1;
++
++  eh = (struct riscv_elf_link_hash_entry *) h;
++  if (eh->dyn_relocs == NULL)
++    return TRUE;
++
++  /* In the shared -Bsymbolic case, discard space allocated for
++     dynamic pc-relative relocs against symbols which turn out to be
++     defined in regular objects.  For the normal shared case, discard
++     space for pc-relative relocs that have become local due to symbol
++     visibility changes.  */
++
++  if (bfd_link_pic (info))
++    {
++      if (SYMBOL_CALLS_LOCAL (info, h))
++	{
++	  struct riscv_elf_dyn_relocs **pp;
++
++	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
++	    {
++	      p->count -= p->pc_count;
++	      p->pc_count = 0;
++	      if (p->count == 0)
++		*pp = p->next;
++	      else
++		pp = &p->next;
++	    }
++	}
++
++      /* Also discard relocs on undefined weak syms with non-default
++	 visibility.  */
++      if (eh->dyn_relocs != NULL
++	  && h->root.type == bfd_link_hash_undefweak)
++	{
++	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
++	    eh->dyn_relocs = NULL;
++
++	  /* Make sure undefined weak symbols are output as dynamic
++	     symbols in PIEs.  */
++	  else if (h->dynindx == -1
++		   && !h->forced_local)
++	    {
++	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
++		return FALSE;
++	    }
++	}
++    }
++  else
++    {
++      /* For the non-shared case, discard space for relocs against
++	 symbols which turn out to need copy relocs or are not
++	 dynamic.  */
++
++      if (!h->non_got_ref
++	  && ((h->def_dynamic
++	       && !h->def_regular)
++	      || (htab->elf.dynamic_sections_created
++		  && (h->root.type == bfd_link_hash_undefweak
++		      || h->root.type == bfd_link_hash_undefined))))
++	{
++	  /* Make sure this symbol is output as a dynamic symbol.
++	     Undefined weak syms won't yet be marked as dynamic.  */
++	  if (h->dynindx == -1
++	      && !h->forced_local)
++	    {
++	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
++		return FALSE;
++	    }
++
++	  /* If that succeeded, we know we'll be keeping all the
++	     relocs.  */
++	  if (h->dynindx != -1)
++	    goto keep;
++	}
++
++      eh->dyn_relocs = NULL;
++
++    keep: ;
++    }
++
++  /* Finally, allocate space.  */
++  for (p = eh->dyn_relocs; p != NULL; p = p->next)
++    {
++      asection *sreloc = elf_section_data (p->sec)->sreloc;
++      sreloc->size += p->count * sizeof (ElfNN_External_Rela);
++    }
++
++  return TRUE;
++}
++
++/* Find any dynamic relocs that apply to read-only sections.  */
++
++static bfd_boolean
++readonly_dynrelocs (struct elf_link_hash_entry *h, void *inf)
++{
++  struct riscv_elf_link_hash_entry *eh;
++  struct riscv_elf_dyn_relocs *p;
++
++  eh = (struct riscv_elf_link_hash_entry *) h;
++  for (p = eh->dyn_relocs; p != NULL; p = p->next)
++    {
++      asection *s = p->sec->output_section;
++
++      if (s != NULL && (s->flags & SEC_READONLY) != 0)
++	{
++	  ((struct bfd_link_info *) inf)->flags |= DF_TEXTREL;
++	  return FALSE;
++	}
++    }
++  return TRUE;
++}
++
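++/* Set the sizes of the dynamic sections.  */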
++static bfd_boolean
++riscv_elf_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info)
++{
++  struct riscv_elf_link_hash_table *htab;
++  bfd *dynobj;
++  asection *s;
++  bfd *ibfd;
++
++  htab = riscv_elf_hash_table (info);
++  BFD_ASSERT (htab != NULL);
++  dynobj = htab->elf.dynobj;
++  BFD_ASSERT (dynobj != NULL);
++
++  if (elf_hash_table (info)->dynamic_sections_created)
++    {
++      /* Set the contents of the .interp section to the interpreter.  */
++      if (bfd_link_executable (info) && !info->nointerp)
++	{
++	  s = bfd_get_linker_section (dynobj, ".interp");
++	  BFD_ASSERT (s != NULL);
++	  s->size = strlen (ELFNN_DYNAMIC_INTERPRETER) + 1;
++	  s->contents = (unsigned char *) ELFNN_DYNAMIC_INTERPRETER;
++	}
++    }
++
++  /* Set up .got offsets for local syms, and space for local dynamic
++     relocs.  */
++  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
++    {
++      bfd_signed_vma *local_got;
++      bfd_signed_vma *end_local_got;
++      char *local_tls_type;
++      bfd_size_type locsymcount;
++      Elf_Internal_Shdr *symtab_hdr;
++      asection *srel;
++
++      if (! is_riscv_elf (ibfd))
++	continue;
++
++      for (s = ibfd->sections; s != NULL; s = s->next)
++	{
++	  struct riscv_elf_dyn_relocs *p;
++
++	  for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
++	    {
++	      if (!bfd_is_abs_section (p->sec)
++		  && bfd_is_abs_section (p->sec->output_section))
++		{
++		  /* Input section has been discarded, either because
++		     it is a copy of a linkonce section or due to
++		     linker script /DISCARD/, so we'll be discarding
++		     the relocs too.  */
++		}
++	      else if (p->count != 0)
++		{
++		  srel = elf_section_data (p->sec)->sreloc;
++		  srel->size += p->count * sizeof (ElfNN_External_Rela);
++		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
++		    info->flags |= DF_TEXTREL;
++		}
++	    }
++	}
++
++      local_got = elf_local_got_refcounts (ibfd);
++      if (!local_got)
++	continue;
++
++      symtab_hdr = &elf_symtab_hdr (ibfd);
++      locsymcount = symtab_hdr->sh_info;
++      end_local_got = local_got + locsymcount;
++      local_tls_type = _bfd_riscv_elf_local_got_tls_type (ibfd);
++      s = htab->elf.sgot;
++      srel = htab->elf.srelgot;
++      for (; local_got < end_local_got; ++local_got, ++local_tls_type)
++	{
++	  if (*local_got > 0)
++	    {
++	      *local_got = s->size;
++	      s->size += RISCV_ELF_WORD_BYTES;
++	      if (*local_tls_type & GOT_TLS_GD)
++		s->size += RISCV_ELF_WORD_BYTES;
++	      if (bfd_link_pic (info)
++		  || (*local_tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
++		srel->size += sizeof (ElfNN_External_Rela);
++	    }
++	  else
++	    *local_got = (bfd_vma) -1;
++	}
++    }
++
++  /* Allocate global sym .plt and .got entries, and space for global
++     sym dynamic relocs.  */
++  elf_link_hash_traverse (&htab->elf, allocate_dynrelocs, info);
++
++  if (htab->elf.sgotplt)
++    {
++      struct elf_link_hash_entry *got;
++      got = elf_link_hash_lookup (elf_hash_table (info),
++				  "_GLOBAL_OFFSET_TABLE_",
++				  FALSE, FALSE, FALSE);
++
++      /* Don't allocate the .got.plt section if there are no GOT or PLT
++	 entries and there is no reference to _GLOBAL_OFFSET_TABLE_.  */
++      if ((got == NULL
++	   || !got->ref_regular_nonweak)
++	  && (htab->elf.sgotplt->size == GOTPLT_HEADER_SIZE)
++	  && (htab->elf.splt == NULL
++	      || htab->elf.splt->size == 0)
++	  && (htab->elf.sgot == NULL
++	      || (htab->elf.sgot->size
++		  == get_elf_backend_data (output_bfd)->got_header_size)))
++	htab->elf.sgotplt->size = 0;
++    }
++
++  /* The check_relocs and adjust_dynamic_symbol entry points have
++     determined the sizes of the various dynamic sections.  Allocate
++     memory for them.  */
++  for (s = dynobj->sections; s != NULL; s = s->next)
++    {
++      if ((s->flags & SEC_LINKER_CREATED) == 0)
++	continue;
++
++      if (s == htab->elf.splt
++	  || s == htab->elf.sgot
++	  || s == htab->elf.sgotplt
++	  || s == htab->sdynbss)
++	{
++	  /* Strip this section if we don't need it; see the
++	     comment below.  */
++	}
++      else if (strncmp (s->name, ".rela", 5) == 0)
++	{
++	  if (s->size != 0)
++	    {
++	      /* We use the reloc_count field as a counter if we need
++		 to copy relocs into the output file.  */
++	      s->reloc_count = 0;
++	    }
++	}
++      else
++	{
++	  /* It's not one of our sections.  */
++	  continue;
++	}
++
++      if (s->size == 0)
++	{
++	  /* If we don't need this section, strip it from the
++	     output file.  This is mostly to handle .rela.bss and
++	     .rela.plt.  We must create both sections in
++	     create_dynamic_sections, because they must be created
++	     before the linker maps input sections to output
++	     sections.  The linker does that before
++	     adjust_dynamic_symbol is called, and it is that
++	     function which decides whether anything needs to go
++	     into these sections.  */
++	  s->flags |= SEC_EXCLUDE;
++	  continue;
++	}
++
++      if ((s->flags & SEC_HAS_CONTENTS) == 0)
++	continue;
++
++      /* Allocate memory for the section contents.  Zero the memory
++	 for the benefit of .rela.plt, which has 4 unused entries
++	 at the beginning, and we don't want garbage.  */
++      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
++      if (s->contents == NULL)
++	return FALSE;
++    }
++
++  if (elf_hash_table (info)->dynamic_sections_created)
++    {
++      /* Add some entries to the .dynamic section.  We fill in the
++	 values later, in riscv_elf_finish_dynamic_sections, but we
++	 must add the entries now so that we get the correct size for
++	 the .dynamic section.  The DT_DEBUG entry is filled in by the
++	 dynamic linker and used by the debugger.  */
++#define add_dynamic_entry(TAG, VAL) \
++  _bfd_elf_add_dynamic_entry (info, TAG, VAL)
++
++      if (bfd_link_executable (info))
++	{
++	  if (!add_dynamic_entry (DT_DEBUG, 0))
++	    return FALSE;
++	}
++
++      if (htab->elf.srelplt->size != 0)
++	{
++	  if (!add_dynamic_entry (DT_PLTGOT, 0)
++	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
++	      || !add_dynamic_entry (DT_PLTREL, DT_RELA)
++	      || !add_dynamic_entry (DT_JMPREL, 0))
++	    return FALSE;
++	}
++
++      if (!add_dynamic_entry (DT_RELA, 0)
++	  || !add_dynamic_entry (DT_RELASZ, 0)
++	  || !add_dynamic_entry (DT_RELAENT, sizeof (ElfNN_External_Rela)))
++	return FALSE;
++
++      /* If any dynamic relocs apply to a read-only section,
++	 then we need a DT_TEXTREL entry.  */
++      if ((info->flags & DF_TEXTREL) == 0)
++	elf_link_hash_traverse (&htab->elf, readonly_dynrelocs, info);
++
++      if (info->flags & DF_TEXTREL)
++	{
++	  if (!add_dynamic_entry (DT_TEXTREL, 0))
++	    return FALSE;
++	}
++    }
++#undef add_dynamic_entry
++
++  return TRUE;
++}
++
++#define TP_OFFSET 0
++#define DTP_OFFSET 0x800
++
++/* Return the relocation value for a TLS dtp-relative reloc.  */
++
++static bfd_vma
++dtpoff (struct bfd_link_info *info, bfd_vma address)
++{
++  /* If tls_sec is NULL, we should have signalled an error already.  */
++  if (elf_hash_table (info)->tls_sec == NULL)
++    return 0;
++  return address - elf_hash_table (info)->tls_sec->vma - DTP_OFFSET;
++}
++
++/* Return the relocation value for a static TLS tp-relative relocation.  */
++
++static bfd_vma
++tpoff (struct bfd_link_info *info, bfd_vma address)
++{
++  /* If tls_sec is NULL, we should have signalled an error already.  */
++  if (elf_hash_table (info)->tls_sec == NULL)
++    return 0;
++  return address - elf_hash_table (info)->tls_sec->vma - TP_OFFSET;
++}
++
++/* Return the global pointer's value, or 0 if it is not in use.  */
++
++static bfd_vma
++riscv_global_pointer_value (struct bfd_link_info *info)
++{
++  struct bfd_link_hash_entry *h;
++
++  h = bfd_link_hash_lookup (info->hash, "_gp", FALSE, FALSE, TRUE);
++  if (h == NULL || h->type != bfd_link_hash_defined)
++    return 0;
++
++  return h->u.def.value + sec_addr (h->u.def.section);
++}
++
++/* Emplace a static relocation.  */
++
++static bfd_reloc_status_type
++perform_relocation (const reloc_howto_type *howto,
++		    const Elf_Internal_Rela *rel,
++		    bfd_vma value,
++		    asection *input_section,
++		    bfd *input_bfd,
++		    bfd_byte *contents)
++{
++  if (howto->pc_relative)
++    value -= sec_addr (input_section) + rel->r_offset;
++  value += rel->r_addend;
++
++  switch (ELFNN_R_TYPE (rel->r_info))
++    {
++    case R_RISCV_HI20:
++    case R_RISCV_TPREL_HI20:
++    case R_RISCV_PCREL_HI20:
++    case R_RISCV_GOT_HI20:
++    case R_RISCV_TLS_GOT_HI20:
++    case R_RISCV_TLS_GD_HI20:
++      if (ARCH_SIZE > 32 && !VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
++	return bfd_reloc_overflow;
++      value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value));
++      break;
++
++    case R_RISCV_LO12_I:
++    case R_RISCV_GPREL_I:
++    case R_RISCV_TPREL_LO12_I:
++    case R_RISCV_PCREL_LO12_I:
++      value = ENCODE_ITYPE_IMM (value);
++      break;
++
++    case R_RISCV_LO12_S:
++    case R_RISCV_GPREL_S:
++    case R_RISCV_TPREL_LO12_S:
++    case R_RISCV_PCREL_LO12_S:
++      value = ENCODE_STYPE_IMM (value);
++      break;
++
++    case R_RISCV_CALL:
++    case R_RISCV_CALL_PLT:
++      if (ARCH_SIZE > 32 && !VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
++	return bfd_reloc_overflow;
++      value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value))
++	      | (ENCODE_ITYPE_IMM (value) << 32);
++      break;
++
++    case R_RISCV_JAL:
++      if (!VALID_UJTYPE_IMM (value))
++	return bfd_reloc_overflow;
++      value = ENCODE_UJTYPE_IMM (value);
++      break;
++
++    case R_RISCV_BRANCH:
++      if (!VALID_SBTYPE_IMM (value))
++	return bfd_reloc_overflow;
++      value = ENCODE_SBTYPE_IMM (value);
++      break;
++
++    case R_RISCV_RVC_BRANCH:
++      if (!VALID_RVC_B_IMM (value))
++	return bfd_reloc_overflow;
++      value = ENCODE_RVC_B_IMM (value);
++      break;
++
++    case R_RISCV_RVC_JUMP:
++      if (!VALID_RVC_J_IMM (value))
++	return bfd_reloc_overflow;
++      value = ENCODE_RVC_J_IMM (value);
++      break;
++
++    case R_RISCV_RVC_LUI:
++      if (!VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (value)))
++	return bfd_reloc_overflow;
++      value = ENCODE_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (value));
++      break;
++
++    case R_RISCV_32:
++    case R_RISCV_64:
++    case R_RISCV_ADD8:
++    case R_RISCV_ADD16:
++    case R_RISCV_ADD32:
++    case R_RISCV_ADD64:
++    case R_RISCV_SUB8:
++    case R_RISCV_SUB16:
++    case R_RISCV_SUB32:
++    case R_RISCV_SUB64:
++    case R_RISCV_TLS_DTPREL32:
++    case R_RISCV_TLS_DTPREL64:
++      break;
++
++    default:
++      return bfd_reloc_notsupported;
++    }
++
++  bfd_vma word = bfd_get (howto->bitsize, input_bfd, contents + rel->r_offset);
++  word = (word & ~howto->dst_mask) | (value & howto->dst_mask);
++  bfd_put (howto->bitsize, input_bfd, word, contents + rel->r_offset);
++
++  return bfd_reloc_ok;
++}
++
++/* Remember all PC-relative high-part relocs we've encountered to help us
++   later resolve the corresponding low-part relocs.  */
++
++typedef struct {
++  bfd_vma address;
++  bfd_vma value;
++} riscv_pcrel_hi_reloc;
++
++typedef struct riscv_pcrel_lo_reloc {
++  asection *input_section;
++  struct bfd_link_info *info;
++  reloc_howto_type *howto;
++  const Elf_Internal_Rela *reloc;
++  bfd_vma addr;
++  const char *name;
++  bfd_byte *contents;
++  struct riscv_pcrel_lo_reloc *next;
++} riscv_pcrel_lo_reloc;
++
++typedef struct {
++  htab_t hi_relocs;
++  riscv_pcrel_lo_reloc *lo_relocs;
++} riscv_pcrel_relocs;
++
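++/* Hash a pcrel_hi reloc by the address it applies to.  */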
++static hashval_t
++riscv_pcrel_reloc_hash (const void *entry)
++{
++  const riscv_pcrel_hi_reloc *e = entry;
++  return (hashval_t)(e->address >> 2);
++}
++
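++/* Two pcrel_hi relocs are equal if they apply to the same address.  */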
++static bfd_boolean
++riscv_pcrel_reloc_eq (const void *entry1, const void *entry2)
++{
++  const riscv_pcrel_hi_reloc *e1 = entry1, *e2 = entry2;
++  return e1->address == e2->address;
++}
++
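++/* Initialize the bookkeeping for pcrel_hi/pcrel_lo reloc pairs.  */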
++static bfd_boolean
++riscv_init_pcrel_relocs (riscv_pcrel_relocs *p)
++{
++  p->lo_relocs = NULL;
++  p->hi_relocs = htab_create (1024, riscv_pcrel_reloc_hash,
++			      riscv_pcrel_reloc_eq, free);
++  return p->hi_relocs != NULL;
++}
++
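++/* Free the pcrel_hi/pcrel_lo reloc bookkeeping.  */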
++static void
++riscv_free_pcrel_relocs (riscv_pcrel_relocs *p)
++{
++  riscv_pcrel_lo_reloc *cur = p->lo_relocs;
++  while (cur != NULL)
++    {
++      riscv_pcrel_lo_reloc *next = cur->next;
++      free (cur);
++      cur = next;
++    }
++
++  htab_delete (p->hi_relocs);
++}
++
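++/* Record the value of a pcrel_hi reloc, keyed by its address, so that
++   matching pcrel_lo relocs can be resolved later.  */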
++static bfd_boolean
++riscv_record_pcrel_hi_reloc (riscv_pcrel_relocs *p, bfd_vma addr, bfd_vma value)
++{
++  riscv_pcrel_hi_reloc entry = {addr, value - addr};
++  riscv_pcrel_hi_reloc **slot =
++    (riscv_pcrel_hi_reloc **) htab_find_slot (p->hi_relocs, &entry, INSERT);
++  BFD_ASSERT (*slot == NULL);
++  *slot = (riscv_pcrel_hi_reloc *) bfd_malloc (sizeof (riscv_pcrel_hi_reloc));
++  if (*slot == NULL)
++    return FALSE;
++  **slot = entry;
++  return TRUE;
++}
++
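++/* Queue a pcrel_lo reloc for resolution once all pcrel_hi relocs have
++   been recorded.  */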
++static bfd_boolean
++riscv_record_pcrel_lo_reloc (riscv_pcrel_relocs *p,
++			     asection *input_section,
++			     struct bfd_link_info *info,
++			     reloc_howto_type *howto,
++			     const Elf_Internal_Rela *reloc,
++			     bfd_vma addr,
++			     const char *name,
++			     bfd_byte *contents)
++{
++  riscv_pcrel_lo_reloc *entry;
++  entry = (riscv_pcrel_lo_reloc *) bfd_malloc (sizeof (riscv_pcrel_lo_reloc));
++  if (entry == NULL)
++    return FALSE;
++  *entry = (riscv_pcrel_lo_reloc) {input_section, info, howto, reloc, addr,
++				   name, contents, p->lo_relocs};
++  p->lo_relocs = entry;
++  return TRUE;
++}
++
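++/* Resolve the queued pcrel_lo relocs against the recorded pcrel_hi
++   values, reporting an overflow when no matching pcrel_hi is found.  */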
++static bfd_boolean
++riscv_resolve_pcrel_lo_relocs (riscv_pcrel_relocs *p)
++{
++  riscv_pcrel_lo_reloc *r;
++  for (r = p->lo_relocs; r != NULL; r = r->next)
++    {
++      bfd *input_bfd = r->input_section->owner;
++      riscv_pcrel_hi_reloc search = {r->addr, 0};
++      riscv_pcrel_hi_reloc *entry = htab_find (p->hi_relocs, &search);
++      if (entry == NULL)
++        {
++	  ((*r->info->callbacks->reloc_overflow)
++	   (r->info, NULL, r->name, r->howto->name, (bfd_vma) 0,
++	    input_bfd, r->input_section, r->reloc->r_offset));
++	  return TRUE;
++        }
++
++      perform_relocation (r->howto, r->reloc, entry->value, r->input_section,
++			  input_bfd, r->contents);
++    }
++
++  return TRUE;
++}
++
++/* Relocate a RISC-V ELF section.
++
++   The RELOCATE_SECTION function is called by the new ELF backend linker
++   to handle the relocations for a section.
++
++   The relocs are always passed as Rela structures.
++
++   This function is responsible for adjusting the section contents as
++   necessary, and (if generating a relocatable output file) adjusting
++   the reloc addend as necessary.
++
++   This function does not have to worry about setting the reloc
++   address or the reloc symbol index.
++
++   LOCAL_SYMS is a pointer to the swapped in local symbols.
++
++   LOCAL_SECTIONS is an array giving the section in the input file
++   corresponding to the st_shndx field of each local symbol.
++
++   The global hash table entry for the global symbols can be found
++   via elf_sym_hashes (input_bfd).
++
++   When generating relocatable output, this function must handle
++   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
++   going to be the section symbol corresponding to the output
++   section, which means that the addend must be adjusted
++   accordingly.  */
++
++static bfd_boolean
++riscv_elf_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
++			    bfd *input_bfd, asection *input_section,
++			    bfd_byte *contents, Elf_Internal_Rela *relocs,
++			    Elf_Internal_Sym *local_syms,
++			    asection **local_sections)
++{
++  Elf_Internal_Rela *rel;
++  Elf_Internal_Rela *relend;
++  riscv_pcrel_relocs pcrel_relocs;
++  bfd_boolean ret = FALSE;
++  asection *sreloc = elf_section_data (input_section)->sreloc;
++  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++  Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (input_bfd);
++  struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (input_bfd);
++  bfd_vma *local_got_offsets = elf_local_got_offsets (input_bfd);
++
++  if (!riscv_init_pcrel_relocs (&pcrel_relocs))
++    return FALSE;
++
++  relend = relocs + input_section->reloc_count;
++  for (rel = relocs; rel < relend; rel++)
++    {
++      unsigned long r_symndx;
++      struct elf_link_hash_entry *h;
++      Elf_Internal_Sym *sym;
++      asection *sec;
++      bfd_vma relocation;
++      bfd_reloc_status_type r = bfd_reloc_ok;
++      const char *name;
++      bfd_vma off, ie_off;
++      bfd_boolean unresolved_reloc, is_ie = FALSE;
++      bfd_vma pc = sec_addr (input_section) + rel->r_offset;
++      int r_type = ELFNN_R_TYPE (rel->r_info), tls_type;
++      reloc_howto_type *howto = riscv_elf_rtype_to_howto (r_type);
++      const char *msg = NULL;
++
++      if (r_type == R_RISCV_GNU_VTINHERIT || r_type == R_RISCV_GNU_VTENTRY)
++	continue;
++
++      /* This is a final link.  */
++      r_symndx = ELFNN_R_SYM (rel->r_info);
++      h = NULL;
++      sym = NULL;
++      sec = NULL;
++      unresolved_reloc = FALSE;
++      if (r_symndx < symtab_hdr->sh_info)
++	{
++	  sym = local_syms + r_symndx;
++	  sec = local_sections[r_symndx];
++	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
++	}
++      else
++	{
++	  bfd_boolean warned, ignored;
++
++	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
++				   r_symndx, symtab_hdr, sym_hashes,
++				   h, sec, relocation,
++				   unresolved_reloc, warned, ignored);
++	  if (warned)
++	    {
++	      /* To avoid generating warning messages about truncated
++		 relocations, set the relocation's address to be the same as
++		 the start of this section.  */
++	      if (input_section->output_section != NULL)
++		relocation = input_section->output_section->vma;
++	      else
++		relocation = 0;
++	    }
++	}
++
++      if (sec != NULL && discarded_section (sec))
++	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
++					 rel, 1, relend, howto, 0, contents);
++
++      if (bfd_link_relocatable (info))
++	continue;
++
++      if (h != NULL)
++	name = h->root.root.string;
++      else
++	{
++	  name = (bfd_elf_string_from_elf_section
++		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
++	  if (name == NULL || *name == '\0')
++	    name = bfd_section_name (input_bfd, sec);
++	}
++
++      switch (r_type)
++	{
++	case R_RISCV_NONE:
++	case R_RISCV_TPREL_ADD:
++	case R_RISCV_COPY:
++	case R_RISCV_JUMP_SLOT:
++	case R_RISCV_RELATIVE:
++	  /* These require nothing of us at all.  */
++	  continue;
++
++	case R_RISCV_HI20:
++	case R_RISCV_BRANCH:
++	case R_RISCV_RVC_BRANCH:
++	case R_RISCV_RVC_LUI:
++	case R_RISCV_LO12_I:
++	case R_RISCV_LO12_S:
++	  /* These require no special handling beyond perform_relocation.  */
++	  break;
++
++	case R_RISCV_GOT_HI20:
++	  if (h != NULL)
++	    {
++	      bfd_boolean dyn, pic;
++
++	      off = h->got.offset;
++	      BFD_ASSERT (off != (bfd_vma) -1);
++	      dyn = elf_hash_table (info)->dynamic_sections_created;
++	      pic = bfd_link_pic (info);
++
++	      if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, pic, h)
++		  || (pic && SYMBOL_REFERENCES_LOCAL (info, h)))
++		{
++		  /* This is actually a static link, or it is a
++		     -Bsymbolic link and the symbol is defined
++		     locally, or the symbol was forced to be local
++		     because of a version file.  We must initialize
++		     this entry in the global offset table.  Since the
++		     offset must always be a multiple of the word size,
++		     we use the least significant bit to record whether
++		     we have initialized it already.
++
++		     When doing a dynamic link, we create a .rela.got
++		     relocation entry to initialize the value.  This
++		     is done in the finish_dynamic_symbol routine.  */
++		  if ((off & 1) != 0)
++		    off &= ~1;
++		  else
++		    {
++		      bfd_put_NN (output_bfd, relocation,
++				  htab->elf.sgot->contents + off);
++		      h->got.offset |= 1;
++		    }
++		}
++	      else
++		unresolved_reloc = FALSE;
++	    }
++	  else
++	    {
++	      BFD_ASSERT (local_got_offsets != NULL
++			  && local_got_offsets[r_symndx] != (bfd_vma) -1);
++
++	      off = local_got_offsets[r_symndx];
++
++	      /* The offset must always be a multiple of the word size.
++		 So, we can use the least significant bit to record
++		 whether we have already processed this entry.  */
++	      if ((off & 1) != 0)
++		off &= ~1;
++	      else
++		{
++		  if (bfd_link_pic (info))
++		    {
++		      asection *s;
++		      Elf_Internal_Rela outrel;
++
++		      /* We need to generate a R_RISCV_RELATIVE reloc
++			 for the dynamic linker.  */
++		      s = htab->elf.srelgot;
++		      BFD_ASSERT (s != NULL);
++
++		      outrel.r_offset = sec_addr (htab->elf.sgot) + off;
++		      outrel.r_info =
++			ELFNN_R_INFO (0, R_RISCV_RELATIVE);
++		      outrel.r_addend = relocation;
++		      relocation = 0;
++		      riscv_elf_append_rela (output_bfd, s, &outrel);
++		    }
++
++		  bfd_put_NN (output_bfd, relocation,
++			      htab->elf.sgot->contents + off);
++		  local_got_offsets[r_symndx] |= 1;
++		}
++	    }
++	  relocation = sec_addr (htab->elf.sgot) + off;
++	  if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
++	    r = bfd_reloc_overflow;
++	  break;
++
++	case R_RISCV_ADD8:
++	case R_RISCV_ADD16:
++	case R_RISCV_ADD32:
++	case R_RISCV_ADD64:
++	  {
++	    bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
++					 contents + rel->r_offset);
++	    relocation = old_value + relocation;
++	  }
++	  break;
++
++	case R_RISCV_SUB8:
++	case R_RISCV_SUB16:
++	case R_RISCV_SUB32:
++	case R_RISCV_SUB64:
++	  {
++	    bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
++					 contents + rel->r_offset);
++	    relocation = old_value - relocation;
++	  }
++	  break;
++
++	case R_RISCV_CALL_PLT:
++	case R_RISCV_CALL:
++	case R_RISCV_JAL:
++	case R_RISCV_RVC_JUMP:
++	  if (bfd_link_pic (info) && h != NULL && h->plt.offset != MINUS_ONE)
++	    {
++	      /* Refer to the PLT entry.  */
++	      relocation = sec_addr (htab->elf.splt) + h->plt.offset;
++	      unresolved_reloc = FALSE;
++	    }
++	  break;
++
++	case R_RISCV_TPREL_HI20:
++	  relocation = tpoff (info, relocation);
++	  break;
++
++	case R_RISCV_TPREL_LO12_I:
++	case R_RISCV_TPREL_LO12_S:
++	  relocation = tpoff (info, relocation);
++	  if (VALID_ITYPE_IMM (relocation + rel->r_addend))
++	    {
++	      /* We can use tp as the base register.  */
++	      bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
++	      insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
++	      insn |= X_TP << OP_SH_RS1;
++	      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
++	    }
++	  break;
++
++	case R_RISCV_GPREL_I:
++	case R_RISCV_GPREL_S:
++	  {
++	    bfd_vma gp = riscv_global_pointer_value (info);
++	    bfd_boolean x0_base = VALID_ITYPE_IMM (relocation + rel->r_addend);
++	    if (x0_base || VALID_ITYPE_IMM (relocation + rel->r_addend - gp))
++	      {
++		/* We can use x0 or gp as the base register.  */
++		bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
++		insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
++		if (!x0_base)
++		  {
++		    rel->r_addend -= gp;
++		    insn |= X_GP << OP_SH_RS1;
++		  }
++		bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
++	      }
++	    else
++	      r = bfd_reloc_overflow;
++	    break;
++	  }
++
++	case R_RISCV_PCREL_HI20:
++	  if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc,
++					    relocation + rel->r_addend))
++	    r = bfd_reloc_overflow;
++	  break;
++
++	case R_RISCV_PCREL_LO12_I:
++	case R_RISCV_PCREL_LO12_S:
++	  if (riscv_record_pcrel_lo_reloc (&pcrel_relocs, input_section, info,
++					   howto, rel, relocation, name,
++					   contents))
++	    continue;
++	  r = bfd_reloc_overflow;
++	  break;
++
++	case R_RISCV_TLS_DTPREL32:
++	case R_RISCV_TLS_DTPREL64:
++	  relocation = dtpoff (info, relocation);
++	  break;
++
++	case R_RISCV_32:
++	case R_RISCV_64:
++	  if ((input_section->flags & SEC_ALLOC) == 0)
++	    break;
++
++	  if ((bfd_link_pic (info)
++	       && (h == NULL
++		   || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
++		   || h->root.type != bfd_link_hash_undefweak)
++	       && (! howto->pc_relative
++		   || !SYMBOL_CALLS_LOCAL (info, h)))
++	      || (!bfd_link_pic (info)
++		  && h != NULL
++		  && h->dynindx != -1
++		  && !h->non_got_ref
++		  && ((h->def_dynamic
++		       && !h->def_regular)
++		      || h->root.type == bfd_link_hash_undefweak
++		      || h->root.type == bfd_link_hash_undefined)))
++	    {
++	      Elf_Internal_Rela outrel;
++	      bfd_boolean skip_static_relocation, skip_dynamic_relocation;
++
++	      /* When generating a shared object, these relocations
++		 are copied into the output file to be resolved at run
++		 time.  */
++
++	      outrel.r_offset =
++		_bfd_elf_section_offset (output_bfd, info, input_section,
++					 rel->r_offset);
++	      skip_static_relocation = outrel.r_offset != (bfd_vma) -2;
++	      skip_dynamic_relocation = outrel.r_offset >= (bfd_vma) -2;
++	      outrel.r_offset += sec_addr (input_section);
++
++	      if (skip_dynamic_relocation)
++		memset (&outrel, 0, sizeof outrel);
++	      else if (h != NULL && h->dynindx != -1
++		       && !(bfd_link_pic (info)
++			    && SYMBOLIC_BIND (info, h)
++			    && h->def_regular))
++		{
++		  outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
++		  outrel.r_addend = rel->r_addend;
++		}
++	      else
++		{
++		  outrel.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
++		  outrel.r_addend = relocation + rel->r_addend;
++		}
++
++	      riscv_elf_append_rela (output_bfd, sreloc, &outrel);
++	      if (skip_static_relocation)
++		continue;
++	    }
++	  break;
++
++	case R_RISCV_TLS_GOT_HI20:
++	  is_ie = TRUE;
++	  /* Fall through.  */
++
++	case R_RISCV_TLS_GD_HI20:
++	  if (h != NULL)
++	    {
++	      off = h->got.offset;
++	      h->got.offset |= 1;
++	    }
++	  else
++	    {
++	      off = local_got_offsets[r_symndx];
++	      local_got_offsets[r_symndx] |= 1;
++	    }
++
++	  tls_type = _bfd_riscv_elf_tls_type (input_bfd, h, r_symndx);
++	  BFD_ASSERT (tls_type & (GOT_TLS_IE | GOT_TLS_GD));
++	  /* If this symbol is referenced by both GD and IE TLS, the IE
++	     reference's GOT slot follows the GD reference's slots.  */
++	  ie_off = 0;
++	  if ((tls_type & GOT_TLS_GD) && (tls_type & GOT_TLS_IE))
++	    ie_off = 2 * GOT_ENTRY_SIZE;
++
++	  if ((off & 1) != 0)
++	    off &= ~1;
++	  else
++	    {
++	      Elf_Internal_Rela outrel;
++	      int indx = 0;
++	      bfd_boolean need_relocs = FALSE;
++
++	      if (htab->elf.srelgot == NULL)
++		abort ();
++
++	      if (h != NULL)
++		{
++		  bfd_boolean dyn, pic;
++		  dyn = htab->elf.dynamic_sections_created;
++		  pic = bfd_link_pic (info);
++
++		  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, pic, h)
++		      && (!pic || !SYMBOL_REFERENCES_LOCAL (info, h)))
++		    indx = h->dynindx;
++		}
++
++	      /* The GOT entries have not been initialized yet.  Do it
++	         now, and emit any relocations.  */
++	      if ((bfd_link_pic (info) || indx != 0)
++		  && (h == NULL
++		      || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
++		      || h->root.type != bfd_link_hash_undefweak))
++		need_relocs = TRUE;
++
++	      if (tls_type & GOT_TLS_GD)
++		{
++		  if (need_relocs)
++		    {
++		      outrel.r_offset = sec_addr (htab->elf.sgot) + off;
++		      outrel.r_addend = 0;
++		      outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPMODNN);
++		      bfd_put_NN (output_bfd, 0,
++				  htab->elf.sgot->contents + off);
++		      riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
++		      if (indx == 0)
++			{
++			  BFD_ASSERT (! unresolved_reloc);
++			  bfd_put_NN (output_bfd,
++				      dtpoff (info, relocation),
++				      (htab->elf.sgot->contents + off +
++				       RISCV_ELF_WORD_BYTES));
++			}
++		      else
++			{
++			  bfd_put_NN (output_bfd, 0,
++				      (htab->elf.sgot->contents + off +
++				       RISCV_ELF_WORD_BYTES));
++			  outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPRELNN);
++			  outrel.r_offset += RISCV_ELF_WORD_BYTES;
++			  riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
++			}
++		    }
++		  else
++		    {
++		      /* If we are not emitting relocations for a
++			 general dynamic reference, then we must be in a
++			 static link or an executable link with the
++			 symbol binding locally.  Mark it as belonging
++			 to module 1, the executable.  */
++		      bfd_put_NN (output_bfd, 1,
++				  htab->elf.sgot->contents + off);
++		      bfd_put_NN (output_bfd,
++				  dtpoff (info, relocation),
++				  (htab->elf.sgot->contents + off +
++				   RISCV_ELF_WORD_BYTES));
++		    }
++		}
++
++	      if (tls_type & GOT_TLS_IE)
++		{
++		  if (need_relocs)
++		    {
++		      bfd_put_NN (output_bfd, 0,
++				  htab->elf.sgot->contents + off + ie_off);
++		      outrel.r_offset = sec_addr (htab->elf.sgot)
++				       + off + ie_off;
++		      outrel.r_addend = 0;
++		      if (indx == 0)
++			outrel.r_addend = tpoff (info, relocation);
++		      outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_TPRELNN);
++		      riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
++		    }
++		  else
++		    {
++		      bfd_put_NN (output_bfd, tpoff (info, relocation),
++				  htab->elf.sgot->contents + off + ie_off);
++		    }
++		}
++	    }
++
++	  BFD_ASSERT (off < (bfd_vma) -2);
++	  relocation = sec_addr (htab->elf.sgot) + off + (is_ie ? ie_off : 0);
++	  if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
++	    r = bfd_reloc_overflow;
++	  unresolved_reloc = FALSE;
++	  break;
++
++	default:
++	  r = bfd_reloc_notsupported;
++	}
++
++      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
++	 because such sections are not SEC_ALLOC and thus ld.so will
++	 not process them.  */
++      if (unresolved_reloc
++	  && !((input_section->flags & SEC_DEBUGGING) != 0
++	       && h->def_dynamic)
++	  && _bfd_elf_section_offset (output_bfd, info, input_section,
++				      rel->r_offset) != (bfd_vma) -1)
++	{
++	  (*_bfd_error_handler)
++	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
++	     input_bfd,
++	     input_section,
++	     (long) rel->r_offset,
++	     howto->name,
++	     h->root.root.string);
++	  continue;
++	}
++
++      if (r == bfd_reloc_ok)
++	r = perform_relocation (howto, rel, relocation, input_section,
++				input_bfd, contents);
++
++      switch (r)
++	{
++	case bfd_reloc_ok:
++	  continue;
++
++	case bfd_reloc_overflow:
++	  info->callbacks->reloc_overflow
++	    (info, (h ? &h->root : NULL), name, howto->name,
++	     (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
++	  break;
++
++	case bfd_reloc_undefined:
++	  info->callbacks->undefined_symbol
++	    (info, name, input_bfd, input_section, rel->r_offset,
++	     TRUE);
++	  break;
++
++	case bfd_reloc_outofrange:
++	  msg = _("internal error: out of range error");
++	  break;
++
++	case bfd_reloc_notsupported:
++	  msg = _("internal error: unsupported relocation error");
++	  break;
++
++	case bfd_reloc_dangerous:
++	  msg = _("internal error: dangerous relocation");
++	  break;
++
++	default:
++	  msg = _("internal error: unknown error");
++	  break;
++	}
++
++      if (msg)
++	info->callbacks->warning
++	  (info, msg, name, input_bfd, input_section, rel->r_offset);
++      goto out;
++    }
++
++  ret = riscv_resolve_pcrel_lo_relocs (&pcrel_relocs);
++out:
++  riscv_free_pcrel_relocs (&pcrel_relocs);
++  return ret;
++}
++
++/* Finish up dynamic symbol handling.  We set the contents of various
++   dynamic sections here.  */
++
++static bfd_boolean
++riscv_elf_finish_dynamic_symbol (bfd *output_bfd,
++				 struct bfd_link_info *info,
++				 struct elf_link_hash_entry *h,
++				 Elf_Internal_Sym *sym)
++{
++  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++  const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
++
++  if (h->plt.offset != (bfd_vma) -1)
++    {
++      /* We've decided to create a PLT entry for this symbol.  */
++      bfd_byte *loc;
++      bfd_vma i, header_address, plt_idx, got_address;
++      uint32_t plt_entry[PLT_ENTRY_INSNS];
++      Elf_Internal_Rela rela;
++
++      BFD_ASSERT (h->dynindx != -1);
++
++      /* Calculate the address of the PLT header.  */
++      header_address = sec_addr (htab->elf.splt);
++
++      /* Calculate the index of the entry.  */
++      plt_idx = (h->plt.offset - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
++
++      /* Calculate the address of the .got.plt entry.  */
++      got_address = riscv_elf_got_plt_val (plt_idx, info);
++
++      /* Find out where the .plt entry should go.  */
++      loc = htab->elf.splt->contents + h->plt.offset;
++
++      /* Fill in the PLT entry itself.  */
++      riscv_make_plt_entry (got_address, header_address + h->plt.offset,
++			    plt_entry);
++      for (i = 0; i < PLT_ENTRY_INSNS; i++)
++	bfd_put_32 (output_bfd, plt_entry[i], loc + 4*i);
++
++      /* Fill in the initial value of the .got.plt entry.  */
++      loc = htab->elf.sgotplt->contents
++	    + (got_address - sec_addr (htab->elf.sgotplt));
++      bfd_put_NN (output_bfd, sec_addr (htab->elf.splt), loc);
++
++      /* Fill in the entry in the .rela.plt section.  */
++      rela.r_offset = got_address;
++      rela.r_addend = 0;
++      rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_JUMP_SLOT);
++
++      loc = htab->elf.srelplt->contents + plt_idx * sizeof (ElfNN_External_Rela);
++      bed->s->swap_reloca_out (output_bfd, &rela, loc);
++
++      if (!h->def_regular)
++	{
++	  /* Mark the symbol as undefined, rather than as defined in
++	     the .plt section.  Leave the value alone.  */
++	  sym->st_shndx = SHN_UNDEF;
++	  /* If the symbol is weak, we do need to clear the value.
++	     Otherwise, the PLT entry would provide a definition for
++	     the symbol even if the symbol wasn't defined anywhere,
++	     and so the symbol would never be NULL.  */
++	  if (!h->ref_regular_nonweak)
++	    sym->st_value = 0;
++	}
++    }
++
++  if (h->got.offset != (bfd_vma) -1
++      && !(riscv_elf_hash_entry(h)->tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
++    {
++      asection *sgot;
++      asection *srela;
++      Elf_Internal_Rela rela;
++
++      /* This symbol has an entry in the GOT.  Set it up.  */
++
++      sgot = htab->elf.sgot;
++      srela = htab->elf.srelgot;
++      BFD_ASSERT (sgot != NULL && srela != NULL);
++
++      rela.r_offset = sec_addr (sgot) + (h->got.offset &~ (bfd_vma) 1);
++
++      /* If this is a -Bsymbolic link, and the symbol is defined
++	 locally, we just want to emit a RELATIVE reloc.  Likewise if
++	 the symbol was forced to be local because of a version file.
++	 The entry in the global offset table will already have been
++	 initialized in the relocate_section function.  */
++      if (bfd_link_pic (info)
++	  && (info->symbolic || h->dynindx == -1)
++	  && h->def_regular)
++	{
++	  asection *sec = h->root.u.def.section;
++	  rela.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
++	  rela.r_addend = (h->root.u.def.value
++			   + sec->output_section->vma
++			   + sec->output_offset);
++	}
++      else
++	{
++	  BFD_ASSERT (h->dynindx != -1);
++	  rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_NN);
++	  rela.r_addend = 0;
++	}
++
++      bfd_put_NN (output_bfd, 0,
++		  sgot->contents + (h->got.offset & ~(bfd_vma) 1));
++      riscv_elf_append_rela (output_bfd, srela, &rela);
++    }
++
++  if (h->needs_copy)
++    {
++      Elf_Internal_Rela rela;
++
++      /* This symbol needs a copy reloc.  Set it up.  */
++      BFD_ASSERT (h->dynindx != -1);
++
++      rela.r_offset = sec_addr (h->root.u.def.section) + h->root.u.def.value;
++      rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_COPY);
++      rela.r_addend = 0;
++      riscv_elf_append_rela (output_bfd, htab->srelbss, &rela);
++    }
++
++  /* Mark some specially defined symbols as absolute.  */
++  if (h == htab->elf.hdynamic
++      || (h == htab->elf.hgot || h == htab->elf.hplt))
++    sym->st_shndx = SHN_ABS;
++
++  return TRUE;
++}
++
++/* Finish up the dynamic sections.  */
++
++static bfd_boolean
++riscv_finish_dyn (bfd *output_bfd, struct bfd_link_info *info,
++		  bfd *dynobj, asection *sdyn)
++{
++  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++  const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
++  size_t dynsize = bed->s->sizeof_dyn;
++  bfd_byte *dyncon, *dynconend;
++
++  dynconend = sdyn->contents + sdyn->size;
++  for (dyncon = sdyn->contents; dyncon < dynconend; dyncon += dynsize)
++    {
++      Elf_Internal_Dyn dyn;
++      asection *s;
++
++      bed->s->swap_dyn_in (dynobj, dyncon, &dyn);
++
++      switch (dyn.d_tag)
++	{
++	case DT_PLTGOT:
++	  s = htab->elf.sgotplt;
++	  dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
++	  break;
++	case DT_JMPREL:
++	  s = htab->elf.srelplt;
++	  dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
++	  break;
++	case DT_PLTRELSZ:
++	  s = htab->elf.srelplt;
++	  dyn.d_un.d_val = s->size;
++	  break;
++	default:
++	  continue;
++	}
++
++      bed->s->swap_dyn_out (output_bfd, &dyn, dyncon);
++    }
++  return TRUE;
++}
++
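++/* Write out the PLT header, the reserved .got.plt and .got entries, and
++   the final values of the .dynamic entries.  */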
++static bfd_boolean
++riscv_elf_finish_dynamic_sections (bfd *output_bfd,
++				   struct bfd_link_info *info)
++{
++  bfd *dynobj;
++  asection *sdyn;
++  struct riscv_elf_link_hash_table *htab;
++
++  htab = riscv_elf_hash_table (info);
++  BFD_ASSERT (htab != NULL);
++  dynobj = htab->elf.dynobj;
++
++  sdyn = bfd_get_linker_section (dynobj, ".dynamic");
++
++  if (elf_hash_table (info)->dynamic_sections_created)
++    {
++      asection *splt;
++      bfd_boolean ret;
++
++      splt = htab->elf.splt;
++      BFD_ASSERT (splt != NULL && sdyn != NULL);
++
++      ret = riscv_finish_dyn (output_bfd, info, dynobj, sdyn);
++
++      if (ret != TRUE)
++	return ret;
++
++      /* Fill in the head and tail entries in the procedure linkage table.  */
++      if (splt->size > 0)
++	{
++	  int i;
++	  uint32_t plt_header[PLT_HEADER_INSNS];
++	  riscv_make_plt_header (sec_addr (htab->elf.sgotplt),
++				 sec_addr (splt), plt_header);
++
++	  for (i = 0; i < PLT_HEADER_INSNS; i++)
++	    bfd_put_32 (output_bfd, plt_header[i], splt->contents + 4*i);
++	}
++
++      elf_section_data (splt->output_section)->this_hdr.sh_entsize
++	= PLT_ENTRY_SIZE;
++    }
++
++  if (htab->elf.sgotplt)
++    {
++      asection *output_section = htab->elf.sgotplt->output_section;
++
++      if (bfd_is_abs_section (output_section))
++	{
++	  (*_bfd_error_handler)
++	    (_("discarded output section: `%A'"), htab->elf.sgotplt);
++	  return FALSE;
++	}
++
++      if (htab->elf.sgotplt->size > 0)
++	{
++	  /* Write the first two entries in .got.plt, needed for the dynamic
++	     linker.  */
++	  bfd_put_NN (output_bfd, (bfd_vma) -1, htab->elf.sgotplt->contents);
++	  bfd_put_NN (output_bfd, (bfd_vma) 0,
++		      htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
++	}
++
++      elf_section_data (output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE;
++    }
++
++  if (htab->elf.sgot)
++    {
++      asection *output_section = htab->elf.sgot->output_section;
++
++      if (htab->elf.sgot->size > 0)
++	{
++	  /* Set the first entry in the global offset table to the address of
++	     the dynamic section.  */
++	  bfd_vma val = sdyn ? sec_addr (sdyn) : 0;
++	  bfd_put_NN (output_bfd, val, htab->elf.sgot->contents);
++	}
++
++      elf_section_data (output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE;
++    }
++
++  return TRUE;
++}
++
++/* Return address for Ith PLT stub in section PLT, for relocation REL
++   or (bfd_vma) -1 if it should not be included.  */
++
++static bfd_vma
++riscv_elf_plt_sym_val (bfd_vma i, const asection *plt,
++		       const arelent *rel ATTRIBUTE_UNUSED)
++{
++  return plt->vma + PLT_HEADER_SIZE + i * PLT_ENTRY_SIZE;
++}
++
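++/* Return the reloc class of a dynamic relocation, used when sorting
++   dynamic relocs.  */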
++static enum elf_reloc_type_class
++riscv_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
++			const asection *rel_sec ATTRIBUTE_UNUSED,
++			const Elf_Internal_Rela *rela)
++{
++  switch (ELFNN_R_TYPE (rela->r_info))
++    {
++    case R_RISCV_RELATIVE:
++      return reloc_class_relative;
++    case R_RISCV_JUMP_SLOT:
++      return reloc_class_plt;
++    case R_RISCV_COPY:
++      return reloc_class_copy;
++    default:
++      return reloc_class_normal;
++    }
++}
++
++/* Merge backend specific data from an object file to the output
++   object file when linking.  */
++
++static bfd_boolean
++_bfd_riscv_elf_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
++{
++  flagword new_flags = elf_elfheader (ibfd)->e_flags;
++  flagword old_flags = elf_elfheader (obfd)->e_flags;
++
++  if (!is_riscv_elf (ibfd) || !is_riscv_elf (obfd))
++    return TRUE;
++
++  if (strcmp (bfd_get_target (ibfd), bfd_get_target (obfd)) != 0)
++    {
++      (*_bfd_error_handler)
++	(_("%B: ABI is incompatible with that of the selected emulation"),
++	 ibfd);
++      return FALSE;
++    }
++
++  if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
++    return FALSE;
++
++  if (! elf_flags_init (obfd))
++    {
++      elf_flags_init (obfd) = TRUE;
++      elf_elfheader (obfd)->e_flags = new_flags;
++      return TRUE;
++    }
++
++  /* Disallow linking different float ABIs.  */
++  if ((old_flags ^ new_flags) & EF_RISCV_FLOAT_ABI)
++    {
++      (*_bfd_error_handler)
++	(_("%B: can't link hard-float modules with soft-float modules"), ibfd);
++      goto fail;
++    }
++
++  /* Allow linking RVC and non-RVC, and keep the RVC flag.  */
++  elf_elfheader (obfd)->e_flags |= new_flags & EF_RISCV_RVC;
++
++  return TRUE;
++
++fail:
++  bfd_set_error (bfd_error_bad_value);
++  return FALSE;
++}
++
++/* Delete some bytes from a section while relaxing.  */
++
++static bfd_boolean
++riscv_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, size_t count)
++{
++  unsigned int i, symcount;
++  bfd_vma toaddr = sec->size;
++  struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
++  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
++  unsigned int sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
++  struct bfd_elf_section_data *data = elf_section_data (sec);
++  bfd_byte *contents = data->this_hdr.contents;
++
++  /* Actually delete the bytes.  */
++  sec->size -= count;
++  memmove (contents + addr, contents + addr + count, toaddr - addr - count);
++
++  /* Adjust the location of all of the relocs.  Note that we need not
++     adjust the addends, since all PC-relative references must be against
++     symbols, which we will adjust below.  */
++  for (i = 0; i < sec->reloc_count; i++)
++    if (data->relocs[i].r_offset > addr && data->relocs[i].r_offset < toaddr)
++      data->relocs[i].r_offset -= count;
++
++  /* Adjust the local symbols defined in this section.  */
++  for (i = 0; i < symtab_hdr->sh_info; i++)
++    {
++      Elf_Internal_Sym *sym = (Elf_Internal_Sym *) symtab_hdr->contents + i;
++      if (sym->st_shndx == sec_shndx)
++	{
++	  /* If the symbol is in the range of memory we just moved, we
++	     have to adjust its value.  */
++	  if (sym->st_value > addr && sym->st_value <= toaddr)
++	    sym->st_value -= count;
++
++	  /* If the symbol *spans* the bytes we just deleted (i.e. its
++	     *end* is in the moved bytes but its *start* isn't), then we
++	     must adjust its size.  */
++	  if (sym->st_value <= addr
++	      && sym->st_value + sym->st_size > addr
++	      && sym->st_value + sym->st_size <= toaddr)
++	    sym->st_size -= count;
++	}
++    }
++
++  /* Now adjust the global symbols defined in this section.  */
++  symcount = ((symtab_hdr->sh_size / sizeof (ElfNN_External_Sym))
++	      - symtab_hdr->sh_info);
++
++  for (i = 0; i < symcount; i++)
++    {
++      struct elf_link_hash_entry *sym_hash = sym_hashes[i];
++
++      if ((sym_hash->root.type == bfd_link_hash_defined
++	   || sym_hash->root.type == bfd_link_hash_defweak)
++	  && sym_hash->root.u.def.section == sec)
++	{
++	  /* As above, adjust the value if needed.  */
++	  if (sym_hash->root.u.def.value > addr
++	      && sym_hash->root.u.def.value <= toaddr)
++	    sym_hash->root.u.def.value -= count;
++
++	  /* As above, adjust the size if needed.  */
++	  if (sym_hash->root.u.def.value <= addr
++	      && sym_hash->root.u.def.value + sym_hash->size > addr
++	      && sym_hash->root.u.def.value + sym_hash->size <= toaddr)
++	    sym_hash->size -= count;
++	}
++    }
++
++  return TRUE;
++}
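
The symbol fix-up above boils down to two cases: a symbol that starts after the
deleted bytes moves down, and a symbol that spans them shrinks. A rough
stand-alone sketch of that rule with made-up addresses (the real code also
bounds both checks by the old section size).

/* Sketch of the symbol fix-up after deleting COUNT bytes at ADDR: symbols
   that start after the gap move down, symbols that span it shrink.  */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t addr = 0x40, count = 4;             /* hypothetical deletion */
  uint64_t sym_value = 0x30, sym_size = 0x20;  /* symbol covers 0x30..0x50 */

  if (sym_value > addr)
    sym_value -= count;        /* starts after the gap: move it down */
  else if (sym_value + sym_size > addr)
    sym_size -= count;         /* spans the gap: shrink it */

  printf ("value 0x%llx, size 0x%llx\n",
          (unsigned long long) sym_value, (unsigned long long) sym_size);
  return 0;
}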
++
++/* Relax AUIPC + JALR into JAL.  */
++
++static bfd_boolean
++_bfd_riscv_relax_call (bfd *abfd, asection *sec, asection *sym_sec,
++		       struct bfd_link_info *link_info,
++		       Elf_Internal_Rela *rel,
++		       bfd_vma symval,
++		       unsigned int max_alignment,
++		       bfd_boolean *again)
++{
++  bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
++  bfd_signed_vma foff = symval - (sec_addr (sec) + rel->r_offset);
++  bfd_boolean near_zero = (symval + RISCV_IMM_REACH/2) < RISCV_IMM_REACH;
++  bfd_vma auipc, jalr;
++  int rd, r_type, len = 4, rvc = elf_elfheader (abfd)->e_flags & EF_RISCV_RVC;
++
++  /* If the call crosses section boundaries, an alignment directive could
++     cause the PC-relative offset to later increase.  */
++  if (VALID_UJTYPE_IMM (foff) && sym_sec->output_section != sec->output_section)
++    foff += (foff < 0 ? -max_alignment : max_alignment);
++
++  /* See if this function call can be shortened.  */
++  if (!VALID_UJTYPE_IMM (foff) && !(!bfd_link_pic (link_info) && near_zero))
++    return TRUE;
++
++  /* Shorten the function call.  */
++  BFD_ASSERT (rel->r_offset + 8 <= sec->size);
++
++  auipc = bfd_get_32 (abfd, contents + rel->r_offset);
++  jalr = bfd_get_32 (abfd, contents + rel->r_offset + 4);
++  rd = (jalr >> OP_SH_RD) & OP_MASK_RD;
++  rvc = rvc && VALID_RVC_J_IMM (foff) && ARCH_SIZE == 32;
++
++  if (rvc && (rd == 0 || rd == X_RA))
++    {
++      /* Relax to C.J[AL] rd, addr.  */
++      r_type = R_RISCV_RVC_JUMP;
++      auipc = rd == 0 ? MATCH_C_J : MATCH_C_JAL;
++      len = 2;
++    }
++  else if (VALID_UJTYPE_IMM (foff))
++    {
++      /* Relax to JAL rd, addr.  */
++      r_type = R_RISCV_JAL;
++      auipc = MATCH_JAL | (rd << OP_SH_RD);
++    }
++  else /* near_zero */
++    {
++      /* Relax to JALR rd, x0, addr.  */
++      r_type = R_RISCV_LO12_I;
++      auipc = MATCH_JALR | (rd << OP_SH_RD);
++    }
++
++  /* Replace the R_RISCV_CALL reloc.  */
++  rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), r_type);
++  /* Replace the AUIPC.  */
++  bfd_put (8 * len, abfd, auipc, contents + rel->r_offset);
++
++  /* Delete unnecessary JALR.  */
++  *again = TRUE;
++  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + len, 8 - len);
++}
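
The decision above follows directly from the jump immediates: C.J/C.JAL reach
roughly +/-2 KiB and JAL roughly +/-1 MiB, so the 8-byte AUIPC+JALR pair can
shrink to 2 or 4 bytes when the target is close enough. A minimal sketch of
that range test, using the ranges implied by VALID_RVC_J_IMM and
VALID_UJTYPE_IMM rather than the macros themselves; the offset is hypothetical
and the extra rd/ARCH_SIZE conditions from the real code are omitted.

/* Range test behind the call relaxation: +/-1 MiB for JAL, +/-2 KiB for
   C.J/C.JAL, both with even offsets.  */
#include <stdint.h>
#include <stdio.h>

static int fits_jal (int64_t off)
{ return off >= -(1 << 20) && off < (1 << 20) && (off & 1) == 0; }

static int fits_rvc_j (int64_t off)
{ return off >= -(1 << 11) && off < (1 << 11) && (off & 1) == 0; }

int
main (void)
{
  int64_t foff = 0x400;   /* hypothetical PC-relative call offset */

  if (fits_rvc_j (foff))
    puts ("relax AUIPC+JALR (8 bytes) to C.J/C.JAL (2 bytes)");
  else if (fits_jal (foff))
    puts ("relax AUIPC+JALR (8 bytes) to JAL (4 bytes)");
  else
    puts ("keep the full AUIPC+JALR sequence");
  return 0;
}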
++
++/* Traverse all output sections and return the max alignment.  */
++
++static unsigned int
++_bfd_riscv_get_max_alignment (asection *sec)
++{
++  unsigned int max_alignment_power = 0;
++  asection *o;
++
++  for (o = sec->output_section->owner->sections; o != NULL; o = o->next)
++    {
++      if (o->alignment_power > max_alignment_power)
++	max_alignment_power = o->alignment_power;
++    }
++
++  return 1 << max_alignment_power;
++}
++
++/* Relax non-PIC global variable references.  */
++
++static bfd_boolean
++_bfd_riscv_relax_lui (bfd *abfd, asection *sec, asection *sym_sec,
++		      struct bfd_link_info *link_info,
++		      Elf_Internal_Rela *rel,
++		      bfd_vma symval,
++		      unsigned int max_alignment,
++		      bfd_boolean *again)
++{
++  bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
++  bfd_vma gp = riscv_global_pointer_value (link_info);
++  int use_rvc = elf_elfheader (abfd)->e_flags & EF_RISCV_RVC;
++
++  /* Mergeable symbols and code might later move out of range.  */
++  if (sym_sec->flags & (SEC_MERGE | SEC_CODE))
++    return TRUE;
++
++  BFD_ASSERT (rel->r_offset + 4 <= sec->size);
++
++  /* Is the reference in range of x0 or gp?  Be conservative about the
++     gp range to allow for output-section alignment.  */
++  if (VALID_ITYPE_IMM (symval)
++      || (symval >= gp && VALID_ITYPE_IMM (symval - gp + max_alignment))
++      || (symval < gp && VALID_ITYPE_IMM (symval - gp - max_alignment)))
++    {
++      unsigned sym = ELFNN_R_SYM (rel->r_info);
++      switch (ELFNN_R_TYPE (rel->r_info))
++	{
++	case R_RISCV_LO12_I:
++	  rel->r_info = ELFNN_R_INFO (sym, R_RISCV_GPREL_I);
++	  return TRUE;
++
++	case R_RISCV_LO12_S:
++	  rel->r_info = ELFNN_R_INFO (sym, R_RISCV_GPREL_S);
++	  return TRUE;
++
++	case R_RISCV_HI20:
++	  /* We can delete the unnecessary LUI and reloc.  */
++	  rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
++	  *again = TRUE;
++	  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
++
++	default:
++	  abort ();
++	}
++    }
++
++  /* Can we relax LUI to C.LUI?  Alignment might move the section forward;
++     account for this assuming page alignment at worst.  */
++  if (use_rvc
++      && ELFNN_R_TYPE (rel->r_info) == R_RISCV_HI20
++      && VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (symval))
++      && VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (symval + ELF_MAXPAGESIZE)))
++    {
++      /* Replace LUI with C.LUI if legal (i.e., rd != x2/sp).  */
++      bfd_vma lui = bfd_get_32 (abfd, contents + rel->r_offset);
++      if (((lui >> OP_SH_RD) & OP_MASK_RD) == X_SP)
++	return TRUE;
++
++      lui = (lui & (OP_MASK_RD << OP_SH_RD)) | MATCH_C_LUI;
++      bfd_put_32 (abfd, lui, contents + rel->r_offset);
++
++      /* Replace the R_RISCV_HI20 reloc.  */
++      rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), R_RISCV_RVC_LUI);
++
++      *again = TRUE;
++      return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + 2, 2);
++    }
++
++  return TRUE;
++}
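
The LUI can be dropped whenever the symbol fits in the 12-bit signed immediate
of a single I/S-type instruction, measured either from x0 or from the global
pointer. A rough sketch of that reachability test, assuming the 2 KiB reach of
RISCV_IMM_REACH and ignoring the max_alignment slack the real code adds; the
addresses are hypothetical.

/* Reachability test behind the LUI relaxation: a 12-bit signed immediate
   (RISCV_IMM_REACH = 0x1000) measured from x0 or from gp.  */
#include <stdint.h>
#include <stdio.h>

#define IMM_REACH 0x1000   /* 2^12 */

static int in_itype_range (int64_t v)
{ return v >= -(IMM_REACH / 2) && v < IMM_REACH / 2; }

int
main (void)
{
  uint64_t symval = 0x11400;  /* hypothetical symbol address */
  uint64_t gp     = 0x11000;  /* hypothetical __global_pointer$ value */

  if (in_itype_range ((int64_t) symval))
    puts ("reachable from x0: rewrite the LO12 reloc, delete the LUI");
  else if (in_itype_range ((int64_t) (symval - gp)))
    puts ("reachable from gp: rewrite to a GPREL reloc, delete the LUI");
  else
    puts ("keep the LUI/LO12 pair");
  return 0;
}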
++
++/* Relax non-PIC TLS references.  */
++
++static bfd_boolean
++_bfd_riscv_relax_tls_le (bfd *abfd, asection *sec,
++			 asection *sym_sec ATTRIBUTE_UNUSED,
++			 struct bfd_link_info *link_info,
++			 Elf_Internal_Rela *rel,
++			 bfd_vma symval,
++			 unsigned int max_alignment ATTRIBUTE_UNUSED,
++			 bfd_boolean *again)
++{
++  /* See if this symbol is in range of tp.  */
++  if (RISCV_CONST_HIGH_PART (tpoff (link_info, symval)) != 0)
++    return TRUE;
++
++  /* We can delete the unnecessary LUI and tp add.  The LO12 reloc will be
++     made directly tp-relative.  */
++  BFD_ASSERT (rel->r_offset + 4 <= sec->size);
++  rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
++
++  *again = TRUE;
++  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
++}
++
++/* Implement R_RISCV_ALIGN by deleting excess alignment NOPs.  */
++
++static bfd_boolean
++_bfd_riscv_relax_align (bfd *abfd, asection *sec,
++			asection *sym_sec ATTRIBUTE_UNUSED,
++			struct bfd_link_info *link_info ATTRIBUTE_UNUSED,
++			Elf_Internal_Rela *rel,
++			bfd_vma symval,
++			unsigned int max_alignment ATTRIBUTE_UNUSED,
++			bfd_boolean *again ATTRIBUTE_UNUSED)
++{
++  bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
++  bfd_vma alignment = 1, pos;
++  while (alignment <= rel->r_addend)
++    alignment *= 2;
++
++  symval -= rel->r_addend;
++  bfd_vma aligned_addr = ((symval - 1) & ~(alignment - 1)) + alignment;
++  bfd_vma nop_bytes = aligned_addr - symval;
++
++  /* Once we've handled an R_RISCV_ALIGN, we can't relax anything else.  */
++  sec->sec_flg0 = TRUE;
++
++  /* Make sure there are enough NOPs to actually achieve the alignment.  */
++  if (rel->r_addend < nop_bytes)
++    return FALSE;
++
++  /* Delete the reloc.  */
++  rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
++
++  /* If the number of NOPs is already correct, there's nothing to do.  */
++  if (nop_bytes == rel->r_addend)
++    return TRUE;
++
++  /* Write as many RISC-V NOPs as we need.  */
++  for (pos = 0; pos < (nop_bytes & -4); pos += 4)
++    bfd_put_32 (abfd, RISCV_NOP, contents + rel->r_offset + pos);
++
++  /* Write a final RVC NOP if need be.  */
++  if (nop_bytes % 4 != 0)
++    bfd_put_16 (abfd, RVC_NOP, contents + rel->r_offset + pos);
++
++  /* Delete the excess bytes.  */
++  return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + nop_bytes,
++				   rel->r_addend - nop_bytes);
++}
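
The computation above keeps just enough of the assembler-emitted NOPs to reach
the requested boundary and deletes the rest. A worked example with hypothetical
numbers: the addend is the number of NOP bytes emitted, and the target
alignment is that addend rounded up to the next power of two.

/* Worked example of the R_RISCV_ALIGN padding computation.  */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t addend = 6;        /* assembler emitted 6 bytes of NOPs */
  uint64_t symval = 0x10434;  /* address where the padding starts */

  uint64_t alignment = 1;
  while (alignment <= addend)
    alignment *= 2;           /* -> align to 8 bytes */

  uint64_t aligned_addr = ((symval - 1) & ~(alignment - 1)) + alignment;
  uint64_t nop_bytes = aligned_addr - symval;    /* NOP bytes to keep: 4 */
  uint64_t delete_bytes = addend - nop_bytes;    /* NOP bytes to drop: 2 */

  printf ("keep %llu NOP bytes, delete %llu\n",
          (unsigned long long) nop_bytes, (unsigned long long) delete_bytes);
  return 0;
}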
++
++/* Relax a section.  Pass 0 shortens code sequences unless disabled.
++   Pass 1, which cannot be disabled, handles code alignment directives.  */
++
++static bfd_boolean
++_bfd_riscv_relax_section (bfd *abfd, asection *sec,
++			  struct bfd_link_info *info, bfd_boolean *again)
++{
++  Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
++  struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++  struct bfd_elf_section_data *data = elf_section_data (sec);
++  Elf_Internal_Rela *relocs;
++  bfd_boolean ret = FALSE;
++  unsigned int i;
++  unsigned int max_alignment;
++
++  *again = FALSE;
++
++  if (bfd_link_relocatable (info)
++      || sec->sec_flg0
++      || (sec->flags & SEC_RELOC) == 0
++      || sec->reloc_count == 0
++      || (info->disable_target_specific_optimizations
++	  && info->relax_pass == 0))
++    return TRUE;
++
++  /* Read this BFD's relocs if we haven't done so already.  */
++  if (data->relocs)
++    relocs = data->relocs;
++  else if (!(relocs = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL,
++						 info->keep_memory)))
++    goto fail;
++
++  max_alignment = _bfd_riscv_get_max_alignment (sec);
++
++  /* Examine and consider relaxing each reloc.  */
++  for (i = 0; i < sec->reloc_count; i++)
++    {
++      asection *sym_sec;
++      Elf_Internal_Rela *rel = relocs + i;
++      typeof (&_bfd_riscv_relax_call) relax_func = NULL;
++      int type = ELFNN_R_TYPE (rel->r_info);
++      bfd_vma symval;
++
++      if (info->relax_pass == 0)
++	{
++	  if (type == R_RISCV_CALL || type == R_RISCV_CALL_PLT)
++	    relax_func = _bfd_riscv_relax_call;
++	  else if (type == R_RISCV_HI20
++		   || type == R_RISCV_LO12_I
++		   || type == R_RISCV_LO12_S)
++	    relax_func = _bfd_riscv_relax_lui;
++	  else if (type == R_RISCV_TPREL_HI20 || type == R_RISCV_TPREL_ADD)
++	    relax_func = _bfd_riscv_relax_tls_le;
++	}
++      else if (type == R_RISCV_ALIGN)
++	relax_func = _bfd_riscv_relax_align;
++
++      if (!relax_func)
++	continue;
++
++      data->relocs = relocs;
++
++      /* Read this BFD's contents if we haven't done so already.  */
++      if (!data->this_hdr.contents
++	  && !bfd_malloc_and_get_section (abfd, sec, &data->this_hdr.contents))
++	goto fail;
++
++      /* Read this BFD's symbols if we haven't done so already.  */
++      if (symtab_hdr->sh_info != 0
++	  && !symtab_hdr->contents
++	  && !(symtab_hdr->contents =
++	       (unsigned char *) bfd_elf_get_elf_syms (abfd, symtab_hdr,
++						       symtab_hdr->sh_info,
++						       0, NULL, NULL, NULL)))
++	goto fail;
++
++      /* Get the value of the symbol referred to by the reloc.  */
++      if (ELFNN_R_SYM (rel->r_info) < symtab_hdr->sh_info)
++	{
++	  /* A local symbol.  */
++	  Elf_Internal_Sym *isym = ((Elf_Internal_Sym *) symtab_hdr->contents
++				    + ELFNN_R_SYM (rel->r_info));
++
++	  if (isym->st_shndx == SHN_UNDEF)
++	    sym_sec = sec, symval = sec_addr (sec) + rel->r_offset;
++	  else
++	    {
++	      BFD_ASSERT (isym->st_shndx < elf_numsections (abfd));
++	      sym_sec = elf_elfsections (abfd)[isym->st_shndx]->bfd_section;
++	      if (sec_addr (sym_sec) == 0)
++		continue;
++	      symval = sec_addr (sym_sec) + isym->st_value;
++	    }
++	}
++      else
++	{
++	  unsigned long indx;
++	  struct elf_link_hash_entry *h;
++
++	  indx = ELFNN_R_SYM (rel->r_info) - symtab_hdr->sh_info;
++	  h = elf_sym_hashes (abfd)[indx];
++
++	  while (h->root.type == bfd_link_hash_indirect
++		 || h->root.type == bfd_link_hash_warning)
++	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
++
++	  if (h->plt.offset != MINUS_ONE)
++	    symval = sec_addr (htab->elf.splt) + h->plt.offset;
++	  else if (h->root.u.def.section->output_section == NULL
++		   || (h->root.type != bfd_link_hash_defined
++		       && h->root.type != bfd_link_hash_defweak))
++	    continue;
++	  else
++	    symval = sec_addr (h->root.u.def.section) + h->root.u.def.value;
++
++	  sym_sec = h->root.u.def.section;
++	}
++
++      symval += rel->r_addend;
++
++      if (!relax_func (abfd, sec, sym_sec, info, rel, symval,
++		       max_alignment, again))
++	goto fail;
++    }
++
++  ret = TRUE;
++
++fail:
++  if (relocs != data->relocs)
++    free (relocs);
++
++  return ret;
++}
++
++#if ARCH_SIZE == 32
++# define PRSTATUS_SIZE			0 /* FIXME */
++# define PRSTATUS_OFFSET_PR_CURSIG	12
++# define PRSTATUS_OFFSET_PR_PID		24
++# define PRSTATUS_OFFSET_PR_REG		72
++# define ELF_GREGSET_T_SIZE		128
++# define PRPSINFO_SIZE			128
++# define PRPSINFO_OFFSET_PR_PID		16
++# define PRPSINFO_OFFSET_PR_FNAME	32
++# define PRPSINFO_OFFSET_PR_PSARGS	48
++#else
++# define PRSTATUS_SIZE			376
++# define PRSTATUS_OFFSET_PR_CURSIG	12
++# define PRSTATUS_OFFSET_PR_PID		32
++# define PRSTATUS_OFFSET_PR_REG		112
++# define ELF_GREGSET_T_SIZE		256
++# define PRPSINFO_SIZE			136
++# define PRPSINFO_OFFSET_PR_PID		24
++# define PRPSINFO_OFFSET_PR_FNAME	40
++# define PRPSINFO_OFFSET_PR_PSARGS	56
++#endif
++
++/* Support for core dump NOTE sections.  */
++
++static bfd_boolean
++riscv_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
++{
++  switch (note->descsz)
++    {
++      default:
++	return FALSE;
++
++      case PRSTATUS_SIZE:  /* sizeof(struct elf_prstatus) on Linux/RISC-V.  */
++	/* pr_cursig */
++	elf_tdata (abfd)->core->signal
++	  = bfd_get_16 (abfd, note->descdata + PRSTATUS_OFFSET_PR_CURSIG);
++
++	/* pr_pid */
++	elf_tdata (abfd)->core->lwpid
++	  = bfd_get_32 (abfd, note->descdata + PRSTATUS_OFFSET_PR_PID);
++	break;
++    }
++
++  /* Make a ".reg/999" section.  */
++  return _bfd_elfcore_make_pseudosection (abfd, ".reg", ELF_GREGSET_T_SIZE,
++					  note->descpos + PRSTATUS_OFFSET_PR_REG);
++}
++
++static bfd_boolean
++riscv_elf_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
++{
++  switch (note->descsz)
++    {
++      default:
++	return FALSE;
++
++      case PRPSINFO_SIZE: /* sizeof(struct elf_prpsinfo) on Linux/RISC-V.  */
++	/* pr_pid */
++	elf_tdata (abfd)->core->pid
++	  = bfd_get_32 (abfd, note->descdata + PRPSINFO_OFFSET_PR_PID);
++
++	/* pr_fname */
++	elf_tdata (abfd)->core->program = _bfd_elfcore_strndup
++	  (abfd, note->descdata + PRPSINFO_OFFSET_PR_FNAME, 16);
++
++	/* pr_psargs */
++	elf_tdata (abfd)->core->command = _bfd_elfcore_strndup
++	  (abfd, note->descdata + PRPSINFO_OFFSET_PR_PSARGS, 80);
++	break;
++    }
++
++  /* Note that for some reason, a spurious space is tacked onto the
++     end of the args in some implementations (at least one, anyway),
++     so strip it off if it exists.  */
++
++  {
++    char *command = elf_tdata (abfd)->core->command;
++    int n = strlen (command);
++
++    if (0 < n && command[n - 1] == ' ')
++      command[n - 1] = '\0';
++  }
++
++  return TRUE;
++}
++
++
++#define TARGET_LITTLE_SYM		riscv_elfNN_vec
++#define TARGET_LITTLE_NAME		"elfNN-littleriscv"
++
++#define elf_backend_reloc_type_class	     riscv_reloc_type_class
++
++#define bfd_elfNN_bfd_reloc_name_lookup	     riscv_reloc_name_lookup
++#define bfd_elfNN_bfd_link_hash_table_create riscv_elf_link_hash_table_create
++#define bfd_elfNN_bfd_reloc_type_lookup	     riscv_reloc_type_lookup
++#define bfd_elfNN_bfd_merge_private_bfd_data \
++  _bfd_riscv_elf_merge_private_bfd_data
++
++#define elf_backend_copy_indirect_symbol     riscv_elf_copy_indirect_symbol
++#define elf_backend_create_dynamic_sections  riscv_elf_create_dynamic_sections
++#define elf_backend_check_relocs	     riscv_elf_check_relocs
++#define elf_backend_adjust_dynamic_symbol    riscv_elf_adjust_dynamic_symbol
++#define elf_backend_size_dynamic_sections    riscv_elf_size_dynamic_sections
++#define elf_backend_relocate_section	     riscv_elf_relocate_section
++#define elf_backend_finish_dynamic_symbol    riscv_elf_finish_dynamic_symbol
++#define elf_backend_finish_dynamic_sections  riscv_elf_finish_dynamic_sections
++#define elf_backend_gc_mark_hook	     riscv_elf_gc_mark_hook
++#define elf_backend_gc_sweep_hook	     riscv_elf_gc_sweep_hook
++#define elf_backend_plt_sym_val		     riscv_elf_plt_sym_val
++#define elf_backend_grok_prstatus            riscv_elf_grok_prstatus
++#define elf_backend_grok_psinfo              riscv_elf_grok_psinfo
++#define elf_info_to_howto_rel		     NULL
++#define elf_info_to_howto		     riscv_info_to_howto_rela
++#define bfd_elfNN_bfd_relax_section	     _bfd_riscv_relax_section
++
++#define elf_backend_init_index_section	     _bfd_elf_init_1_index_section
++
++#define elf_backend_can_gc_sections	1
++#define elf_backend_can_refcount	1
++#define elf_backend_want_got_plt	1
++#define elf_backend_plt_readonly	1
++#define elf_backend_plt_alignment	4
++#define elf_backend_want_plt_sym	1
++#define elf_backend_got_header_size	(ARCH_SIZE / 8)
++#define elf_backend_rela_normal		1
++#define elf_backend_default_execstack	0
++
++#include "elfNN-target.h"
+diff --git original-binutils/bfd/elfxx-riscv.c binutils-2_27/bfd/elfxx-riscv.c
+new file mode 100644
+index 0000000..e51514c
+--- /dev/null
++++ binutils-2_27/bfd/elfxx-riscv.c
+@@ -0,0 +1,814 @@
++/* RISC-V-specific support for ELF.
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on TILE-Gx and MIPS targets.
++
++   This file is part of BFD, the Binary File Descriptor library.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++#include "elf-bfd.h"
++#include "elf/riscv.h"
++#include "opcode/riscv.h"
++#include "libiberty.h"
++#include "elfxx-riscv.h"
++#include <stdint.h>
++
++#define MINUS_ONE ((bfd_vma)0 - 1)
++
++/* The relocation table used for SHT_RELA sections.  */
++
++static reloc_howto_type howto_table[] =
++{
++  /* No relocation.  */
++  HOWTO (R_RISCV_NONE,			/* type */
++	 0,				/* rightshift */
++	 3,				/* size */
++	 0,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_NONE",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0,				/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 32 bit relocation.  */
++  HOWTO (R_RISCV_32,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_32",			/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0xffffffff,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 64 bit relocation.  */
++  HOWTO (R_RISCV_64,			/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_64",			/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Relocation against a local symbol in a shared object.  */
++  HOWTO (R_RISCV_RELATIVE,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_RELATIVE",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0xffffffff,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_COPY,			/* type */
++	 0,				/* rightshift */
++	 0,				/* this one is variable size */
++	 0,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_bitfield,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_COPY",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0x0,         			/* src_mask */
++	 0x0,		        	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_JUMP_SLOT,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_bitfield,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_JUMP_SLOT",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0x0,         			/* src_mask */
++	 0x0,		        	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Dynamic TLS relocations.  */
++  HOWTO (R_RISCV_TLS_DTPMOD32,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc, 	/* special_function */
++	 "R_RISCV_TLS_DTPMOD32",	/* name */
++	 FALSE,				/* partial_inplace */
++	 MINUS_ONE,			/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_TLS_DTPMOD64,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc, 	/* special_function */
++	 "R_RISCV_TLS_DTPMOD64",	/* name */
++	 FALSE,				/* partial_inplace */
++	 MINUS_ONE,			/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_TLS_DTPREL32,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc, 	/* special_function */
++	 "R_RISCV_TLS_DTPREL32",	/* name */
++	 TRUE,				/* partial_inplace */
++	 MINUS_ONE,			/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_TLS_DTPREL64,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc, 	/* special_function */
++	 "R_RISCV_TLS_DTPREL64",	/* name */
++	 TRUE,				/* partial_inplace */
++	 MINUS_ONE,			/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_TLS_TPREL32,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc, 	/* special_function */
++	 "R_RISCV_TLS_TPREL32",		/* name */
++	 FALSE,				/* partial_inplace */
++	 MINUS_ONE,			/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  HOWTO (R_RISCV_TLS_TPREL64,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc, 	/* special_function */
++	 "R_RISCV_TLS_TPREL64",		/* name */
++	 FALSE,				/* partial_inplace */
++	 MINUS_ONE,			/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Reserved for future relocs that the dynamic linker must understand.  */
++  EMPTY_HOWTO (12),
++  EMPTY_HOWTO (13),
++  EMPTY_HOWTO (14),
++  EMPTY_HOWTO (15),
++
++  /* 12-bit PC-relative branch offset.  */
++  HOWTO (R_RISCV_BRANCH,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_signed,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_BRANCH",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_SBTYPE_IMM (-1U),	/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* 20-bit PC-relative jump offset.  */
++  HOWTO (R_RISCV_JAL,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++					/* This needs complex overflow
++					   detection, because the upper 36
++					   bits must match the PC + 4.  */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_JAL",			/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UJTYPE_IMM (-1U),	/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* 32-bit PC-relative function call (AUIPC/JALR).  */
++  HOWTO (R_RISCV_CALL,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 64,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_CALL",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U) | ((bfd_vma) ENCODE_ITYPE_IMM (-1U) << 32),
++					/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* 32-bit PC-relative function call (AUIPC/JALR).  */
++  HOWTO (R_RISCV_CALL_PLT,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 64,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_CALL_PLT",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U) | ((bfd_vma) ENCODE_ITYPE_IMM (-1U) << 32),
++					/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* High 20 bits of 32-bit PC-relative GOT access.  */
++  HOWTO (R_RISCV_GOT_HI20,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_GOT_HI20",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* High 20 bits of 32-bit PC-relative TLS IE GOT access.  */
++  HOWTO (R_RISCV_TLS_GOT_HI20,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_TLS_GOT_HI20",	/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* High 20 bits of 32-bit PC-relative TLS GD GOT reference.  */
++  HOWTO (R_RISCV_TLS_GD_HI20,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_TLS_GD_HI20",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* High 20 bits of 32-bit PC-relative reference.  */
++  HOWTO (R_RISCV_PCREL_HI20,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_PCREL_HI20",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* Low 12 bits of a 32-bit PC-relative load or add.  */
++  HOWTO (R_RISCV_PCREL_LO12_I,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_PCREL_LO12_I",	/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Low 12 bits of a 32-bit PC-relative store.  */
++  HOWTO (R_RISCV_PCREL_LO12_S,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_PCREL_LO12_S",	/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* High 20 bits of 32-bit absolute address.  */
++  HOWTO (R_RISCV_HI20,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_HI20",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Low 12 bits of 32-bit load or add.  */
++  HOWTO (R_RISCV_LO12_I,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_LO12_I",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Low 12 bits of 32-bit store.  */
++  HOWTO (R_RISCV_LO12_S,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_LO12_S",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* High 20 bits of TLS LE thread pointer offset.  */
++  HOWTO (R_RISCV_TPREL_HI20,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_signed,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_TPREL_HI20",		/* name */
++	 TRUE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_UTYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Low 12 bits of TLS LE thread pointer offset for loads and adds.  */
++  HOWTO (R_RISCV_TPREL_LO12_I,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_signed,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_TPREL_LO12_I",	/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Low 12 bits of TLS LE thread pointer offset for stores.  */
++  HOWTO (R_RISCV_TPREL_LO12_S,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_signed,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_TPREL_LO12_S",	/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* TLS LE thread pointer usage.  */
++  HOWTO (R_RISCV_TPREL_ADD,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_TPREL_ADD",		/* name */
++	 TRUE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0,				/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 8-bit in-place addition, for local label subtraction.  */
++  HOWTO (R_RISCV_ADD8,			/* type */
++	 0,				/* rightshift */
++	 0,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_ADD8",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 16-bit in-place addition, for local label subtraction.  */
++  HOWTO (R_RISCV_ADD16,			/* type */
++	 0,				/* rightshift */
++	 1,				/* size */
++	 16,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_ADD16",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 32-bit in-place addition, for local label subtraction.  */
++  HOWTO (R_RISCV_ADD32,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_ADD32",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 64-bit in-place addition, for local label subtraction.  */
++  HOWTO (R_RISCV_ADD64,			/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_ADD64",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 8-bit in-place subtraction, for local label subtraction.  */
++  HOWTO (R_RISCV_SUB8,			/* type */
++	 0,				/* rightshift */
++	 0,				/* size */
++	 8,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_SUB8",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 16-bit in-place subtraction, for local label subtraction.  */
++  HOWTO (R_RISCV_SUB16,			/* type */
++	 0,				/* rightshift */
++	 1,				/* size */
++	 16,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_SUB16",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 32-bit in-place subtraction, for local label subtraction.  */
++  HOWTO (R_RISCV_SUB32,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_SUB32",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 64-bit in-place subtraction, for local label subtraction.  */
++  HOWTO (R_RISCV_SUB64,			/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 64,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_SUB64",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 MINUS_ONE,			/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* GNU extension to record C++ vtable hierarchy */
++  HOWTO (R_RISCV_GNU_VTINHERIT,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 0,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 NULL,				/* special_function */
++	 "R_RISCV_GNU_VTINHERIT",	/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0,				/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* GNU extension to record C++ vtable member usage */
++  HOWTO (R_RISCV_GNU_VTENTRY,		/* type */
++	 0,				/* rightshift */
++	 4,				/* size */
++	 0,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 _bfd_elf_rel_vtable_reloc_fn,	/* special_function */
++	 "R_RISCV_GNU_VTENTRY",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0,				/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* Indicates an alignment statement.  The addend field encodes how many
++     bytes of NOPs follow the statement.  The desired alignment is the
++     addend rounded up to the next power of two.  */
++  HOWTO (R_RISCV_ALIGN,			/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 0,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_ALIGN",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 0,				/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* 8-bit PC-relative branch offset.  */
++  HOWTO (R_RISCV_RVC_BRANCH,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_signed,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_RVC_BRANCH",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_RVC_B_IMM (-1U),	/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* 11-bit PC-relative jump offset.  */
++  HOWTO (R_RISCV_RVC_JUMP,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 TRUE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++					/* This needs complex overflow
++					   detection, because the upper 36
++					   bits must match the PC + 4.  */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_RVC_JUMP",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_RVC_J_IMM (-1U),	/* dst_mask */
++	 TRUE),				/* pcrel_offset */
++
++  /* High 6 bits of 18-bit absolute address.  */
++  HOWTO (R_RISCV_RVC_LUI,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_RVC_LUI",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_RVC_IMM (-1U),		/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 12-bit GP-relative offset in a load or add (I-type immediate).  */
++  HOWTO (R_RISCV_GPREL_I,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_GPREL_I",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_ITYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++
++  /* 12-bit GP-relative offset in a store (S-type immediate).  */
++  HOWTO (R_RISCV_GPREL_S,		/* type */
++	 0,				/* rightshift */
++	 2,				/* size */
++	 32,				/* bitsize */
++	 FALSE,				/* pc_relative */
++	 0,				/* bitpos */
++	 complain_overflow_dont,	/* complain_on_overflow */
++	 bfd_elf_generic_reloc,		/* special_function */
++	 "R_RISCV_GPREL_S",		/* name */
++	 FALSE,				/* partial_inplace */
++	 0,				/* src_mask */
++	 ENCODE_STYPE_IMM (-1U),	/* dst_mask */
++	 FALSE),			/* pcrel_offset */
++};
++
++/* A mapping from BFD reloc types to RISC-V ELF reloc types.  */
++
++struct elf_reloc_map {
++  bfd_reloc_code_real_type bfd_val;
++  enum elf_riscv_reloc_type elf_val;
++};
++
++static const struct elf_reloc_map riscv_reloc_map[] =
++{
++  { BFD_RELOC_NONE, R_RISCV_NONE },
++  { BFD_RELOC_32, R_RISCV_32 },
++  { BFD_RELOC_64, R_RISCV_64 },
++  { BFD_RELOC_RISCV_ADD8, R_RISCV_ADD8 },
++  { BFD_RELOC_RISCV_ADD16, R_RISCV_ADD16 },
++  { BFD_RELOC_RISCV_ADD32, R_RISCV_ADD32 },
++  { BFD_RELOC_RISCV_ADD64, R_RISCV_ADD64 },
++  { BFD_RELOC_RISCV_SUB8, R_RISCV_SUB8 },
++  { BFD_RELOC_RISCV_SUB16, R_RISCV_SUB16 },
++  { BFD_RELOC_RISCV_SUB32, R_RISCV_SUB32 },
++  { BFD_RELOC_RISCV_SUB64, R_RISCV_SUB64 },
++  { BFD_RELOC_CTOR, R_RISCV_64 },
++  { BFD_RELOC_12_PCREL, R_RISCV_BRANCH },
++  { BFD_RELOC_RISCV_HI20, R_RISCV_HI20 },
++  { BFD_RELOC_RISCV_LO12_I, R_RISCV_LO12_I },
++  { BFD_RELOC_RISCV_LO12_S, R_RISCV_LO12_S },
++  { BFD_RELOC_RISCV_PCREL_LO12_I, R_RISCV_PCREL_LO12_I },
++  { BFD_RELOC_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_S },
++  { BFD_RELOC_RISCV_CALL, R_RISCV_CALL },
++  { BFD_RELOC_RISCV_CALL_PLT, R_RISCV_CALL_PLT },
++  { BFD_RELOC_RISCV_PCREL_HI20, R_RISCV_PCREL_HI20 },
++  { BFD_RELOC_RISCV_JMP, R_RISCV_JAL },
++  { BFD_RELOC_RISCV_GOT_HI20, R_RISCV_GOT_HI20 },
++  { BFD_RELOC_RISCV_TLS_DTPMOD32, R_RISCV_TLS_DTPMOD32 },
++  { BFD_RELOC_RISCV_TLS_DTPREL32, R_RISCV_TLS_DTPREL32 },
++  { BFD_RELOC_RISCV_TLS_DTPMOD64, R_RISCV_TLS_DTPMOD64 },
++  { BFD_RELOC_RISCV_TLS_DTPREL64, R_RISCV_TLS_DTPREL64 },
++  { BFD_RELOC_RISCV_TLS_TPREL32, R_RISCV_TLS_TPREL32 },
++  { BFD_RELOC_RISCV_TLS_TPREL64, R_RISCV_TLS_TPREL64 },
++  { BFD_RELOC_RISCV_TPREL_HI20, R_RISCV_TPREL_HI20 },
++  { BFD_RELOC_RISCV_TPREL_ADD, R_RISCV_TPREL_ADD },
++  { BFD_RELOC_RISCV_TPREL_LO12_S, R_RISCV_TPREL_LO12_S },
++  { BFD_RELOC_RISCV_TPREL_LO12_I, R_RISCV_TPREL_LO12_I },
++  { BFD_RELOC_RISCV_TLS_GOT_HI20, R_RISCV_TLS_GOT_HI20 },
++  { BFD_RELOC_RISCV_TLS_GD_HI20, R_RISCV_TLS_GD_HI20 },
++  { BFD_RELOC_RISCV_ALIGN, R_RISCV_ALIGN },
++  { BFD_RELOC_RISCV_RVC_BRANCH, R_RISCV_RVC_BRANCH },
++  { BFD_RELOC_RISCV_RVC_JUMP, R_RISCV_RVC_JUMP },
++  { BFD_RELOC_RISCV_RVC_LUI, R_RISCV_RVC_LUI },
++  { BFD_RELOC_RISCV_GPREL_I, R_RISCV_GPREL_I },
++  { BFD_RELOC_RISCV_GPREL_S, R_RISCV_GPREL_S },
++};
++
++/* Given a BFD reloc type, return a howto structure.  */
++
++reloc_howto_type *
++riscv_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++			 bfd_reloc_code_real_type code)
++{
++  unsigned int i;
++
++  for (i = 0; i < ARRAY_SIZE (riscv_reloc_map); i++)
++    if (riscv_reloc_map[i].bfd_val == code)
++      return &howto_table[(int) riscv_reloc_map[i].elf_val];
++
++  bfd_set_error (bfd_error_bad_value);
++  return NULL;
++}
++
++reloc_howto_type *
++riscv_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, const char *r_name)
++{
++  unsigned int i;
++
++  for (i = 0; i < ARRAY_SIZE (howto_table); i++)
++    if (howto_table[i].name && strcasecmp (howto_table[i].name, r_name) == 0)
++      return &howto_table[i];
++
++  return NULL;
++}
++
++reloc_howto_type *
++riscv_elf_rtype_to_howto (unsigned int r_type)
++{
++  if (r_type >= ARRAY_SIZE (howto_table))
++    {
++      (*_bfd_error_handler) (_("unrecognized relocation (0x%x)"), r_type);
++      bfd_set_error (bfd_error_bad_value);
++      return NULL;
++    }
++  return &howto_table[r_type];
++}
+diff --git original-binutils/bfd/elfxx-riscv.h binutils-2_27/bfd/elfxx-riscv.h
+new file mode 100644
+index 0000000..1e5d91f
+--- /dev/null
++++ binutils-2_27/bfd/elfxx-riscv.h
+@@ -0,0 +1,33 @@
++/* RISC-V ELF specific backend routines.
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target.
++
++   This file is part of BFD, the Binary File Descriptor library.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#include "elf/common.h"
++#include "elf/internal.h"
++
++extern reloc_howto_type *
++riscv_reloc_name_lookup (bfd *, const char *);
++
++extern reloc_howto_type *
++riscv_reloc_type_lookup (bfd *, bfd_reloc_code_real_type);
++
++extern reloc_howto_type *
++riscv_elf_rtype_to_howto (unsigned int r_type);
+diff --git original-binutils/bfd/targets.c binutils-2_27/bfd/targets.c
+index a9edd4c..9036917 100644
+--- original-binutils/bfd/targets.c
++++ binutils-2_27/bfd/targets.c
+@@ -799,6 +799,8 @@ extern const bfd_target powerpc_pe_le_vec;
+ extern const bfd_target powerpc_pei_vec;
+ extern const bfd_target powerpc_pei_le_vec;
+ extern const bfd_target powerpc_xcoff_vec;
++extern const bfd_target riscv_elf32_vec;
++extern const bfd_target riscv_elf64_vec;
+ extern const bfd_target rl78_elf32_vec;
+ extern const bfd_target rs6000_xcoff64_vec;
+ extern const bfd_target rs6000_xcoff64_aix_vec;
+diff --git original-binutils/binutils/readelf.c binutils-2_27/binutils/readelf.c
+index 274ddd1..c31d87a 100644
+--- original-binutils/binutils/readelf.c
++++ binutils-2_27/binutils/readelf.c
+@@ -124,6 +124,7 @@
+ #include "elf/metag.h"
+ #include "elf/microblaze.h"
+ #include "elf/mips.h"
++#include "elf/riscv.h"
+ #include "elf/mmix.h"
+ #include "elf/mn10200.h"
+ #include "elf/mn10300.h"
+@@ -775,6 +776,7 @@ guess_is_rela (unsigned int e_machine)
+     case EM_OR1K:
+     case EM_PPC64:
+     case EM_PPC:
++    case EM_RISCV:
+     case EM_RL78:
+     case EM_RX:
+     case EM_S390:
+@@ -1313,6 +1315,10 @@ dump_relocations (FILE * file,
+ 	  rtype = elf_mips_reloc_type (type);
+ 	  break;
+ 
++	case EM_RISCV:
++	  rtype = elf_riscv_reloc_type (type);
++	  break;
++
+ 	case EM_ALPHA:
+ 	  rtype = elf_alpha_reloc_type (type);
+ 	  break;
+@@ -2325,6 +2331,7 @@ get_machine_name (unsigned e_machine)
+     case EM_CR16:
+     case EM_MICROBLAZE:
+     case EM_MICROBLAZE_OLD:	return "Xilinx MicroBlaze";
++    case EM_RISCV:		return "RISC-V";
+     case EM_RL78:		return "Renesas RL78";
+     case EM_RX:			return "Renesas RX";
+     case EM_METAG:		return "Imagination Technologies Meta processor architecture";
+@@ -3283,6 +3290,29 @@ get_machine_flags (unsigned e_flags, unsigned e_machine)
+ 	  decode_NDS32_machine_flags (e_flags, buf, sizeof buf);
+ 	  break;
+ 
++	case EM_RISCV:
++	  if (e_flags & EF_RISCV_RVC)
++	    strcat (buf, ", RVC");
++	  switch (e_flags & EF_RISCV_FLOAT_ABI)
++	    {
++	    case EF_RISCV_FLOAT_ABI_SOFT:
++	      strcat (buf, ", soft-float ABI");
++	      break;
++
++	    case EF_RISCV_FLOAT_ABI_SINGLE:
++	      strcat (buf, ", single-float ABI");
++	      break;
++
++	    case EF_RISCV_FLOAT_ABI_DOUBLE:
++	      strcat (buf, ", double-float ABI");
++	      break;
++
++	    case EF_RISCV_FLOAT_ABI_QUAD:
++	      strcat (buf, ", quad-float ABI");
++	      break;
++	    }
++	  break;
++
+ 	case EM_SH:
+ 	  switch ((e_flags & EF_SH_MACH_MASK))
+ 	    {
+@@ -11610,6 +11640,8 @@ is_32bit_abs_reloc (unsigned int reloc_type)
+       return reloc_type == 1; /* R_PPC64_ADDR32.  */
+     case EM_PPC:
+       return reloc_type == 1; /* R_PPC_ADDR32.  */
++    case EM_RISCV:
++      return reloc_type == 1; /* R_RISCV_32.  */
+     case EM_RL78:
+       return reloc_type == 1; /* R_RL78_DIR32.  */
+     case EM_RX:
+@@ -11763,6 +11795,8 @@ is_64bit_abs_reloc (unsigned int reloc_type)
+       return reloc_type == 80; /* R_PARISC_DIR64.  */
+     case EM_PPC64:
+       return reloc_type == 38; /* R_PPC64_ADDR64.  */
++    case EM_RISCV:
++      return reloc_type == 2; /* R_RISCV_64.  */
+     case EM_SPARC32PLUS:
+     case EM_SPARCV9:
+     case EM_SPARC:
+@@ -11933,6 +11967,7 @@ is_none_reloc (unsigned int reloc_type)
+     case EM_PARISC:  /* R_PARISC_NONE.  */
+     case EM_PPC64:   /* R_PPC64_NONE.  */
+     case EM_PPC:     /* R_PPC_NONE.  */
++    case EM_RISCV:   /* R_RISCV_NONE.  */
+     case EM_S390:    /* R_390_NONE.  */
+     case EM_S390_OLD:
+     case EM_SH:      /* R_SH_NONE.  */
+diff --git original-binutils/gas/Makefile.am binutils-2_27/gas/Makefile.am
+index 596e469..b9f1584 100644
+--- original-binutils/gas/Makefile.am
++++ binutils-2_27/gas/Makefile.am
+@@ -177,6 +177,7 @@ TARGET_CPU_CFILES = \
+ 	config/tc-pdp11.c \
+ 	config/tc-pj.c \
+ 	config/tc-ppc.c \
++	config/tc-riscv.c \
+ 	config/tc-rl78.c \
+ 	config/tc-rx.c \
+ 	config/tc-s390.c \
+@@ -250,6 +251,7 @@ TARGET_CPU_HFILES = \
+ 	config/tc-pdp11.h \
+ 	config/tc-pj.h \
+ 	config/tc-ppc.h \
++	config/tc-riscv.h \
+ 	config/tc-rl78.h \
+ 	config/tc-rx.h \
+ 	config/tc-s390.h \
+diff --git original-binutils/gas/Makefile.in binutils-2_27/gas/Makefile.in
+index 59746f3..20c16e1 100644
+--- original-binutils/gas/Makefile.in
++++ binutils-2_27/gas/Makefile.in
+@@ -471,6 +471,7 @@ TARGET_CPU_CFILES = \
+ 	config/tc-pdp11.c \
+ 	config/tc-pj.c \
+ 	config/tc-ppc.c \
++	config/tc-riscv.c \
+ 	config/tc-rl78.c \
+ 	config/tc-rx.c \
+ 	config/tc-s390.c \
+@@ -544,6 +545,7 @@ TARGET_CPU_HFILES = \
+ 	config/tc-pdp11.h \
+ 	config/tc-pj.h \
+ 	config/tc-ppc.h \
++	config/tc-riscv.h \
+ 	config/tc-rl78.h \
+ 	config/tc-rx.h \
+ 	config/tc-s390.h \
+@@ -899,6 +901,7 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-pdp11.Po at am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-pj.Po at am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-ppc.Po at am__quote@
++ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-riscv.Po at am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-rl78.Po at am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-rx.Po at am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tc-s390.Po at am__quote@
+@@ -1619,6 +1622,20 @@ tc-ppc.obj: config/tc-ppc.c
+ @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ @am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-ppc.obj `if test -f 'config/tc-ppc.c'; then $(CYGPATH_W) 'config/tc-ppc.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-ppc.c'; fi`
+ 
++tc-riscv.o: config/tc-riscv.c
++ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-riscv.o -MD -MP -MF $(DEPDIR)/tc-riscv.Tpo -c -o tc-riscv.o `test -f 'config/tc-riscv.c' || echo '$(srcdir)/'`config/tc-riscv.c
++ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/tc-riscv.Tpo $(DEPDIR)/tc-riscv.Po
++ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='config/tc-riscv.c' object='tc-riscv.o' libtool=no @AMDEPBACKSLASH@
++ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-riscv.o `test -f 'config/tc-riscv.c' || echo '$(srcdir)/'`config/tc-riscv.c
++
++tc-riscv.obj: config/tc-riscv.c
++ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-riscv.obj -MD -MP -MF $(DEPDIR)/tc-riscv.Tpo -c -o tc-riscv.obj `if test -f 'config/tc-riscv.c'; then $(CYGPATH_W) 'config/tc-riscv.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-riscv.c'; fi`
++ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/tc-riscv.Tpo $(DEPDIR)/tc-riscv.Po
++ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='config/tc-riscv.c' object='tc-riscv.obj' libtool=no @AMDEPBACKSLASH@
++ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-riscv.obj `if test -f 'config/tc-riscv.c'; then $(CYGPATH_W) 'config/tc-riscv.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-riscv.c'; fi`
++
+ tc-rl78.o: config/tc-rl78.c
+ @am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-rl78.o -MD -MP -MF $(DEPDIR)/tc-rl78.Tpo -c -o tc-rl78.o `test -f 'config/tc-rl78.c' || echo '$(srcdir)/'`config/tc-rl78.c
+ @am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/tc-rl78.Tpo $(DEPDIR)/tc-rl78.Po
+diff --git original-binutils/gas/config/tc-riscv.c binutils-2_27/gas/config/tc-riscv.c
+new file mode 100644
+index 0000000..6a0cd05
+--- /dev/null
++++ binutils-2_27/gas/config/tc-riscv.c
+@@ -0,0 +1,2444 @@
++/* tc-riscv.c -- RISC-V assembler
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target.
++
++   This file is part of GAS.
++
++   GAS is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GAS is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#include "as.h"
++#include "config.h"
++#include "subsegs.h"
++#include "safe-ctype.h"
++
++#include "itbl-ops.h"
++#include "dwarf2dbg.h"
++#include "dw2gencfi.h"
++
++#include "elf/riscv.h"
++#include "opcode/riscv.h"
++
++#include <stdint.h>
++
++/* Information about an instruction, including its format, operands
++   and fixups.  */
++struct riscv_cl_insn
++{
++  /* The opcode's entry in riscv_opcodes.  */
++  const struct riscv_opcode *insn_mo;
++
++  /* The encoded instruction bits.  */
++  insn_t insn_opcode;
++
++  /* The frag that contains the instruction.  */
++  struct frag *frag;
++
++  /* The offset into FRAG of the first instruction byte.  */
++  long where;
++
++  /* The relocs associated with the instruction, if any.  */
++  fixS *fixp;
++};
++
++/* The default architecture.  */
++#ifndef DEFAULT_ARCH
++#define DEFAULT_ARCH "riscv64"
++#endif
++static const char default_arch[] = DEFAULT_ARCH;
++
++unsigned xlen = 0; /* width of an x-register */
++#define LOAD_ADDRESS_INSN (xlen == 64 ? "ld" : "lw")
++#define ADD32_INSN (xlen == 64 ? "addiw" : "addi")
++
++static unsigned elf_flags = 0;
++
++/* This is the set of options which the .option pseudo-op may modify.  */
++
++struct riscv_set_options
++{
++  int pic; /* Generate position-independent code.  */
++  int rvc; /* Generate RVC code.  */
++};
++
++static struct riscv_set_options riscv_opts =
++{
++  0,	/* pic */
++  0,	/* rvc */
++};
++
++static void
++riscv_set_rvc (bfd_boolean rvc_value)
++{
++  if (rvc_value)
++    elf_flags |= EF_RISCV_RVC;
++
++  riscv_opts.rvc = rvc_value;
++}
++
++struct riscv_subset
++{
++  const char *name;
++  int version_major;
++  int version_minor;
++
++  struct riscv_subset *next;
++};
++
++static struct riscv_subset *riscv_subsets;
++
++static bfd_boolean
++riscv_subset_supports (const char *feature)
++{
++  struct riscv_subset *s;
++  char *p;
++  unsigned xlen_required = strtoul (feature, &p, 10);
++
++  if (xlen_required && xlen != xlen_required)
++    return FALSE;
++
++  for (s = riscv_subsets; s != NULL; s = s->next)
++    if (strcasecmp (s->name, p) == 0)
++      /* FIXME: once we support version numbers:
++	 return major == s->version_major && minor <= s->version_minor; */
++      return TRUE;
++
++  return FALSE;
++}
++
++static void
++riscv_add_subset (const char *subset)
++{
++  struct riscv_subset *s = xmalloc (sizeof *s);
++  s->name = xstrdup (subset);
++  s->version_major = 2;
++  s->version_minor = 0;
++  s->next = riscv_subsets;
++  riscv_subsets = s;
++}
++
++/* Set which ISA and extensions are available.  Formally, ISA strings must
++   begin with RV32 or RV64, but we allow the prefix to be omitted.
++
++   FIXME: Version numbers are not supported yet.  */
++static void
++riscv_set_arch (const char *arg)
++{
++  char *uppercase = xstrdup (arg);
++  char *p = uppercase;
++  const char *all_subsets = "IMAFDC";
++  const char *extension = NULL;
++  int rvc = 0;
++  int i;
++
++  for (i = 0; uppercase[i]; i++)
++    uppercase[i] = TOUPPER (uppercase[i]);
++
++  if (strncmp (p, "RV32", 4) == 0)
++    {
++      xlen = 32;
++      p += 4;
++    }
++  else if (strncmp (p, "RV64", 4) == 0)
++    {
++      xlen = 64;
++      p += 4;
++    }
++  else if (strncmp (p, "RV", 2) == 0)
++    p += 2;
++
++  switch (*p)
++    {
++      case 'I':
++	break;
++
++      case 'G':
++	p++;
++	/* Fall through.  */
++
++      case '\0':
++	for (i = 0; all_subsets[i] != '\0'; i++)
++	  {
++	    const char subset[] = {all_subsets[i], '\0'};
++	    riscv_add_subset (subset);
++	  }
++	break;
++
++      default:
++	as_fatal ("`I' must be the first ISA subset name specified (got %c)",
++		  *p);
++    }
++
++  while (*p)
++    {
++      if (*p == 'X')
++	{
++	  char *subset = xstrdup (p), *q = subset;
++
++	  while (*++q != '\0' && *q != '_')
++	    ;
++	  *q = '\0';
++
++	  if (extension)
++	    as_fatal ("only one eXtension is supported (found %s and %s)",
++		      extension, subset);
++	  extension = subset;
++	  riscv_add_subset (subset);
++	  p += strlen (subset);
++	  free (subset);
++	}
++      else if (*p == '_')
++	p++;
++      else if ((all_subsets = strchr (all_subsets, *p)) != NULL)
++	{
++	  const char subset[] = {*p, 0};
++	  riscv_add_subset (subset);
++	  if (*p == 'C')
++	    rvc = 1;
++	  all_subsets++;
++	  p++;
++	}
++      else
++	as_fatal ("unsupported ISA subset %c", *p);
++    }
++
++  if (rvc)
++    /* Override -m[no-]rvc setting if C was explicitly listed.  */
++    riscv_set_rvc (TRUE);
++  else
++    /* Add RVC anyway.  -m[no-]rvc toggles its availability.  */
++    riscv_add_subset ("C");
++
++  free (uppercase);
++}
++
++/* handle of the OPCODE hash table */
++static struct hash_control *op_hash = NULL;
++
++/* This array holds the chars that always start a comment.  If the
++    pre-processor is disabled, these aren't very useful */
++const char comment_chars[] = "#";
++
++/* This array holds the chars that only start a comment at the beginning of
++   a line.  If the line seems to have the form '# 123 filename'
++   .line and .file directives will appear in the pre-processed output */
++/* Note that input_file.c hand checks for '#' at the beginning of the
++   first line of the input file.  This is because the compiler outputs
++   #NO_APP at the beginning of its output.  */
++/* Also note that C style comments are always supported.  */
++const char line_comment_chars[] = "#";
++
++/* This array holds machine specific line separator characters.  */
++const char line_separator_chars[] = ";";
++
++/* Chars that can be used to separate mant from exp in floating point nums */
++const char EXP_CHARS[] = "eE";
++
++/* Chars that mean this number is a floating point constant */
++/* As in 0f12.456 */
++/* or    0d1.2345e12 */
++const char FLT_CHARS[] = "rRsSfFdDxXpP";
++
++/* Macros for encoding relaxation state for RVC branches and far jumps.  */
++#define RELAX_BRANCH_ENCODE(uncond, rvc, length)	\
++  ((relax_substateT) 					\
++   (0xc0000000						\
++    | ((uncond) ? 1 : 0)				\
++    | ((rvc) ? 2 : 0)					\
++    | ((length) << 2)))
++#define RELAX_BRANCH_P(i) (((i) & 0xf0000000) == 0xc0000000)
++#define RELAX_BRANCH_LENGTH(i) (((i) >> 2) & 0xF)
++#define RELAX_BRANCH_RVC(i) (((i) & 2) != 0)
++#define RELAX_BRANCH_UNCOND(i) (((i) & 1) != 0)
++
++/* Is the given value a sign-extended 32-bit value?  */
++#define IS_SEXT_32BIT_NUM(x)						\
++  (((x) &~ (offsetT) 0x7fffffff) == 0					\
++   || (((x) &~ (offsetT) 0x7fffffff) == ~ (offsetT) 0x7fffffff))
++
++/* Is the given value a zero-extended 32-bit value?  Or a negated one?  */
++#define IS_ZEXT_32BIT_NUM(x)						\
++  (((x) &~ (offsetT) 0xffffffff) == 0					\
++   || (((x) &~ (offsetT) 0xffffffff) == ~ (offsetT) 0xffffffff))
++
++/* Change INSN's opcode so that the operand given by FIELD has value VALUE.
++   INSN is a riscv_cl_insn structure and VALUE is evaluated exactly once.  */
++#define INSERT_OPERAND(FIELD, INSN, VALUE) \
++  INSERT_BITS ((INSN).insn_opcode, VALUE, OP_MASK_##FIELD, OP_SH_##FIELD)
++
++/* Determine if an instruction matches an opcode.  */
++#define OPCODE_MATCHES(OPCODE, OP) \
++  (((OPCODE) & MASK_##OP) == MATCH_##OP)
++
++static char *expr_end;
++
++/* The default target format to use.  */
++
++const char *
++riscv_target_format (void)
++{
++  return xlen == 64 ? "elf64-littleriscv" : "elf32-littleriscv";
++}
++
++/* Return the length of instruction INSN.  */
++
++static inline unsigned int
++insn_length (const struct riscv_cl_insn *insn)
++{
++  return riscv_insn_length (insn->insn_opcode);
++}
++
++/* Initialise INSN from opcode entry MO.  Leave its position unspecified.  */
++
++static void
++create_insn (struct riscv_cl_insn *insn, const struct riscv_opcode *mo)
++{
++  insn->insn_mo = mo;
++  insn->insn_opcode = mo->match;
++  insn->frag = NULL;
++  insn->where = 0;
++  insn->fixp = NULL;
++}
++
++/* Install INSN at the location specified by its "frag" and "where" fields.  */
++
++static void
++install_insn (const struct riscv_cl_insn *insn)
++{
++  char *f = insn->frag->fr_literal + insn->where;
++  md_number_to_chars (f, insn->insn_opcode, insn_length (insn));
++}
++
++/* Move INSN to offset WHERE in FRAG.  Adjust the fixups accordingly
++   and install the opcode in the new location.  */
++
++static void
++move_insn (struct riscv_cl_insn *insn, fragS *frag, long where)
++{
++  insn->frag = frag;
++  insn->where = where;
++  if (insn->fixp != NULL)
++    {
++      insn->fixp->fx_frag = frag;
++      insn->fixp->fx_where = where;
++    }
++  install_insn (insn);
++}
++
++/* Add INSN to the end of the output.  */
++
++static void
++add_fixed_insn (struct riscv_cl_insn *insn)
++{
++  char *f = frag_more (insn_length (insn));
++  move_insn (insn, frag_now, f - frag_now->fr_literal);
++}
++
++static void
++add_relaxed_insn (struct riscv_cl_insn *insn, int max_chars, int var,
++      relax_substateT subtype, symbolS *symbol, offsetT offset)
++{
++  frag_grow (max_chars);
++  move_insn (insn, frag_now, frag_more (0) - frag_now->fr_literal);
++  frag_var (rs_machine_dependent, max_chars, var,
++      subtype, symbol, offset, NULL);
++}
++
++/* Compute the length of a branch sequence, and adjust the stored length
++   accordingly.  If FRAGP is NULL, the worst-case length is returned.  */
++
++static int
++relaxed_branch_length (fragS *fragp, asection *sec, int update)
++{
++  int jump, rvc, length = 8;
++
++  if (!fragp)
++    return length;
++
++  jump = RELAX_BRANCH_UNCOND (fragp->fr_subtype);
++  rvc = RELAX_BRANCH_RVC (fragp->fr_subtype);
++  length = RELAX_BRANCH_LENGTH (fragp->fr_subtype);
++
++  /* Assume jumps are in range; the linker will catch any that aren't.  */
++  length = jump ? 4 : 8;
++
++  if (fragp->fr_symbol != NULL
++      && S_IS_DEFINED (fragp->fr_symbol)
++      && sec == S_GET_SEGMENT (fragp->fr_symbol))
++    {
++      offsetT val = S_GET_VALUE (fragp->fr_symbol) + fragp->fr_offset;
++      bfd_vma rvc_range = jump ? RVC_JUMP_REACH : RVC_BRANCH_REACH;
++      val -= fragp->fr_address + fragp->fr_fix;
++
++      if (rvc && (bfd_vma)(val + rvc_range/2) < rvc_range)
++	length = 2;
++      else if ((bfd_vma)(val + RISCV_BRANCH_REACH/2) < RISCV_BRANCH_REACH)
++	length = 4;
++      else if (!jump && rvc)
++	length = 6;
++    }
++
++  if (update)
++    fragp->fr_subtype = RELAX_BRANCH_ENCODE (jump, rvc, length);
++
++  return length;
++}
++
++struct regname {
++  const char *name;
++  unsigned int num;
++};
++
++enum reg_class {
++  RCLASS_GPR,
++  RCLASS_FPR,
++  RCLASS_CSR,
++  RCLASS_MAX
++};
++
++static struct hash_control *reg_names_hash = NULL;
++
++#define ENCODE_REG_HASH(cls, n) (void *)(uintptr_t)((n) * RCLASS_MAX + (cls) + 1)
++#define DECODE_REG_CLASS(hash) (((uintptr_t)(hash) - 1) % RCLASS_MAX)
++#define DECODE_REG_NUM(hash) (((uintptr_t)(hash) - 1) / RCLASS_MAX)
++
++static void
++hash_reg_name (enum reg_class class, const char *name, unsigned n)
++{
++  void *hash = ENCODE_REG_HASH (class, n);
++  const char *retval = hash_insert (reg_names_hash, name, hash);
++
++  if (retval != NULL)
++    as_fatal (_("internal error: can't hash `%s': %s"), name, retval);
++}
++
++static void
++hash_reg_names (enum reg_class class, const char * const names[], unsigned n)
++{
++  unsigned i;
++
++  for (i = 0; i < n; i++)
++    hash_reg_name (class, names[i], i);
++}
++
++static unsigned int
++reg_lookup_internal (const char *s, enum reg_class class)
++{
++  struct regname *r = (struct regname *) hash_find (reg_names_hash, s);
++  if (r == NULL || DECODE_REG_CLASS (r) != class)
++    return -1;
++  return DECODE_REG_NUM (r);
++}
++
++static int
++reg_lookup (char **s, enum reg_class class, unsigned int *regnop)
++{
++  char *e;
++  char save_c;
++  int reg = -1;
++
++  /* Find end of name.  */
++  e = *s;
++  if (is_name_beginner (*e))
++    ++e;
++  while (is_part_of_name (*e))
++    ++e;
++
++  /* Terminate name.  */
++  save_c = *e;
++  *e = '\0';
++
++  /* Look for the register.  Advance to next token if one was recognized.  */
++  if ((reg = reg_lookup_internal (*s, class)) >= 0)
++    *s = e;
++
++  *e = save_c;
++  if (regnop)
++    *regnop = reg;
++  return reg >= 0;
++}
++
++static int
++arg_lookup (char **s, const char *const *array, size_t size, unsigned *regnop)
++{
++  const char *p = strchr (*s, ',');
++  size_t i, len = p ? (size_t)(p - *s) : strlen (*s);
++
++  for (i = 0; i < size; i++)
++    if (array[i] != NULL && strncmp (array[i], *s, len) == 0)
++      {
++	*regnop = i;
++	*s += len;
++	return 1;
++      }
++
++  return 0;
++}
++
++/* For consistency checking, verify that all bits are specified either
++   by the match/mask part of the instruction definition, or by the
++   operand list.  */
++static int
++validate_riscv_insn (const struct riscv_opcode *opc)
++{
++  const char *p = opc->args;
++  char c;
++  insn_t used_bits = opc->mask;
++  int insn_width = 8 * riscv_insn_length (opc->match);
++  insn_t required_bits = ~0ULL >> (64 - insn_width);
++
++  if ((used_bits & opc->match) != (opc->match & required_bits))
++    {
++      as_bad (_("internal: bad RISC-V opcode (mask error): %s %s"),
++	      opc->name, opc->args);
++      return 0;
++    }
++
++#define USE_BITS(mask,shift)	(used_bits |= ((insn_t)(mask) << (shift)))
++  while (*p)
++    switch (c = *p++)
++      {
++      /* Xcustom */
++      case '^':
++	switch (c = *p++)
++	  {
++	  case 'd': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
++	  case 's': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
++	  case 't': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
++	  case 'j': USE_BITS (OP_MASK_CUSTOM_IMM, OP_SH_CUSTOM_IMM); break;
++	  }
++	break;
++      case 'C': /* RVC */
++	switch (c = *p++)
++	  {
++	  case 'a': used_bits |= ENCODE_RVC_J_IMM(-1U); break;
++	  case 'c': break; /* RS1, constrained to equal sp */
++	  case 'i': used_bits |= ENCODE_RVC_SIMM3(-1U); break;
++	  case 'j': used_bits |= ENCODE_RVC_IMM(-1U); break;
++	  case 'k': used_bits |= ENCODE_RVC_LW_IMM(-1U); break;
++	  case 'l': used_bits |= ENCODE_RVC_LD_IMM(-1U); break;
++	  case 'm': used_bits |= ENCODE_RVC_LWSP_IMM(-1U); break;
++	  case 'n': used_bits |= ENCODE_RVC_LDSP_IMM(-1U); break;
++	  case 'p': used_bits |= ENCODE_RVC_B_IMM(-1U); break;
++	  case 's': USE_BITS (OP_MASK_CRS1S, OP_SH_CRS1S); break;
++	  case 't': USE_BITS (OP_MASK_CRS2S, OP_SH_CRS2S); break;
++	  case 'u': used_bits |= ENCODE_RVC_IMM(-1U); break;
++	  case 'v': used_bits |= ENCODE_RVC_IMM(-1U); break;
++	  case 'w': break; /* RS1S, constrained to equal RD */
++	  case 'x': break; /* RS2S, constrained to equal RD */
++	  case 'K': used_bits |= ENCODE_RVC_ADDI4SPN_IMM(-1U); break;
++	  case 'L': used_bits |= ENCODE_RVC_ADDI16SP_IMM(-1U); break;
++	  case 'M': used_bits |= ENCODE_RVC_SWSP_IMM(-1U); break;
++	  case 'N': used_bits |= ENCODE_RVC_SDSP_IMM(-1U); break;
++	  case 'U': break; /* RS1, constrained to equal RD */
++	  case 'V': USE_BITS (OP_MASK_CRS2, OP_SH_CRS2); break;
++	  case '<': used_bits |= ENCODE_RVC_IMM(-1U); break;
++	  case '>': used_bits |= ENCODE_RVC_IMM(-1U); break;
++	  case 'T': USE_BITS (OP_MASK_CRS2, OP_SH_CRS2); break;
++	  case 'D': USE_BITS (OP_MASK_CRS2S, OP_SH_CRS2S); break;
++	  default:
++	    as_bad (_("internal: bad RISC-V opcode (unknown operand type `C%c'): %s %s"),
++		    c, opc->name, opc->args);
++	    return 0;
++	  }
++	break;
++      case ',': break;
++      case '(': break;
++      case ')': break;
++      case '<': USE_BITS (OP_MASK_SHAMTW,	OP_SH_SHAMTW);	break;
++      case '>':	USE_BITS (OP_MASK_SHAMT,	OP_SH_SHAMT);	break;
++      case 'A': break;
++      case 'D':	USE_BITS (OP_MASK_RD,		OP_SH_RD);	break;
++      case 'Z':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
++      case 'E':	USE_BITS (OP_MASK_CSR,		OP_SH_CSR);	break;
++      case 'I': break;
++      case 'R':	USE_BITS (OP_MASK_RS3,		OP_SH_RS3);	break;
++      case 'S':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
++      case 'U':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	/* fallthru */
++      case 'T':	USE_BITS (OP_MASK_RS2,		OP_SH_RS2);	break;
++      case 'd':	USE_BITS (OP_MASK_RD,		OP_SH_RD);	break;
++      case 'm':	USE_BITS (OP_MASK_RM,		OP_SH_RM);	break;
++      case 's':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
++      case 't':	USE_BITS (OP_MASK_RS2,		OP_SH_RS2);	break;
++      case 'P':	USE_BITS (OP_MASK_PRED,		OP_SH_PRED); break;
++      case 'Q':	USE_BITS (OP_MASK_SUCC,		OP_SH_SUCC); break;
++      case 'o':
++      case 'j': used_bits |= ENCODE_ITYPE_IMM(-1U); break;
++      case 'a':	used_bits |= ENCODE_UJTYPE_IMM(-1U); break;
++      case 'p':	used_bits |= ENCODE_SBTYPE_IMM(-1U); break;
++      case 'q':	used_bits |= ENCODE_STYPE_IMM(-1U); break;
++      case 'u':	used_bits |= ENCODE_UTYPE_IMM(-1U); break;
++      case '[': break;
++      case ']': break;
++      case '0': break;
++      default:
++	as_bad (_("internal: bad RISC-V opcode (unknown operand type `%c'): %s %s"),
++		c, opc->name, opc->args);
++	return 0;
++      }
++#undef USE_BITS
++  if (used_bits != required_bits)
++    {
++      as_bad (_("internal: bad RISC-V opcode (bits 0x%lx undefined): %s %s"),
++	      ~(long)(used_bits & required_bits), opc->name, opc->args);
++      return 0;
++    }
++  return 1;
++}
++
++struct percent_op_match
++{
++  const char *str;
++  bfd_reloc_code_real_type reloc;
++};
++
++/* This function is called once, at assembler startup time.  It should set up
++   all the tables, etc. that the MD part of the assembler will need.  */
++
++void
++md_begin (void)
++{
++  const char *retval = NULL;
++  int i = 0;
++
++  if (! bfd_set_arch_mach (stdoutput, bfd_arch_riscv, 0))
++    as_warn (_("Could not set architecture and machine"));
++
++  op_hash = hash_new ();
++
++  for (i = 0; i < NUMOPCODES;)
++    {
++      const char *name = riscv_opcodes[i].name;
++
++      retval = hash_insert (op_hash, name, (void *) &riscv_opcodes[i]);
++
++      if (retval != NULL)
++	{
++	  fprintf (stderr, _("internal error: can't hash `%s': %s\n"),
++		   riscv_opcodes[i].name, retval);
++	  /* Probably a memory allocation problem?  Give up now.  */
++	  as_fatal (_("Broken assembler.  No assembly attempted."));
++	}
++      do
++	{
++	  if (riscv_opcodes[i].pinfo != INSN_MACRO)
++	    {
++	      if (!validate_riscv_insn (&riscv_opcodes[i]))
++		as_fatal (_("Broken assembler.  No assembly attempted."));
++	    }
++	  ++i;
++	}
++      while ((i < NUMOPCODES) && !strcmp (riscv_opcodes[i].name, name));
++    }
++
++  reg_names_hash = hash_new ();
++  hash_reg_names (RCLASS_GPR, riscv_gpr_names_numeric, NGPR);
++  hash_reg_names (RCLASS_GPR, riscv_gpr_names_abi, NGPR);
++  hash_reg_names (RCLASS_FPR, riscv_fpr_names_numeric, NFPR);
++  hash_reg_names (RCLASS_FPR, riscv_fpr_names_abi, NFPR);
++
++#define DECLARE_CSR(name, num) hash_reg_name (RCLASS_CSR, #name, num);
++#include "opcode/riscv-opc.h"
++#undef DECLARE_CSR
++
++  /* Set the default alignment for the text section.  */
++  record_alignment (text_section, riscv_opts.rvc ? 1 : 2);
++}
++
++/* Output an instruction.  IP is the instruction information.
++   ADDRESS_EXPR is an operand of the instruction to be used with
++   RELOC_TYPE.  */
++
++static void
++append_insn (struct riscv_cl_insn *ip, expressionS *address_expr,
++	     bfd_reloc_code_real_type reloc_type)
++{
++#ifdef OBJ_ELF
++  dwarf2_emit_insn (0);
++#endif
++
++  if (reloc_type != BFD_RELOC_UNUSED)
++    {
++      reloc_howto_type *howto;
++
++      gas_assert(address_expr);
++      if (reloc_type == BFD_RELOC_12_PCREL
++	  || reloc_type == BFD_RELOC_RISCV_JMP)
++	{
++	  int j = reloc_type == BFD_RELOC_RISCV_JMP;
++	  int best_case = riscv_insn_length (ip->insn_opcode);
++	  int worst_case = relaxed_branch_length (NULL, NULL, 0);
++	  add_relaxed_insn (ip, worst_case, best_case,
++			    RELAX_BRANCH_ENCODE (j, best_case == 2, worst_case),
++			    address_expr->X_add_symbol,
++			    address_expr->X_add_number);
++	  return;
++	}
++      else if (address_expr->X_op == O_constant)
++	{
++	  switch (reloc_type)
++	    {
++	    case BFD_RELOC_32:
++	      ip->insn_opcode |= address_expr->X_add_number;
++	      goto append;
++
++	    case BFD_RELOC_RISCV_HI20:
++	      {
++		insn_t imm = RISCV_CONST_HIGH_PART (address_expr->X_add_number);
++		ip->insn_opcode |= ENCODE_UTYPE_IMM (imm);
++		goto append;
++	      }
++
++	    case BFD_RELOC_RISCV_LO12_S:
++	      ip->insn_opcode |= ENCODE_STYPE_IMM (address_expr->X_add_number);
++	      goto append;
++
++	    case BFD_RELOC_RISCV_LO12_I:
++	      ip->insn_opcode |= ENCODE_ITYPE_IMM (address_expr->X_add_number);
++	      goto append;
++
++	    default:
++	      break;
++	    }
++	}
++
++	howto = bfd_reloc_type_lookup (stdoutput, reloc_type);
++	if (howto == NULL)
++	  as_bad (_("Unsupported RISC-V relocation number %d"), reloc_type);
++
++	ip->fixp = fix_new_exp (ip->frag, ip->where,
++				bfd_get_reloc_size (howto),
++				address_expr, FALSE, reloc_type);
++    }
++
++append:
++  add_fixed_insn (ip);
++  install_insn (ip);
++}
++
++/* Build an instruction created by a macro expansion.  This is passed
++   a pointer to the count of instructions created so far, an
++   expression, the name of the instruction to build, an operand format
++   string, and corresponding arguments.  */
++
++static void
++macro_build (expressionS *ep, const char *name, const char *fmt, ...)
++{
++  const struct riscv_opcode *mo;
++  struct riscv_cl_insn insn;
++  bfd_reloc_code_real_type r;
++  va_list args;
++
++  va_start (args, fmt);
++
++  r = BFD_RELOC_UNUSED;
++  mo = (struct riscv_opcode *) hash_find (op_hash, name);
++  gas_assert (mo);
++
++  /* Find a non-RVC variant of the instruction.  */
++  while (riscv_insn_length (mo->match) < 4)
++    mo++;
++  gas_assert (strcmp (name, mo->name) == 0);
++
++  create_insn (&insn, mo);
++  for (;;)
++    {
++      switch (*fmt++)
++	{
++	case 'd':
++	  INSERT_OPERAND (RD, insn, va_arg (args, int));
++	  continue;
++
++	case 's':
++	  INSERT_OPERAND (RS1, insn, va_arg (args, int));
++	  continue;
++
++	case 't':
++	  INSERT_OPERAND (RS2, insn, va_arg (args, int));
++	  continue;
++
++	case '>':
++	  INSERT_OPERAND (SHAMT, insn, va_arg (args, int));
++	  continue;
++
++	case 'j':
++	case 'u':
++	case 'q':
++	  gas_assert (ep != NULL);
++	  r = va_arg (args, int);
++	  continue;
++
++	case '\0':
++	  break;
++	case ',':
++	  continue;
++	default:
++	  as_fatal (_("internal error: invalid macro"));
++	}
++      break;
++    }
++  va_end (args);
++  gas_assert (r == BFD_RELOC_UNUSED ? ep == NULL : ep != NULL);
++
++  append_insn (&insn, ep, r);
++}
++
++/* Sign-extend 32-bit mode constants that have bit 31 set and all higher bits
++   unset.  */
++static void
++normalize_constant_expr (expressionS *ex)
++{
++  if (xlen > 32)
++    return;
++  if ((ex->X_op == O_constant || ex->X_op == O_symbol)
++      && IS_ZEXT_32BIT_NUM (ex->X_add_number))
++    ex->X_add_number = (((ex->X_add_number & 0xffffffff) ^ 0x80000000)
++			- 0x80000000);
++}
++
++/* Warn if an expression is not a constant.  */
++
++static void
++check_absolute_expr (struct riscv_cl_insn *ip, expressionS *ex)
++{
++  if (ex->X_op == O_big)
++    as_bad (_("unsupported large constant"));
++  else if (ex->X_op != O_constant)
++    as_bad (_("Instruction %s requires absolute expression"),
++	    ip->insn_mo->name);
++  normalize_constant_expr (ex);
++}
++
++static symbolS *
++make_internal_label (void)
++{
++  return (symbolS *) local_symbol_make (FAKE_LABEL_NAME, now_seg,
++					(valueT) frag_now_fix(), frag_now);
++}
++
++/* Load an entry from the GOT.  */
++static void
++pcrel_access (int destreg, int tempreg, expressionS *ep,
++	      const char *lo_insn, const char *lo_pattern,
++	      bfd_reloc_code_real_type hi_reloc,
++	      bfd_reloc_code_real_type lo_reloc)
++{
++  expressionS ep2;
++  ep2.X_op = O_symbol;
++  ep2.X_add_symbol = make_internal_label ();
++  ep2.X_add_number = 0;
++
++  macro_build (ep, "auipc", "d,u", tempreg, hi_reloc);
++  macro_build (&ep2, lo_insn, lo_pattern, destreg, tempreg, lo_reloc);
++}
++
++static void
++pcrel_load (int destreg, int tempreg, expressionS *ep, const char *lo_insn,
++	    bfd_reloc_code_real_type hi_reloc,
++	    bfd_reloc_code_real_type lo_reloc)
++{
++  pcrel_access (destreg, tempreg, ep, lo_insn, "d,s,j", hi_reloc, lo_reloc);
++}
++
++static void
++pcrel_store (int srcreg, int tempreg, expressionS *ep, const char *lo_insn,
++	     bfd_reloc_code_real_type hi_reloc,
++	     bfd_reloc_code_real_type lo_reloc)
++{
++  pcrel_access (srcreg, tempreg, ep, lo_insn, "t,s,q", hi_reloc, lo_reloc);
++}
++
++/* PC-relative function call using AUIPC/JALR, relaxed to JAL.  */
++static void
++riscv_call (int destreg, int tempreg, expressionS *ep,
++	    bfd_reloc_code_real_type reloc)
++{
++  macro_build (ep, "auipc", "d,u", tempreg, reloc);
++  macro_build (NULL, "jalr", "d,s", destreg, tempreg);
++}
++
++/* Load an integer constant into a register.  */
++
++static void
++load_const (int reg, expressionS *ep)
++{
++  int shift = RISCV_IMM_BITS;
++  expressionS upper = *ep, lower = *ep;
++  lower.X_add_number = (int32_t) ep->X_add_number << (32-shift) >> (32-shift);
++  upper.X_add_number -= lower.X_add_number;
++
++  if (ep->X_op != O_constant)
++    {
++      as_bad (_("unsupported large constant"));
++      return;
++    }
++
++  if (xlen > 32 && !IS_SEXT_32BIT_NUM(ep->X_add_number))
++    {
++      /* Reduce to a signed 32-bit constant using SLLI and ADDI, which
++	 is not optimal but also not so bad.  */
++      while (((upper.X_add_number >> shift) & 1) == 0)
++	shift++;
++
++      upper.X_add_number = (int64_t) upper.X_add_number >> shift;
++      load_const(reg, &upper);
++
++      macro_build (NULL, "slli", "d,s,>", reg, reg, shift);
++      if (lower.X_add_number != 0)
++	macro_build (&lower, "addi", "d,s,j", reg, reg, BFD_RELOC_RISCV_LO12_I);
++    }
++  else
++    {
++      int hi_reg = 0;
++
++      if (upper.X_add_number != 0)
++	{
++	  macro_build (ep, "lui", "d,u", reg, BFD_RELOC_RISCV_HI20);
++	  hi_reg = reg;
++	}
++
++      if (lower.X_add_number != 0 || hi_reg == 0)
++	macro_build (ep, ADD32_INSN, "d,s,j", reg, hi_reg,
++		     BFD_RELOC_RISCV_LO12_I);
++    }
++}
++
++/* Expand RISC-V assembly macros into one or more instructions.  */
++static void
++macro (struct riscv_cl_insn *ip, expressionS *imm_expr,
++       bfd_reloc_code_real_type *imm_reloc)
++{
++  int rd = (ip->insn_opcode >> OP_SH_RD) & OP_MASK_RD;
++  int rs1 = (ip->insn_opcode >> OP_SH_RS1) & OP_MASK_RS1;
++  int rs2 = (ip->insn_opcode >> OP_SH_RS2) & OP_MASK_RS2;
++  int mask = ip->insn_mo->mask;
++
++  switch (mask)
++    {
++    case M_LI:
++      load_const (rd, imm_expr);
++      break;
++
++    case M_LA:
++    case M_LLA:
++      /* Load the address of a symbol into a register.  */
++      if (!IS_SEXT_32BIT_NUM (imm_expr->X_add_number))
++	as_bad(_("offset too large"));
++
++      if (imm_expr->X_op == O_constant)
++	load_const (rd, imm_expr);
++      else if (riscv_opts.pic && mask == M_LA) /* Global PIC symbol */
++	pcrel_load (rd, rd, imm_expr, LOAD_ADDRESS_INSN,
++		    BFD_RELOC_RISCV_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      else /* Local PIC symbol, or any non-PIC symbol */
++	pcrel_load (rd, rd, imm_expr, "addi",
++		    BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LA_TLS_GD:
++      pcrel_load (rd, rd, imm_expr, "addi",
++		  BFD_RELOC_RISCV_TLS_GD_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LA_TLS_IE:
++      pcrel_load (rd, rd, imm_expr, LOAD_ADDRESS_INSN,
++		  BFD_RELOC_RISCV_TLS_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LB:
++      pcrel_load (rd, rd, imm_expr, "lb",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LBU:
++      pcrel_load (rd, rd, imm_expr, "lbu",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LH:
++      pcrel_load (rd, rd, imm_expr, "lh",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LHU:
++      pcrel_load (rd, rd, imm_expr, "lhu",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LW:
++      pcrel_load (rd, rd, imm_expr, "lw",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LWU:
++      pcrel_load (rd, rd, imm_expr, "lwu",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_LD:
++      pcrel_load (rd, rd, imm_expr, "ld",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_FLW:
++      pcrel_load (rd, rs1, imm_expr, "flw",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_FLD:
++      pcrel_load (rd, rs1, imm_expr, "fld",
++		  BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++      break;
++
++    case M_SB:
++      pcrel_store (rs2, rs1, imm_expr, "sb",
++		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++      break;
++
++    case M_SH:
++      pcrel_store (rs2, rs1, imm_expr, "sh",
++		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++      break;
++
++    case M_SW:
++      pcrel_store (rs2, rs1, imm_expr, "sw",
++		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++      break;
++
++    case M_SD:
++      pcrel_store (rs2, rs1, imm_expr, "sd",
++		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++      break;
++
++    case M_FSW:
++      pcrel_store (rs2, rs1, imm_expr, "fsw",
++		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++      break;
++
++    case M_FSD:
++      pcrel_store (rs2, rs1, imm_expr, "fsd",
++		   BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++      break;
++
++    case M_CALL:
++      riscv_call (rd, rs1, imm_expr, *imm_reloc);
++      break;
++
++    default:
++      as_bad (_("Macro %s not implemented"), ip->insn_mo->name);
++      break;
++    }
++}
++
++static const struct percent_op_match percent_op_utype[] =
++{
++  {"%tprel_hi", BFD_RELOC_RISCV_TPREL_HI20},
++  {"%pcrel_hi", BFD_RELOC_RISCV_PCREL_HI20},
++  {"%tls_ie_pcrel_hi", BFD_RELOC_RISCV_TLS_GOT_HI20},
++  {"%tls_gd_pcrel_hi", BFD_RELOC_RISCV_TLS_GD_HI20},
++  {"%hi", BFD_RELOC_RISCV_HI20},
++  {0, 0}
++};
++
++static const struct percent_op_match percent_op_itype[] =
++{
++  {"%lo", BFD_RELOC_RISCV_LO12_I},
++  {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_I},
++  {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_I},
++  {0, 0}
++};
++
++static const struct percent_op_match percent_op_stype[] =
++{
++  {"%lo", BFD_RELOC_RISCV_LO12_S},
++  {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_S},
++  {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_S},
++  {0, 0}
++};
++
++static const struct percent_op_match percent_op_rtype[] =
++{
++  {"%tprel_add", BFD_RELOC_RISCV_TPREL_ADD},
++  {0, 0}
++};
++
++/* Return true if *STR points to a relocation operator.  When returning true,
++   move *STR over the operator and store its relocation code in *RELOC.
++   Leave both *STR and *RELOC alone when returning false.  */
++
++static bfd_boolean
++parse_relocation (char **str, bfd_reloc_code_real_type *reloc,
++		  const struct percent_op_match *percent_op)
++{
++  for ( ; percent_op->str; percent_op++)
++    if (strncasecmp (*str, percent_op->str, strlen (percent_op->str)) == 0)
++      {
++	int len = strlen (percent_op->str);
++
++	if (!ISSPACE ((*str)[len]) && (*str)[len] != '(')
++	  continue;
++
++	*str += strlen (percent_op->str);
++	*reloc = percent_op->reloc;
++
++	/* Check whether the output BFD supports this relocation.
++	   If not, issue an error and fall back on something safe.  */
++	if (!bfd_reloc_type_lookup (stdoutput, percent_op->reloc))
++	  {
++	    as_bad ("relocation %s isn't supported by the current ABI",
++		    percent_op->str);
++	    *reloc = BFD_RELOC_UNUSED;
++	  }
++	return TRUE;
++      }
++  return FALSE;
++}
++
++static void
++my_getExpression (expressionS *ep, char *str)
++{
++  char *save_in;
++
++  save_in = input_line_pointer;
++  input_line_pointer = str;
++  expression (ep);
++  expr_end = input_line_pointer;
++  input_line_pointer = save_in;
++}
++
++/* Parse string STR as a 16-bit relocatable operand.  Store the
++   expression in *EP and the relocation, if any, in RELOC.
++   Return the number of relocation operators used (0 or 1).
++
++   On exit, EXPR_END points to the first character after the expression.  */
++
++static size_t
++my_getSmallExpression (expressionS *ep, bfd_reloc_code_real_type *reloc,
++		       char *str, const struct percent_op_match *percent_op)
++{
++  size_t reloc_index;
++  unsigned crux_depth, str_depth, regno;
++  char *crux;
++
++  /* First, check for integer registers.  */
++  if (reg_lookup (&str, RCLASS_GPR, &regno))
++    {
++      ep->X_op = O_register;
++      ep->X_add_number = regno;
++      return 0;
++    }
++
++  /* Search for the start of the main expression.
++     End the loop with CRUX pointing to the start
++     of the main expression and with CRUX_DEPTH containing the number
++     of open brackets at that point.  */
++  reloc_index = -1;
++  str_depth = 0;
++  do
++    {
++      reloc_index++;
++      crux = str;
++      crux_depth = str_depth;
++
++      /* Skip over whitespace and brackets, keeping count of the number
++	 of brackets.  */
++      while (*str == ' ' || *str == '\t' || *str == '(')
++	if (*str++ == '(')
++	  str_depth++;
++    }
++  while (*str == '%'
++	 && reloc_index < 1
++	 && parse_relocation (&str, reloc, percent_op));
++
++  my_getExpression (ep, crux);
++  str = expr_end;
++
++  /* Match every open bracket.  */
++  while (crux_depth > 0 && (*str == ')' || *str == ' ' || *str == '\t'))
++    if (*str++ == ')')
++      crux_depth--;
++
++  if (crux_depth > 0)
++    as_bad ("unclosed '('");
++
++  expr_end = str;
++
++  return reloc_index;
++}
++
++/* This routine assembles an instruction into its binary format.  As a
++   side effect, it sets the global variable imm_reloc to the type of
++   relocation to do if one of the operands is an address expression.  */
++
++static const char *
++riscv_ip (char *str, struct riscv_cl_insn *ip, expressionS *imm_expr,
++	  bfd_reloc_code_real_type *imm_reloc)
++{
++  char *s;
++  const char *args;
++  char c = 0;
++  struct riscv_opcode *insn, *end = &riscv_opcodes[NUMOPCODES];
++  char *argsStart;
++  unsigned int regno;
++  char save_c = 0;
++  int argnum;
++  const struct percent_op_match *p;
++  const char *error = "unrecognized opcode";
++
++  /* Parse the name of the instruction.  Terminate the string if whitespace
++     is found so that hash_find only sees the name part of the string.  */
++  for (s = str; *s != '\0'; ++s)
++    if (ISSPACE (*s))
++      {
++	save_c = *s;
++	*s++ = '\0';
++	break;
++      }
++
++  insn = (struct riscv_opcode *) hash_find (op_hash, str);
++
++  argsStart = s;
++  for ( ; insn && insn < end && strcmp (insn->name, str) == 0; insn++)
++    {
++      if (!riscv_subset_supports (insn->subset))
++	continue;
++
++      create_insn (ip, insn);
++      argnum = 1;
++
++      imm_expr->X_op = O_absent;
++      *imm_reloc = BFD_RELOC_UNUSED;
++      p = percent_op_itype;
++
++      for (args = insn->args;; ++args)
++	{
++	  s += strspn (s, " \t");
++	  switch (*args)
++	    {
++	    case '\0':		/* end of args */
++	      if (insn->pinfo != INSN_MACRO)
++		{
++		  if (!insn->match_func (insn, ip->insn_opcode))
++		    break;
++		  if (riscv_insn_length (insn->match) == 2 && !riscv_opts.rvc)
++		    break;
++		}
++	      if (*s != '\0')
++		break;
++	      /* Successful assembly.  */
++	      error = NULL;
++	      goto out;
++	    /* Xcustom */
++	    case '^':
++	      {
++		unsigned long max = OP_MASK_RD;
++		my_getExpression (imm_expr, s);
++		check_absolute_expr (ip, imm_expr);
++		switch (*++args)
++		  {
++		  case 'j':
++		    max = OP_MASK_CUSTOM_IMM;
++		    INSERT_OPERAND (CUSTOM_IMM, *ip, imm_expr->X_add_number);
++		    break;
++		  case 'd':
++		    INSERT_OPERAND (RD, *ip, imm_expr->X_add_number);
++		    break;
++		  case 's':
++		    INSERT_OPERAND (RS1, *ip, imm_expr->X_add_number);
++		    break;
++		  case 't':
++		    INSERT_OPERAND (RS2, *ip, imm_expr->X_add_number);
++		    break;
++		  }
++		imm_expr->X_op = O_absent;
++		s = expr_end;
++		if ((unsigned long) imm_expr->X_add_number > max)
++		  as_warn ("Bad custom immediate (%lu), must be at most %lu",
++			   (unsigned long)imm_expr->X_add_number, max);
++		continue;
++	      }
++
++	    case 'C': /* RVC */
++	      switch (*++args)
++		{
++		case 's': /* RS1 x8-x15 */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
++		      || !(regno >= 8 && regno <= 15))
++		    break;
++		  INSERT_OPERAND (CRS1S, *ip, regno % 8);
++		  continue;
++		case 'w': /* RS1 x8-x15, constrained to equal RD x8-x15 */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
++		      || EXTRACT_OPERAND (CRS1S, ip->insn_opcode) + 8 != regno)
++		    break;
++		  continue;
++		case 't': /* RS2 x8-x15 */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
++		      || !(regno >= 8 && regno <= 15))
++		    break;
++		  INSERT_OPERAND (CRS2S, *ip, regno % 8);
++		  continue;
++		case 'x': /* RS2 x8-x15, constrained to equal RD x8-x15 */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
++		      || EXTRACT_OPERAND (CRS2S, ip->insn_opcode) + 8 != regno)
++		    break;
++		  continue;
++		case 'U': /* RS1, constrained to equal RD */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
++		      || EXTRACT_OPERAND (RD, ip->insn_opcode) != regno)
++		    break;
++		  continue;
++		case 'V': /* RS2 */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno))
++		    break;
++		  INSERT_OPERAND (CRS2, *ip, regno);
++		  continue;
++		case 'c': /* RS1, constrained to equal sp */
++		  if (!reg_lookup (&s, RCLASS_GPR, &regno)
++		      || regno != X_SP)
++		    break;
++		  continue;
++		case '>':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || imm_expr->X_add_number <= 0
++		      || imm_expr->X_add_number >= 64)
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
++rvc_imm_done:
++		  s = expr_end;
++		  imm_expr->X_op = O_absent;
++		  continue;
++		case '<':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_IMM (imm_expr->X_add_number)
++		      || imm_expr->X_add_number <= 0
++		      || imm_expr->X_add_number >= 32)
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'i':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || imm_expr->X_add_number == 0
++		      || !VALID_RVC_SIMM3 (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_SIMM3 (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'j':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || imm_expr->X_add_number == 0
++		      || !VALID_RVC_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'k':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_LW_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_LW_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'l':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_LD_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_LD_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'm':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_LWSP_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |=
++		    ENCODE_RVC_LWSP_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'n':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_LDSP_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |=
++		    ENCODE_RVC_LDSP_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'K':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_ADDI4SPN_IMM (imm_expr->X_add_number)
++		      || imm_expr->X_add_number == 0)
++		    break;
++		  ip->insn_opcode |=
++		    ENCODE_RVC_ADDI4SPN_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'L':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_ADDI16SP_IMM (imm_expr->X_add_number)
++		      || imm_expr->X_add_number == 0)
++		    break;
++		  ip->insn_opcode |=
++		    ENCODE_RVC_ADDI16SP_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'M':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_SWSP_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |=
++		    ENCODE_RVC_SWSP_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'N':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || imm_expr->X_op != O_constant
++		      || !VALID_RVC_SDSP_IMM (imm_expr->X_add_number))
++		    break;
++		  ip->insn_opcode |=
++		    ENCODE_RVC_SDSP_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'u':
++		  p = percent_op_utype;
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p))
++		    break;
++rvc_lui:
++		  if (imm_expr->X_op != O_constant
++		      || imm_expr->X_add_number <= 0
++		      || imm_expr->X_add_number >= RISCV_BIGIMM_REACH
++		      || (imm_expr->X_add_number >= RISCV_RVC_IMM_REACH / 2
++			  && imm_expr->X_add_number <
++			      RISCV_BIGIMM_REACH - RISCV_RVC_IMM_REACH / 2))
++		    break;
++		  ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
++		  goto rvc_imm_done;
++		case 'v':
++		  if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		      || (imm_expr->X_add_number & (RISCV_IMM_REACH - 1))
++		      || (int32_t)imm_expr->X_add_number
++			  != imm_expr->X_add_number)
++		    break;
++		  imm_expr->X_add_number =
++		    ((uint32_t) imm_expr->X_add_number) >> RISCV_IMM_BITS;
++		  goto rvc_lui;
++		case 'p':
++		  goto branch;
++		case 'a':
++		  goto jump;
++		case 'D': /* floating-point RS2 x8-x15 */
++		  if (!reg_lookup (&s, RCLASS_FPR, &regno)
++		      || !(regno >= 8 && regno <= 15))
++		    break;
++		  INSERT_OPERAND (CRS2S, *ip, regno % 8);
++		  continue;
++		case 'T': /* floating-point RS2 */
++		  if (!reg_lookup (&s, RCLASS_FPR, &regno))
++		    break;
++		  INSERT_OPERAND (CRS2, *ip, regno);
++		  continue;
++		default:
++		  as_bad (_("bad RVC field specifier 'C%c'\n"), *args);
++		}
++	      break;
++
++	    case ',':
++	      ++argnum;
++	      if (*s++ == *args)
++		continue;
++	      s--;
++	      break;
++
++	    case '(':
++	    case ')':
++	    case '[':
++	    case ']':
++	      if (*s++ == *args)
++		continue;
++	      break;
++
++	    case '<':		/* shift amount, 0 - 31 */
++	      my_getExpression (imm_expr, s);
++	      check_absolute_expr (ip, imm_expr);
++	      if ((unsigned long) imm_expr->X_add_number > 31)
++		as_warn (_("Improper shift amount (%lu)"),
++			 (unsigned long) imm_expr->X_add_number);
++	      INSERT_OPERAND (SHAMTW, *ip, imm_expr->X_add_number);
++	      imm_expr->X_op = O_absent;
++	      s = expr_end;
++	      continue;
++
++	    case '>':		/* shift amount, 0 - (XLEN-1) */
++	      my_getExpression (imm_expr, s);
++	      check_absolute_expr (ip, imm_expr);
++	      if ((unsigned long) imm_expr->X_add_number >= xlen)
++		as_warn (_("Improper shift amount (%lu)"),
++			 (unsigned long) imm_expr->X_add_number);
++	      INSERT_OPERAND (SHAMT, *ip, imm_expr->X_add_number);
++	      imm_expr->X_op = O_absent;
++	      s = expr_end;
++	      continue;
++
++	    case 'Z':		/* CSRRxI immediate */
++	      my_getExpression (imm_expr, s);
++	      check_absolute_expr (ip, imm_expr);
++	      if ((unsigned long) imm_expr->X_add_number > 31)
++		as_warn (_("Improper CSRxI immediate (%lu)"),
++			 (unsigned long) imm_expr->X_add_number);
++	      INSERT_OPERAND (RS1, *ip, imm_expr->X_add_number);
++	      imm_expr->X_op = O_absent;
++	      s = expr_end;
++	      continue;
++
++	    case 'E':		/* Control register.  */
++	      if (reg_lookup (&s, RCLASS_CSR, &regno))
++		INSERT_OPERAND (CSR, *ip, regno);
++	      else
++		{
++		  my_getExpression (imm_expr, s);
++		  check_absolute_expr (ip, imm_expr);
++		  if ((unsigned long) imm_expr->X_add_number > 0xfff)
++		    as_warn(_("Improper CSR address (%lu)"),
++			    (unsigned long) imm_expr->X_add_number);
++		  INSERT_OPERAND (CSR, *ip, imm_expr->X_add_number);
++		  imm_expr->X_op = O_absent;
++		  s = expr_end;
++		}
++	      continue;
++
++	    case 'm':		/* rounding mode */
++	      if (arg_lookup (&s, riscv_rm, ARRAY_SIZE (riscv_rm), &regno))
++		{
++		  INSERT_OPERAND (RM, *ip, regno);
++		  continue;
++		}
++	      break;
++
++	    case 'P':
++	    case 'Q':		/* fence predecessor/successor */
++	      if (arg_lookup (&s, riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ),
++			      &regno))
++		{
++		  if (*args == 'P')
++		    INSERT_OPERAND (PRED, *ip, regno);
++		  else
++		    INSERT_OPERAND (SUCC, *ip, regno);
++		  continue;
++		}
++	      break;
++
++	    case 'd':		/* destination register */
++	    case 's':		/* source register */
++	    case 't':		/* target register */
++	      if (reg_lookup (&s, RCLASS_GPR, &regno))
++		{
++		  c = *args;
++		  if (*s == ' ')
++		    ++s;
++
++		  /* Now that we have assembled one operand, we use the args
++		     string to figure out where it goes in the instruction.  */
++		  switch (c)
++		    {
++		    case 's':
++		      INSERT_OPERAND (RS1, *ip, regno);
++		      break;
++		    case 'd':
++		      INSERT_OPERAND (RD, *ip, regno);
++		      break;
++		    case 't':
++		      INSERT_OPERAND (RS2, *ip, regno);
++		      break;
++		    }
++		  continue;
++		}
++	      break;
++
++	    case 'D':		/* floating point rd */
++	    case 'S':		/* floating point rs1 */
++	    case 'T':		/* floating point rs2 */
++	    case 'U':		/* floating point rs1 and rs2 */
++	    case 'R':		/* floating point rs3 */
++	      if (reg_lookup (&s, RCLASS_FPR, &regno))
++		{
++		  c = *args;
++		  if (*s == ' ')
++		    ++s;
++		  switch (c)
++		    {
++		    case 'D':
++		      INSERT_OPERAND (RD, *ip, regno);
++		      break;
++		    case 'S':
++		      INSERT_OPERAND (RS1, *ip, regno);
++		      break;
++		    case 'U':
++		      INSERT_OPERAND (RS1, *ip, regno);
++		      /* fallthru */
++		    case 'T':
++		      INSERT_OPERAND (RS2, *ip, regno);
++		      break;
++		    case 'R':
++		      INSERT_OPERAND (RS3, *ip, regno);
++		      break;
++		    }
++		  continue;
++		}
++
++	      break;
++
++	    case 'I':
++	      my_getExpression (imm_expr, s);
++	      if (imm_expr->X_op != O_big
++		  && imm_expr->X_op != O_constant)
++		break;
++	      normalize_constant_expr (imm_expr);
++	      s = expr_end;
++	      continue;
++
++	    case 'A':
++	      my_getExpression (imm_expr, s);
++	      normalize_constant_expr (imm_expr);
++	      /* The 'A' format specifier must be a symbol. */
++	      if (imm_expr->X_op != O_symbol)
++	        break;
++	      *imm_reloc = BFD_RELOC_32;
++	      s = expr_end;
++	      continue;
++
++	    case 'j': /* sign-extended immediate */
++	      *imm_reloc = BFD_RELOC_RISCV_LO12_I;
++	      p = percent_op_itype;
++	      goto alu_op;
++	    case 'q': /* store displacement */
++	      p = percent_op_stype;
++	      *imm_reloc = BFD_RELOC_RISCV_LO12_S;
++	      goto load_store;
++	    case 'o': /* load displacement */
++	      p = percent_op_itype;
++	      *imm_reloc = BFD_RELOC_RISCV_LO12_I;
++	      goto load_store;
++	    case '0': /* AMO "displacement," which must be zero */
++	      p = percent_op_rtype;
++	      *imm_reloc = BFD_RELOC_UNUSED;
++load_store:
++	      /* Check whether there is only a single bracketed expression
++		 left.  If so, it must be the base register and the
++		 constant must be zero.  */
++	      imm_expr->X_op = O_constant;
++	      imm_expr->X_add_number = 0;
++	      if (*s == '(' && strchr (s + 1, '(') == 0)
++		continue;
++alu_op:
++	      /* If this value won't fit into a 16 bit offset, then go
++		 find a macro that will generate the 32 bit offset
++		 code pattern.  */
++	      if (!my_getSmallExpression (imm_expr, imm_reloc, s, p))
++		{
++		  normalize_constant_expr (imm_expr);
++		  if (imm_expr->X_op != O_constant
++		      || (*args == '0' && imm_expr->X_add_number != 0)
++		      || imm_expr->X_add_number >= (signed)RISCV_IMM_REACH/2
++		      || imm_expr->X_add_number < -(signed)RISCV_IMM_REACH/2)
++		    break;
++		}
++
++	      s = expr_end;
++	      continue;
++
++	    case 'p':		/* pc relative offset */
++branch:
++	      *imm_reloc = BFD_RELOC_12_PCREL;
++	      my_getExpression (imm_expr, s);
++	      s = expr_end;
++	      continue;
++
++	    case 'u':		/* upper 20 bits */
++	      p = percent_op_utype;
++	      if (!my_getSmallExpression (imm_expr, imm_reloc, s, p)
++		  && imm_expr->X_op == O_constant)
++		{
++		  if (imm_expr->X_add_number < 0
++		      || imm_expr->X_add_number >= (signed)RISCV_BIGIMM_REACH)
++		    as_bad (_("lui expression not in range 0..1048575"));
++
++		  *imm_reloc = BFD_RELOC_RISCV_HI20;
++		  imm_expr->X_add_number <<= RISCV_IMM_BITS;
++		}
++	      s = expr_end;
++	      continue;
++
++	    case 'a':		/* 26 bit address */
++jump:
++	      my_getExpression (imm_expr, s);
++	      s = expr_end;
++	      *imm_reloc = BFD_RELOC_RISCV_JMP;
++	      continue;
++
++	    case 'c':
++	      my_getExpression (imm_expr, s);
++	      s = expr_end;
++	      if (strcmp (s, "@plt") == 0)
++		{
++		  *imm_reloc = BFD_RELOC_RISCV_CALL_PLT;
++		  s += 4;
++		}
++	      else
++		*imm_reloc = BFD_RELOC_RISCV_CALL;
++	      continue;
++
++	    default:
++	      as_fatal (_("internal error: bad argument type %c"), *args);
++	    }
++	  break;
++	}
++      s = argsStart;
++      error = _("illegal operands");
++    }
++
++out:
++  /* Restore the character we might have clobbered above.  */
++  if (save_c)
++    *(argsStart - 1) = save_c;
++
++  return error;
++}
++
++void
++md_assemble (char *str)
++{
++  struct riscv_cl_insn insn;
++  expressionS imm_expr;
++  bfd_reloc_code_real_type imm_reloc = BFD_RELOC_UNUSED;
++
++  const char *error = riscv_ip (str, &insn, &imm_expr, &imm_reloc);
++
++  if (error)
++    {
++      as_bad ("%s `%s'", error, str);
++      return;
++    }
++
++  if (insn.insn_mo->pinfo == INSN_MACRO)
++    macro (&insn, &imm_expr, &imm_reloc);
++  else
++    append_insn (&insn, &imm_expr, imm_reloc);
++}
++
++const char *
++md_atof (int type, char *litP, int *sizeP)
++{
++  return ieee_md_atof (type, litP, sizeP, TARGET_BYTES_BIG_ENDIAN);
++}
++
++void
++md_number_to_chars (char *buf, valueT val, int n)
++{
++  number_to_chars_littleendian (buf, val, n);
++}
++
++const char *md_shortopts = "O::g::G:";
++
++enum options {
++  OPTION_M32 = OPTION_MD_BASE,
++  OPTION_M64,
++  OPTION_MARCH,
++  OPTION_PIC,
++  OPTION_NO_PIC,
++  OPTION_MFLOAT_ABI,
++  OPTION_MRVC,
++  OPTION_MNO_RVC,
++  OPTION_END_OF_ENUM
++};
++
++struct option md_longopts[] =
++{
++  {"m32", no_argument, NULL, OPTION_M32},
++  {"m64", no_argument, NULL, OPTION_M64},
++  {"march", required_argument, NULL, OPTION_MARCH},
++  {"fPIC", no_argument, NULL, OPTION_PIC},
++  {"fpic", no_argument, NULL, OPTION_PIC},
++  {"fno-pic", no_argument, NULL, OPTION_NO_PIC},
++  {"mrvc", no_argument, NULL, OPTION_MRVC},
++  {"mno-rvc", no_argument, NULL, OPTION_MNO_RVC},
++  {"mfloat-abi", required_argument, NULL, OPTION_MFLOAT_ABI},
++
++  {NULL, no_argument, NULL, 0}
++};
++size_t md_longopts_size = sizeof (md_longopts);
++
++enum float_abi {
++  FLOAT_ABI_DEFAULT = -1,
++  FLOAT_ABI_SOFT,
++  FLOAT_ABI_SINGLE,
++  FLOAT_ABI_DOUBLE,
++  FLOAT_ABI_QUAD
++};
++static enum float_abi float_abi = FLOAT_ABI_DEFAULT;
++
++int
++md_parse_option (int c, const char *arg)
++{
++  switch (c)
++    {
++    case OPTION_MRVC:
++      riscv_set_rvc (TRUE);
++      break;
++
++    case OPTION_MNO_RVC:
++      riscv_set_rvc (FALSE);
++      break;
++
++    case OPTION_MFLOAT_ABI:
++      if (strcmp (arg, "soft") == 0)
++	float_abi = FLOAT_ABI_SOFT;
++      else if (strcmp (arg, "single") == 0)
++	float_abi = FLOAT_ABI_SINGLE;
++      else if (strcmp (arg, "double") == 0)
++	float_abi = FLOAT_ABI_DOUBLE;
++      else if (strcmp (arg, "quad") == 0)
++	float_abi = FLOAT_ABI_QUAD;
++      else
++	return 0;
++      break;
++
++    case OPTION_M32:
++      xlen = 32;
++      break;
++
++    case OPTION_M64:
++      xlen = 64;
++      break;
++
++    case OPTION_MARCH:
++      riscv_set_arch (arg);
++      break;
++
++    case OPTION_NO_PIC:
++      riscv_opts.pic = FALSE;
++      break;
++
++    case OPTION_PIC:
++      riscv_opts.pic = TRUE;
++      break;
++
++    default:
++      return 0;
++    }
++
++  return 1;
++}
++
++void
++riscv_after_parse_args (void)
++{
++  if (riscv_subsets == NULL)
++    riscv_set_arch ("RVIMAFDXcustom");
++
++  if (xlen == 0)
++    {
++      if (strcmp (default_arch, "riscv32") == 0)
++	xlen = 32;
++      else if (strcmp (default_arch, "riscv64") == 0)
++	xlen = 64;
++      else
++	as_bad ("unknown default architecture `%s'", default_arch);
++    }
++
++  if (float_abi == FLOAT_ABI_DEFAULT)
++    {
++      struct riscv_subset *subset;
++
++      /* Assume soft-float unless D extension is present.  */
++      float_abi = FLOAT_ABI_SOFT;
++
++      for (subset = riscv_subsets; subset != NULL; subset = subset->next)
++	if (strcasecmp (subset->name, "D") == 0)
++	  float_abi = FLOAT_ABI_DOUBLE;
++    }
++
++  /* Insert float_abi into the EF_RISCV_FLOAT_ABI field of elf_flags.  */
++  elf_flags |= float_abi * (EF_RISCV_FLOAT_ABI & ~(EF_RISCV_FLOAT_ABI << 1));
++}
++
++void
++riscv_init_after_args (void)
++{
++  /* initialize opcodes */
++  bfd_riscv_num_opcodes = bfd_riscv_num_builtin_opcodes;
++  riscv_opcodes = (struct riscv_opcode *) riscv_builtin_opcodes;
++}
++
++long
++md_pcrel_from (fixS *fixP)
++{
++  return fixP->fx_where + fixP->fx_frag->fr_address;
++}
++
++/* Apply a fixup to the object file.  */
++
++void
++md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
++{
++  bfd_byte *buf = (bfd_byte *) (fixP->fx_frag->fr_literal + fixP->fx_where);
++
++  /* Remember value for tc_gen_reloc.  */
++  fixP->fx_addnumber = *valP;
++
++  switch (fixP->fx_r_type)
++    {
++    case BFD_RELOC_RISCV_TLS_GOT_HI20:
++    case BFD_RELOC_RISCV_TLS_GD_HI20:
++    case BFD_RELOC_RISCV_TLS_DTPREL32:
++    case BFD_RELOC_RISCV_TLS_DTPREL64:
++    case BFD_RELOC_RISCV_TPREL_HI20:
++    case BFD_RELOC_RISCV_TPREL_LO12_I:
++    case BFD_RELOC_RISCV_TPREL_LO12_S:
++    case BFD_RELOC_RISCV_TPREL_ADD:
++      S_SET_THREAD_LOCAL (fixP->fx_addsy);
++      /* fall through */
++
++    case BFD_RELOC_RISCV_GOT_HI20:
++    case BFD_RELOC_RISCV_PCREL_HI20:
++    case BFD_RELOC_RISCV_HI20:
++    case BFD_RELOC_RISCV_LO12_I:
++    case BFD_RELOC_RISCV_LO12_S:
++    case BFD_RELOC_RISCV_ADD8:
++    case BFD_RELOC_RISCV_ADD16:
++    case BFD_RELOC_RISCV_ADD32:
++    case BFD_RELOC_RISCV_ADD64:
++    case BFD_RELOC_RISCV_SUB8:
++    case BFD_RELOC_RISCV_SUB16:
++    case BFD_RELOC_RISCV_SUB32:
++    case BFD_RELOC_RISCV_SUB64:
++      gas_assert (fixP->fx_addsy != NULL);
++      /* Nothing needed to do.  The value comes from the reloc entry.  */
++      break;
++
++    case BFD_RELOC_64:
++    case BFD_RELOC_32:
++    case BFD_RELOC_16:
++    case BFD_RELOC_8:
++      if (fixP->fx_addsy && fixP->fx_subsy)
++	{
++	  fixP->fx_next = xmemdup (fixP, sizeof (*fixP), sizeof (*fixP));
++	  fixP->fx_next->fx_addsy = fixP->fx_subsy;
++	  fixP->fx_next->fx_subsy = NULL;
++	  fixP->fx_next->fx_offset = 0;
++	  fixP->fx_subsy = NULL;
++
++	  if (fixP->fx_r_type == BFD_RELOC_64)
++	    {
++	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD64;
++	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB64;
++	    }
++	  else if (fixP->fx_r_type == BFD_RELOC_32)
++	    {
++	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD32;
++	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB32;
++	    }
++	  else if (fixP->fx_r_type == BFD_RELOC_16)
++	    {
++	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD16;
++	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB16;
++	    }
++	  else
++	    {
++	      fixP->fx_r_type = BFD_RELOC_RISCV_ADD8;
++	      fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB8;
++	    }
++	}
++      /* fall through */
++
++    case BFD_RELOC_RVA:
++      /* If we are deleting this reloc entry, we must fill in the
++	 value now.  This can happen if we have a .word which is not
++	 resolved when it appears but is later defined.  */
++      if (fixP->fx_addsy == NULL)
++	{
++	  gas_assert (fixP->fx_size <= sizeof (valueT));
++	  md_number_to_chars ((char *) buf, *valP, fixP->fx_size);
++	  fixP->fx_done = 1;
++	}
++      break;
++
++    case BFD_RELOC_RISCV_JMP:
++      if (fixP->fx_addsy)
++	{
++	  /* Fill in a tentative value to improve objdump readability.  */
++	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
++	  bfd_vma delta = target - md_pcrel_from (fixP);
++	  bfd_putl32 (bfd_getl32 (buf) | ENCODE_UJTYPE_IMM (delta), buf);
++	}
++      break;
++
++    case BFD_RELOC_12_PCREL:
++      if (fixP->fx_addsy)
++	{
++	  /* Fill in a tentative value to improve objdump readability.  */
++	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
++	  bfd_vma delta = target - md_pcrel_from (fixP);
++	  bfd_putl32 (bfd_getl32 (buf) | ENCODE_SBTYPE_IMM (delta), buf);
++	}
++      break;
++
++    case BFD_RELOC_RISCV_RVC_BRANCH:
++      if (fixP->fx_addsy)
++	{
++	  /* Fill in a tentative value to improve objdump readability.  */
++	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
++	  bfd_vma delta = target - md_pcrel_from (fixP);
++	  bfd_putl16 (bfd_getl16 (buf) | ENCODE_RVC_B_IMM (delta), buf);
++	}
++      break;
++
++    case BFD_RELOC_RISCV_RVC_JUMP:
++      if (fixP->fx_addsy)
++	{
++	  /* Fill in a tentative value to improve objdump readability.  */
++	  bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
++	  bfd_vma delta = target - md_pcrel_from (fixP);
++	  bfd_putl16 (bfd_getl16 (buf) | ENCODE_RVC_J_IMM (delta), buf);
++	}
++      break;
++
++    case BFD_RELOC_RISCV_PCREL_LO12_S:
++    case BFD_RELOC_RISCV_PCREL_LO12_I:
++    case BFD_RELOC_RISCV_CALL:
++    case BFD_RELOC_RISCV_CALL_PLT:
++    case BFD_RELOC_RISCV_ALIGN:
++      break;
++
++    default:
++      /* We ignore generic BFD relocations we don't know about.  */
++      if (bfd_reloc_type_lookup (stdoutput, fixP->fx_r_type) != NULL)
++	as_fatal (_("internal error: bad relocation #%d"), fixP->fx_r_type);
++    }
++}
++
++/* This structure is used to hold a stack of .option values.  */
++
++struct riscv_option_stack
++{
++  struct riscv_option_stack *next;
++  struct riscv_set_options options;
++};
++
++static struct riscv_option_stack *riscv_opts_stack;
++
++/* Handle the .option pseudo-op.  */
++
++static void
++s_riscv_option (int x ATTRIBUTE_UNUSED)
++{
++  char *name = input_line_pointer, ch;
++
++  while (!is_end_of_line[(unsigned char) *input_line_pointer])
++    ++input_line_pointer;
++  ch = *input_line_pointer;
++  *input_line_pointer = '\0';
++
++  if (strcmp (name, "rvc") == 0)
++    riscv_set_rvc (TRUE);
++  else if (strcmp (name, "norvc") == 0)
++    riscv_set_rvc (FALSE);
++  else if (strcmp (name, "pic") == 0)
++    riscv_opts.pic = TRUE;
++  else if (strcmp (name, "nopic") == 0)
++    riscv_opts.pic = FALSE;
++  else if (strcmp (name, "push") == 0)
++    {
++      struct riscv_option_stack *s;
++
++      s = (struct riscv_option_stack *) xmalloc (sizeof *s);
++      s->next = riscv_opts_stack;
++      s->options = riscv_opts;
++      riscv_opts_stack = s;
++    }
++  else if (strcmp (name, "pop") == 0)
++    {
++      struct riscv_option_stack *s;
++
++      s = riscv_opts_stack;
++      if (s == NULL)
++	as_bad (_(".option pop with no .option push"));
++      else
++	{
++	  riscv_opts = s->options;
++	  riscv_opts_stack = s->next;
++	  free (s);
++	}
++    }
++  else
++    {
++      as_warn (_("Unrecognized .option directive: %s\n"), name);
++    }
++  *input_line_pointer = ch;
++  demand_empty_rest_of_line ();
++}
++
++/* Handle the .dtprelword and .dtpreldword pseudo-ops.  They generate
++   a 32-bit or 64-bit DTP-relative relocation (BYTES says which) for
++   use in DWARF debug information.  */
++
++static void
++s_dtprel (int bytes)
++{
++  expressionS ex;
++  char *p;
++
++  expression (&ex);
++
++  if (ex.X_op != O_symbol)
++    {
++      as_bad (_("Unsupported use of %s"), (bytes == 8
++					   ? ".dtpreldword"
++					   : ".dtprelword"));
++      ignore_rest_of_line ();
++    }
++
++  p = frag_more (bytes);
++  md_number_to_chars (p, 0, bytes);
++  fix_new_exp (frag_now, p - frag_now->fr_literal, bytes, &ex, FALSE,
++	       (bytes == 8
++		? BFD_RELOC_RISCV_TLS_DTPREL64
++		: BFD_RELOC_RISCV_TLS_DTPREL32));
++
++  demand_empty_rest_of_line ();
++}
++
++/* Handle the .bss pseudo-op.  */
++
++static void
++s_bss (int ignore ATTRIBUTE_UNUSED)
++{
++  subseg_set (bss_section, 0);
++  demand_empty_rest_of_line ();
++}
++
++/* Align to a given power of two.  */
++
++static void
++s_align (int bytes_p)
++{
++  int fill_value = 0, fill_value_specified = 0;
++  int min_text_alignment = riscv_opts.rvc ? 2 : 4;
++  int alignment = get_absolute_expression (), bytes;
++
++  if (bytes_p)
++    {
++      bytes = alignment;
++      if (bytes < 1 || (bytes & (bytes-1)) != 0)
++	as_bad (_("alignment not a power of 2: %d"), bytes);
++      for (alignment = 0; bytes > 1; bytes >>= 1)
++	alignment++;
++    }
++
++  bytes = 1 << alignment;
++
++  if (alignment < 0 || alignment > 31)
++    as_bad (_("unsatisfiable alignment: %d"), alignment);
++
++  if (*input_line_pointer == ',')
++    {
++      ++input_line_pointer;
++      fill_value = get_absolute_expression ();
++      fill_value_specified = 1;
++    }
++
++  if (!fill_value_specified
++      && subseg_text_p (now_seg)
++      && bytes > min_text_alignment)
++    {
++      /* Emit the worst-case NOP string.  The linker will delete any
++	 unnecessary NOPs.  This allows us to support code alignment
++	 in spite of linker relaxations.  */
++      bfd_vma i, worst_case_bytes = bytes - min_text_alignment;
++      char *nops = frag_more (worst_case_bytes);
++      for (i = 0; i < worst_case_bytes - 2; i += 4)
++	md_number_to_chars (nops + i, RISCV_NOP, 4);
++      if (i < worst_case_bytes)
++	md_number_to_chars (nops + i, RVC_NOP, 2);
++
++      expressionS ex;
++      ex.X_op = O_constant;
++      ex.X_add_number = worst_case_bytes;
++
++      fix_new_exp (frag_now, nops - frag_now->fr_literal, 0,
++		   &ex, FALSE, BFD_RELOC_RISCV_ALIGN);
++    }
++  else if (alignment)
++    frag_align (alignment, fill_value, 0);
++
++  record_alignment (now_seg, alignment);
++
++  demand_empty_rest_of_line ();
++}
++
++int
++md_estimate_size_before_relax (fragS *fragp, asection *segtype)
++{
++  return (fragp->fr_var = relaxed_branch_length (fragp, segtype, FALSE));
++}
++
++/* Translate internal representation of relocation info to BFD target
++   format.  */
++
++arelent *
++tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
++{
++  arelent *reloc = (arelent *) xmalloc (sizeof (arelent));
++
++  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
++  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
++  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
++  reloc->addend = fixp->fx_addnumber;
++
++  reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
++  if (reloc->howto == NULL)
++    {
++      if ((fixp->fx_r_type == BFD_RELOC_16 || fixp->fx_r_type == BFD_RELOC_8)
++	  && fixp->fx_addsy != NULL && fixp->fx_subsy != NULL)
++	{
++	  /* We don't have R_RISCV_8/16, but for this special case,
++	     we can use R_RISCV_ADD8/16 with R_RISCV_SUB8/16.  */
++	  return reloc;
++	}
++
++      as_bad_where (fixp->fx_file, fixp->fx_line,
++		    _("cannot represent %s relocation in object file"),
++		    bfd_get_reloc_code_name (fixp->fx_r_type));
++      return NULL;
++    }
++
++  return reloc;
++}
++
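++/* Relax a machine dependent frag.  This returns the amount by which
++   the current size of the frag should change.  */
++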
++int
++riscv_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
++{
++  if (RELAX_BRANCH_P (fragp->fr_subtype))
++    {
++      offsetT old_var = fragp->fr_var;
++      fragp->fr_var = relaxed_branch_length (fragp, sec, TRUE);
++      return fragp->fr_var - old_var;
++    }
++
++  return 0;
++}
++
++/* Expand far branches to multi-instruction sequences.  */
++
++static void
++md_convert_frag_branch (fragS *fragp)
++{
++  bfd_byte *buf;
++  expressionS exp;
++  fixS *fixp;
++  insn_t insn;
++  int rs1, reloc;
++
++  buf = (bfd_byte *)fragp->fr_literal + fragp->fr_fix;
++
++  exp.X_op = O_symbol;
++  exp.X_add_symbol = fragp->fr_symbol;
++  exp.X_add_number = fragp->fr_offset;
++
++  gas_assert (fragp->fr_var == RELAX_BRANCH_LENGTH (fragp->fr_subtype));
++
++  if (RELAX_BRANCH_RVC (fragp->fr_subtype))
++    {
++      switch (RELAX_BRANCH_LENGTH (fragp->fr_subtype))
++	{
++	  case 8:
++	  case 4:
++	    /* Expand the RVC branch into a RISC-V one.  */
++	    insn = bfd_getl16 (buf);
++	    rs1 = 8 + ((insn >> OP_SH_CRS1S) & OP_MASK_CRS1S);
++	    if ((insn & MASK_C_J) == MATCH_C_J)
++	      insn = MATCH_JAL;
++	    else if ((insn & MASK_C_JAL) == MATCH_C_JAL)
++	      insn = MATCH_JAL | (X_RA << OP_SH_RD);
++	    else if ((insn & MASK_C_BEQZ) == MATCH_C_BEQZ)
++	      insn = MATCH_BEQ | (rs1 << OP_SH_RS1);
++	    else if ((insn & MASK_C_BNEZ) == MATCH_C_BNEZ)
++	      insn = MATCH_BNE | (rs1 << OP_SH_RS1);
++	    else
++	      abort ();
++	    bfd_putl32 (insn, buf);
++	    break;
++
++	  case 6:
++	    /* Invert the branch condition.  Branch over the jump.  */
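++	    /* The target is 6 bytes ahead: the 2-byte inverted branch itself
++	       plus the 4-byte JAL emitted at the "jump" label below.  */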
++	    insn = bfd_getl16 (buf);
++	    insn ^= MATCH_C_BEQZ ^ MATCH_C_BNEZ;
++	    insn |= ENCODE_RVC_B_IMM (6);
++	    bfd_putl16 (insn, buf);
++	    buf += 2;
++	    goto jump;
++
++	  case 2:
++	    /* Just keep the RVC branch.  */
++	    reloc = RELAX_BRANCH_UNCOND (fragp->fr_subtype)
++		    ? BFD_RELOC_RISCV_RVC_JUMP : BFD_RELOC_RISCV_RVC_BRANCH;
++	    fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
++				2, &exp, FALSE, reloc);
++	    buf += 2;
++	    goto done;
++
++	  default:
++	    abort();
++	}
++    }
++
++  switch (RELAX_BRANCH_LENGTH (fragp->fr_subtype))
++    {
++    case 8:
++      gas_assert (!RELAX_BRANCH_UNCOND (fragp->fr_subtype));
++
++      /* Invert the branch condition.  Branch over the jump.  */
++      insn = bfd_getl32 (buf);
++      insn ^= MATCH_BEQ ^ MATCH_BNE;
++      insn |= ENCODE_SBTYPE_IMM (8);
++      md_number_to_chars ((char *) buf, insn, 4);
++      buf += 4;
++
++jump:
++      /* Jump to the target.  */
++      fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
++			  4, &exp, FALSE, BFD_RELOC_RISCV_JMP);
++      md_number_to_chars ((char *) buf, MATCH_JAL, 4);
++      buf += 4;
++      break;
++
++    case 4:
++      reloc = RELAX_BRANCH_UNCOND (fragp->fr_subtype)
++	      ? BFD_RELOC_RISCV_JMP : BFD_RELOC_12_PCREL;
++      fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
++			  4, &exp, FALSE, reloc);
++      buf += 4;
++      break;
++
++    default:
++      abort ();
++    }
++
++done:
++  fixp->fx_file = fragp->fr_file;
++  fixp->fx_line = fragp->fr_line;
++
++  gas_assert (buf == (bfd_byte *)fragp->fr_literal
++	      + fragp->fr_fix + fragp->fr_var);
++
++  fragp->fr_fix += fragp->fr_var;
++}
++
++/* Convert a machine dependent frag into its final form, now that all
++   relaxation decisions have been made.  */
++
++void
++md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT asec ATTRIBUTE_UNUSED,
++		 fragS *fragp)
++{
++  gas_assert (RELAX_BRANCH_P (fragp->fr_subtype));
++  md_convert_frag_branch (fragp);
++}
++
++void
++md_show_usage (FILE *stream)
++{
++  fprintf (stream, _("\
++RISC-V options:\n\
++  -m32           assemble RV32 code\n\
++  -m64           assemble RV64 code (default)\n\
++  -fpic          generate position-independent code\n\
++  -fno-pic       don't generate position-independent code (default)\n\
++"));
++}
++
++/* Standard calling conventions leave the CFA at SP on entry.  */
++void
++riscv_cfi_frame_initial_instructions (void)
++{
++  cfi_add_CFA_def_cfa_register (X_SP);
++}
++
++int
++tc_riscv_regname_to_dw2regnum (char *regname)
++{
++  int reg;
++
++  if ((reg = reg_lookup_internal (regname, RCLASS_GPR)) >= 0)
++    return reg;
++
++  if ((reg = reg_lookup_internal (regname, RCLASS_FPR)) >= 0)
++    return reg + 32;
++
++  as_bad (_("unknown register `%s'"), regname);
++  return -1;
++}
++
++void
++riscv_elf_final_processing (void)
++{
++  elf_elfheader (stdoutput)->e_flags |= elf_flags;
++}
++
++/* Parse the .sleb128 and .uleb128 pseudos.  Only allow constant expressions,
++   since these directives break relaxation when used with symbol deltas.  */
++
++static void
++s_riscv_leb128 (int sign)
++{
++  expressionS exp;
++  char *save_in = input_line_pointer;
++
++  expression (&exp);
++  if (exp.X_op != O_constant)
++    as_bad (_("non-constant .%cleb128 is not supported"), sign ? 's' : 'u');
++  demand_empty_rest_of_line ();
++
++  input_line_pointer = save_in;
++  return s_leb128 (sign);
++}
++
++/* Pseudo-op table.  */
++
++static const pseudo_typeS riscv_pseudo_table[] =
++{
++  /* RISC-V-specific pseudo-ops.  */
++  {"option", s_riscv_option, 0},
++  {"half", cons, 2},
++  {"word", cons, 4},
++  {"dword", cons, 8},
++  {"dtprelword", s_dtprel, 4},
++  {"dtpreldword", s_dtprel, 8},
++  {"bss", s_bss, 0},
++  {"align", s_align, 0},
++  {"p2align", s_align, 0},
++  {"balign", s_align, 1},
++  {"uleb128", s_riscv_leb128, 0},
++  {"sleb128", s_riscv_leb128, 1},
++
++  { NULL, NULL, 0 },
++};
++
++void
++riscv_pop_insert (void)
++{
++  extern void pop_insert (const pseudo_typeS *);
++
++  pop_insert (riscv_pseudo_table);
++}
+diff --git original-binutils/gas/config/tc-riscv.h binutils-2_27/gas/config/tc-riscv.h
+new file mode 100644
+index 0000000..ed654ad
+--- /dev/null
++++ binutils-2_27/gas/config/tc-riscv.h
+@@ -0,0 +1,102 @@
++/* tc-riscv.h -- header file for tc-riscv.c.
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target.
++
++   This file is part of GAS.
++
++   GAS is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GAS is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#ifndef TC_RISCV
++#define TC_RISCV
++
++#include "opcode/riscv.h"
++
++struct frag;
++struct expressionS;
++
++#define TARGET_BYTES_BIG_ENDIAN 0
++
++#define TARGET_ARCH bfd_arch_riscv
++
++#define WORKING_DOT_WORD	1
++#define LOCAL_LABELS_FB 1
++
++/* Symbols named FAKE_LABEL_NAME are emitted when generating DWARF, so make
++   sure FAKE_LABEL_NAME is printable.  It still must be distinct from any
++   real label name.  So, append a space, which other labels can't contain.  */
++#define FAKE_LABEL_NAME ".L0 "
++
++#define md_relax_frag(segment, fragp, stretch) \
++  riscv_relax_frag(segment, fragp, stretch)
++extern int riscv_relax_frag (asection *, struct frag *, long);
++
++#define md_section_align(seg,size)	(size)
++#define md_undefined_symbol(name)	(0)
++#define md_operand(x)
++
++/* FIXME: it is unclear if this is used, or if it is even correct.  */
++#define MAX_MEM_FOR_RS_ALIGN_CODE  (1 + 2)
++
++/* The ISA of the target may change based on command-line arguments.  */
++#define TARGET_FORMAT riscv_target_format()
++extern const char *riscv_target_format (void);
++
++#define md_after_parse_args() riscv_after_parse_args()
++extern void riscv_after_parse_args (void);
++
++#define tc_init_after_args() riscv_init_after_args()
++extern void riscv_init_after_args (void);
++
++#define md_parse_long_option(arg) riscv_parse_long_option (arg)
++extern int riscv_parse_long_option (const char *);
++
++/* Let the linker resolve all the relocs due to relaxation.  */
++#define tc_fix_adjustable(fixp) 0
++#define md_allow_local_subtract(l,r,s) 0
++
++/* Values passed to md_apply_fix don't include symbol values.  */
++#define MD_APPLY_SYM_VALUE(FIX) 0
++
++/* Global syms must not be resolved, to support ELF shared libraries.  */
++#define EXTERN_FORCE_RELOC			\
++  (OUTPUT_FLAVOR == bfd_target_elf_flavour)
++
++#define TC_FORCE_RELOCATION_SUB_SAME(FIX, SEG) ((SEG)->flags & SEC_CODE)
++#define TC_FORCE_RELOCATION_SUB_LOCAL(FIX, SEG) 1
++#define TC_VALIDATE_FIX_SUB(FIX, SEG) 1
++#define TC_FORCE_RELOCATION_LOCAL(FIX) 1
++#define DIFF_EXPR_OK 1
++
++extern void riscv_pop_insert (void);
++#define md_pop_insert()		riscv_pop_insert()
++
++#define TARGET_USE_CFIPOP 1
++
++#define tc_cfi_frame_initial_instructions riscv_cfi_frame_initial_instructions
++extern void riscv_cfi_frame_initial_instructions (void);
++
++#define tc_regname_to_dw2regnum tc_riscv_regname_to_dw2regnum
++extern int tc_riscv_regname_to_dw2regnum (char *regname);
++
++extern unsigned xlen;
++#define DWARF2_DEFAULT_RETURN_COLUMN X_RA
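++/* Frame-unwind data offsets are scaled by the negative word size in bytes
++   (-4 for RV32, -8 for RV64).  */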
++#define DWARF2_CIE_DATA_ALIGNMENT (-(int) (xlen / 8))
++
++#define elf_tc_final_processing riscv_elf_final_processing
++extern void riscv_elf_final_processing (void);
++
++#endif /* TC_RISCV */
+diff --git original-binutils/gas/configure binutils-2_27/gas/configure
+index 7b48a58..3b4bf25 100755
+--- original-binutils/gas/configure
++++ binutils-2_27/gas/configure
+@@ -12508,7 +12508,7 @@ $as_echo "#define NDS32_DEFAULT_AUDIO_EXT 1" >>confdefs.h
+ $as_echo "$enable_audio_ext" >&6; }
+ 	;;
+ 
+-      i386 | s390 | sparc)
++      i386 | riscv | s390 | sparc)
+ 	if test $this_target = $target ; then
+ 
+ cat >>confdefs.h <<_ACEOF
+diff --git original-binutils/gas/configure.ac binutils-2_27/gas/configure.ac
+index 13f5107..c0d5989 100644
+--- original-binutils/gas/configure.ac
++++ binutils-2_27/gas/configure.ac
+@@ -510,7 +510,7 @@ changequote([,])dnl
+ 	AC_MSG_RESULT($enable_audio_ext)
+ 	;;
+ 
+-      i386 | s390 | sparc)
++      i386 | riscv | s390 | sparc)
+ 	if test $this_target = $target ; then
+ 	  AC_DEFINE_UNQUOTED(DEFAULT_ARCH, "${arch}", [Default architecture.])
+ 	fi
+diff --git original-binutils/gas/configure.tgt binutils-2_27/gas/configure.tgt
+index e2df659..3e230cb 100644
+--- original-binutils/gas/configure.tgt
++++ binutils-2_27/gas/configure.tgt
+@@ -87,6 +87,8 @@ case ${cpu} in
+   pj*)			cpu_type=pj endian=big ;;
+   powerpc*le*)		cpu_type=ppc endian=little ;;
+   powerpc*)		cpu_type=ppc endian=big ;;
++  riscv32*)		cpu_type=riscv endian=little arch=riscv32 ;;
++  riscv64*)		cpu_type=riscv endian=little arch=riscv64 ;;
+   rs6000*)		cpu_type=ppc ;;
+   rl78*)		cpu_type=rl78 ;;
+   rx)			cpu_type=rx ;;
+@@ -391,6 +393,8 @@ case ${generic_target} in
+   ppc-*-kaos*)				fmt=elf ;;
+   ppc-*-lynxos*)			fmt=elf em=lynx ;;
+ 
++  riscv*-*-*)			fmt=elf endian=little em=linux ;;
++
+   s390-*-linux-*)			fmt=elf em=linux ;;
+   s390-*-tpf*)				fmt=elf ;;
+ 
+@@ -488,7 +492,7 @@ case ${generic_target} in
+ esac
+ 
+ case ${cpu_type} in
+-  aarch64 | alpha | arm | i386 | ia64 | microblaze | mips | ns32k | or1k | or1knd | pdp11 | ppc | sparc | z80 | z8k)
++  aarch64 | alpha | arm | i386 | ia64 | microblaze | mips | ns32k | or1k | or1knd | pdp11 | ppc | riscv | sparc | z80 | z8k)
+     bfd_gas=yes
+     ;;
+ esac
+diff --git original-binutils/include/dis-asm.h binutils-2_27/include/dis-asm.h
+index 60bbc8d..09b7b19 100644
+--- original-binutils/include/dis-asm.h
++++ binutils-2_27/include/dis-asm.h
+@@ -263,6 +263,7 @@ extern int print_insn_little_arm	(bfd_vma, disassemble_info *);
+ extern int print_insn_little_mips	(bfd_vma, disassemble_info *);
+ extern int print_insn_little_nios2	(bfd_vma, disassemble_info *);
+ extern int print_insn_little_powerpc	(bfd_vma, disassemble_info *);
++extern int print_insn_riscv		(bfd_vma, disassemble_info *);
+ extern int print_insn_little_score      (bfd_vma, disassemble_info *); 
+ extern int print_insn_lm32		(bfd_vma, disassemble_info *);
+ extern int print_insn_m32c	        (bfd_vma, disassemble_info *);
+@@ -327,6 +328,7 @@ extern void print_aarch64_disassembler_options (FILE *);
+ extern void print_i386_disassembler_options (FILE *);
+ extern void print_mips_disassembler_options (FILE *);
+ extern void print_ppc_disassembler_options (FILE *);
++extern void print_riscv_disassembler_options (FILE *);
+ extern void print_arm_disassembler_options (FILE *);
+ extern void parse_arm_disassembler_option (char *);
+ extern void print_s390_disassembler_options (FILE *);
+diff --git original-binutils/include/elf/common.h binutils-2_27/include/elf/common.h
+index d2da009d..5b2c3ef 100644
+--- original-binutils/include/elf/common.h
++++ binutils-2_27/include/elf/common.h
+@@ -306,6 +306,7 @@
+ #define EM_VISIUM	221	/* Controls and Data Services VISIUMcore processor */
+ #define EM_FT32         222     /* FTDI Chip FT32 high performance 32-bit RISC architecture */
+ #define EM_MOXIE        223     /* Moxie processor family */
++#define EM_RISCV	243	/* RISC-V */
+ 
+ /* If it is necessary to assign new unofficial EM_* values, please pick large
+    random numbers (0x8523, 0xa7f2, etc.) to minimize the chances of collision
+diff --git original-binutils/include/elf/riscv.h binutils-2_27/include/elf/riscv.h
+new file mode 100644
+index 0000000..4ac9c4b
+--- /dev/null
++++ binutils-2_27/include/elf/riscv.h
+@@ -0,0 +1,104 @@
++/* RISC-V ELF support for BFD.
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman <waterman at cs.berkeley.edu> at UC Berkeley.
++   Based on MIPS ELF support for BFD, by Ian Lance Taylor.
++
++   This file is part of BFD, the Binary File Descriptor library.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++/* This file holds definitions specific to the RISC-V ELF ABI.  Note
++   that most of this is not actually implemented by BFD.  */
++
++#ifndef _ELF_RISCV_H
++#define _ELF_RISCV_H
++
++#include "elf/reloc-macros.h"
++#include "libiberty.h"
++
++/* Relocation types.  */
++START_RELOC_NUMBERS (elf_riscv_reloc_type)
++  /* Relocation types used by the dynamic linker.  */
++  RELOC_NUMBER (R_RISCV_NONE, 0)
++  RELOC_NUMBER (R_RISCV_32, 1)
++  RELOC_NUMBER (R_RISCV_64, 2)
++  RELOC_NUMBER (R_RISCV_RELATIVE, 3)
++  RELOC_NUMBER (R_RISCV_COPY, 4)
++  RELOC_NUMBER (R_RISCV_JUMP_SLOT, 5)
++  RELOC_NUMBER (R_RISCV_TLS_DTPMOD32, 6)
++  RELOC_NUMBER (R_RISCV_TLS_DTPMOD64, 7)
++  RELOC_NUMBER (R_RISCV_TLS_DTPREL32, 8)
++  RELOC_NUMBER (R_RISCV_TLS_DTPREL64, 9)
++  RELOC_NUMBER (R_RISCV_TLS_TPREL32, 10)
++  RELOC_NUMBER (R_RISCV_TLS_TPREL64, 11)
++
++  /* Relocation types not used by the dynamic linker.  */
++  RELOC_NUMBER (R_RISCV_BRANCH, 16)
++  RELOC_NUMBER (R_RISCV_JAL, 17)
++  RELOC_NUMBER (R_RISCV_CALL, 18)
++  RELOC_NUMBER (R_RISCV_CALL_PLT, 19)
++  RELOC_NUMBER (R_RISCV_GOT_HI20, 20)
++  RELOC_NUMBER (R_RISCV_TLS_GOT_HI20, 21)
++  RELOC_NUMBER (R_RISCV_TLS_GD_HI20, 22)
++  RELOC_NUMBER (R_RISCV_PCREL_HI20, 23)
++  RELOC_NUMBER (R_RISCV_PCREL_LO12_I, 24)
++  RELOC_NUMBER (R_RISCV_PCREL_LO12_S, 25)
++  RELOC_NUMBER (R_RISCV_HI20, 26)
++  RELOC_NUMBER (R_RISCV_LO12_I, 27)
++  RELOC_NUMBER (R_RISCV_LO12_S, 28)
++  RELOC_NUMBER (R_RISCV_TPREL_HI20, 29)
++  RELOC_NUMBER (R_RISCV_TPREL_LO12_I, 30)
++  RELOC_NUMBER (R_RISCV_TPREL_LO12_S, 31)
++  RELOC_NUMBER (R_RISCV_TPREL_ADD, 32)
++  RELOC_NUMBER (R_RISCV_ADD8, 33)
++  RELOC_NUMBER (R_RISCV_ADD16, 34)
++  RELOC_NUMBER (R_RISCV_ADD32, 35)
++  RELOC_NUMBER (R_RISCV_ADD64, 36)
++  RELOC_NUMBER (R_RISCV_SUB8, 37)
++  RELOC_NUMBER (R_RISCV_SUB16, 38)
++  RELOC_NUMBER (R_RISCV_SUB32, 39)
++  RELOC_NUMBER (R_RISCV_SUB64, 40)
++  RELOC_NUMBER (R_RISCV_GNU_VTINHERIT, 41)
++  RELOC_NUMBER (R_RISCV_GNU_VTENTRY, 42)
++  RELOC_NUMBER (R_RISCV_ALIGN, 43)
++  RELOC_NUMBER (R_RISCV_RVC_BRANCH, 44)
++  RELOC_NUMBER (R_RISCV_RVC_JUMP, 45)
++  RELOC_NUMBER (R_RISCV_RVC_LUI, 46)
++  RELOC_NUMBER (R_RISCV_GPREL_I, 47)
++  RELOC_NUMBER (R_RISCV_GPREL_S, 48)
++END_RELOC_NUMBERS (R_RISCV_max)
++
++/* Processor specific flags for the ELF header e_flags field.  */
++
++/* File may contain compressed instructions.  */
++#define EF_RISCV_RVC 0x0001
++
++/* Which floating-point ABI a file uses.  */
++#define EF_RISCV_FLOAT_ABI 0x0006
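++/* The float ABI occupies bits 1-2 of e_flags; the four values below all
++   fit within this two-bit field.  */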
++
++/* File uses the soft-float ABI.  */
++#define EF_RISCV_FLOAT_ABI_SOFT 0x0000
++
++/* File uses the single-float ABI.  */
++#define EF_RISCV_FLOAT_ABI_SINGLE 0x0002
++
++/* File uses the double-float ABI.  */
++#define EF_RISCV_FLOAT_ABI_DOUBLE 0x0004
++
++/* File uses the quad-float ABI.  */
++#define EF_RISCV_FLOAT_ABI_QUAD 0x0006
++
++#endif /* _ELF_RISCV_H */
+diff --git original-binutils/include/opcode/riscv-opc.h binutils-2_27/include/opcode/riscv-opc.h
+new file mode 100644
+index 0000000..4369eac
+--- /dev/null
++++ binutils-2_27/include/opcode/riscv-opc.h
+@@ -0,0 +1,1160 @@
++/* Automatically generated by parse-opcodes */
++#ifndef RISCV_ENCODING_H
++#define RISCV_ENCODING_H
++#define MATCH_SLLI_RV32 0x1013
++#define MASK_SLLI_RV32  0xfe00707f
++#define MATCH_SRLI_RV32 0x5013
++#define MASK_SRLI_RV32  0xfe00707f
++#define MATCH_SRAI_RV32 0x40005013
++#define MASK_SRAI_RV32  0xfe00707f
++#define MATCH_FRFLAGS 0x102073
++#define MASK_FRFLAGS  0xfffff07f
++#define MATCH_FSFLAGS 0x101073
++#define MASK_FSFLAGS  0xfff0707f
++#define MATCH_FSFLAGSI 0x105073
++#define MASK_FSFLAGSI  0xfff0707f
++#define MATCH_FRRM 0x202073
++#define MASK_FRRM  0xfffff07f
++#define MATCH_FSRM 0x201073
++#define MASK_FSRM  0xfff0707f
++#define MATCH_FSRMI 0x205073
++#define MASK_FSRMI  0xfff0707f
++#define MATCH_FSCSR 0x301073
++#define MASK_FSCSR  0xfff0707f
++#define MATCH_FRCSR 0x302073
++#define MASK_FRCSR  0xfffff07f
++#define MATCH_RDCYCLE 0xc0002073
++#define MASK_RDCYCLE  0xfffff07f
++#define MATCH_RDTIME 0xc0102073
++#define MASK_RDTIME  0xfffff07f
++#define MATCH_RDINSTRET 0xc0202073
++#define MASK_RDINSTRET  0xfffff07f
++#define MATCH_RDCYCLEH 0xc8002073
++#define MASK_RDCYCLEH  0xfffff07f
++#define MATCH_RDTIMEH 0xc8102073
++#define MASK_RDTIMEH  0xfffff07f
++#define MATCH_RDINSTRETH 0xc8202073
++#define MASK_RDINSTRETH  0xfffff07f
++#define MATCH_SCALL 0x73
++#define MASK_SCALL  0xffffffff
++#define MATCH_SBREAK 0x100073
++#define MASK_SBREAK  0xffffffff
++#define MATCH_BEQ 0x63
++#define MASK_BEQ  0x707f
++#define MATCH_BNE 0x1063
++#define MASK_BNE  0x707f
++#define MATCH_BLT 0x4063
++#define MASK_BLT  0x707f
++#define MATCH_BGE 0x5063
++#define MASK_BGE  0x707f
++#define MATCH_BLTU 0x6063
++#define MASK_BLTU  0x707f
++#define MATCH_BGEU 0x7063
++#define MASK_BGEU  0x707f
++#define MATCH_JALR 0x67
++#define MASK_JALR  0x707f
++#define MATCH_JAL 0x6f
++#define MASK_JAL  0x7f
++#define MATCH_LUI 0x37
++#define MASK_LUI  0x7f
++#define MATCH_AUIPC 0x17
++#define MASK_AUIPC  0x7f
++#define MATCH_ADDI 0x13
++#define MASK_ADDI  0x707f
++#define MATCH_SLLI 0x1013
++#define MASK_SLLI  0xfc00707f
++#define MATCH_SLTI 0x2013
++#define MASK_SLTI  0x707f
++#define MATCH_SLTIU 0x3013
++#define MASK_SLTIU  0x707f
++#define MATCH_XORI 0x4013
++#define MASK_XORI  0x707f
++#define MATCH_SRLI 0x5013
++#define MASK_SRLI  0xfc00707f
++#define MATCH_SRAI 0x40005013
++#define MASK_SRAI  0xfc00707f
++#define MATCH_ORI 0x6013
++#define MASK_ORI  0x707f
++#define MATCH_ANDI 0x7013
++#define MASK_ANDI  0x707f
++#define MATCH_ADD 0x33
++#define MASK_ADD  0xfe00707f
++#define MATCH_SUB 0x40000033
++#define MASK_SUB  0xfe00707f
++#define MATCH_SLL 0x1033
++#define MASK_SLL  0xfe00707f
++#define MATCH_SLT 0x2033
++#define MASK_SLT  0xfe00707f
++#define MATCH_SLTU 0x3033
++#define MASK_SLTU  0xfe00707f
++#define MATCH_XOR 0x4033
++#define MASK_XOR  0xfe00707f
++#define MATCH_SRL 0x5033
++#define MASK_SRL  0xfe00707f
++#define MATCH_SRA 0x40005033
++#define MASK_SRA  0xfe00707f
++#define MATCH_OR 0x6033
++#define MASK_OR  0xfe00707f
++#define MATCH_AND 0x7033
++#define MASK_AND  0xfe00707f
++#define MATCH_ADDIW 0x1b
++#define MASK_ADDIW  0x707f
++#define MATCH_SLLIW 0x101b
++#define MASK_SLLIW  0xfe00707f
++#define MATCH_SRLIW 0x501b
++#define MASK_SRLIW  0xfe00707f
++#define MATCH_SRAIW 0x4000501b
++#define MASK_SRAIW  0xfe00707f
++#define MATCH_ADDW 0x3b
++#define MASK_ADDW  0xfe00707f
++#define MATCH_SUBW 0x4000003b
++#define MASK_SUBW  0xfe00707f
++#define MATCH_SLLW 0x103b
++#define MASK_SLLW  0xfe00707f
++#define MATCH_SRLW 0x503b
++#define MASK_SRLW  0xfe00707f
++#define MATCH_SRAW 0x4000503b
++#define MASK_SRAW  0xfe00707f
++#define MATCH_LB 0x3
++#define MASK_LB  0x707f
++#define MATCH_LH 0x1003
++#define MASK_LH  0x707f
++#define MATCH_LW 0x2003
++#define MASK_LW  0x707f
++#define MATCH_LD 0x3003
++#define MASK_LD  0x707f
++#define MATCH_LBU 0x4003
++#define MASK_LBU  0x707f
++#define MATCH_LHU 0x5003
++#define MASK_LHU  0x707f
++#define MATCH_LWU 0x6003
++#define MASK_LWU  0x707f
++#define MATCH_SB 0x23
++#define MASK_SB  0x707f
++#define MATCH_SH 0x1023
++#define MASK_SH  0x707f
++#define MATCH_SW 0x2023
++#define MASK_SW  0x707f
++#define MATCH_SD 0x3023
++#define MASK_SD  0x707f
++#define MATCH_FENCE 0xf
++#define MASK_FENCE  0x707f
++#define MATCH_FENCE_I 0x100f
++#define MASK_FENCE_I  0x707f
++#define MATCH_MUL 0x2000033
++#define MASK_MUL  0xfe00707f
++#define MATCH_MULH 0x2001033
++#define MASK_MULH  0xfe00707f
++#define MATCH_MULHSU 0x2002033
++#define MASK_MULHSU  0xfe00707f
++#define MATCH_MULHU 0x2003033
++#define MASK_MULHU  0xfe00707f
++#define MATCH_DIV 0x2004033
++#define MASK_DIV  0xfe00707f
++#define MATCH_DIVU 0x2005033
++#define MASK_DIVU  0xfe00707f
++#define MATCH_REM 0x2006033
++#define MASK_REM  0xfe00707f
++#define MATCH_REMU 0x2007033
++#define MASK_REMU  0xfe00707f
++#define MATCH_MULW 0x200003b
++#define MASK_MULW  0xfe00707f
++#define MATCH_DIVW 0x200403b
++#define MASK_DIVW  0xfe00707f
++#define MATCH_DIVUW 0x200503b
++#define MASK_DIVUW  0xfe00707f
++#define MATCH_REMW 0x200603b
++#define MASK_REMW  0xfe00707f
++#define MATCH_REMUW 0x200703b
++#define MASK_REMUW  0xfe00707f
++#define MATCH_AMOADD_W 0x202f
++#define MASK_AMOADD_W  0xf800707f
++#define MATCH_AMOXOR_W 0x2000202f
++#define MASK_AMOXOR_W  0xf800707f
++#define MATCH_AMOOR_W 0x4000202f
++#define MASK_AMOOR_W  0xf800707f
++#define MATCH_AMOAND_W 0x6000202f
++#define MASK_AMOAND_W  0xf800707f
++#define MATCH_AMOMIN_W 0x8000202f
++#define MASK_AMOMIN_W  0xf800707f
++#define MATCH_AMOMAX_W 0xa000202f
++#define MASK_AMOMAX_W  0xf800707f
++#define MATCH_AMOMINU_W 0xc000202f
++#define MASK_AMOMINU_W  0xf800707f
++#define MATCH_AMOMAXU_W 0xe000202f
++#define MASK_AMOMAXU_W  0xf800707f
++#define MATCH_AMOSWAP_W 0x800202f
++#define MASK_AMOSWAP_W  0xf800707f
++#define MATCH_LR_W 0x1000202f
++#define MASK_LR_W  0xf9f0707f
++#define MATCH_SC_W 0x1800202f
++#define MASK_SC_W  0xf800707f
++#define MATCH_AMOADD_D 0x302f
++#define MASK_AMOADD_D  0xf800707f
++#define MATCH_AMOXOR_D 0x2000302f
++#define MASK_AMOXOR_D  0xf800707f
++#define MATCH_AMOOR_D 0x4000302f
++#define MASK_AMOOR_D  0xf800707f
++#define MATCH_AMOAND_D 0x6000302f
++#define MASK_AMOAND_D  0xf800707f
++#define MATCH_AMOMIN_D 0x8000302f
++#define MASK_AMOMIN_D  0xf800707f
++#define MATCH_AMOMAX_D 0xa000302f
++#define MASK_AMOMAX_D  0xf800707f
++#define MATCH_AMOMINU_D 0xc000302f
++#define MASK_AMOMINU_D  0xf800707f
++#define MATCH_AMOMAXU_D 0xe000302f
++#define MASK_AMOMAXU_D  0xf800707f
++#define MATCH_AMOSWAP_D 0x800302f
++#define MASK_AMOSWAP_D  0xf800707f
++#define MATCH_LR_D 0x1000302f
++#define MASK_LR_D  0xf9f0707f
++#define MATCH_SC_D 0x1800302f
++#define MASK_SC_D  0xf800707f
++#define MATCH_ECALL 0x73
++#define MASK_ECALL  0xffffffff
++#define MATCH_EBREAK 0x100073
++#define MASK_EBREAK  0xffffffff
++#define MATCH_URET 0x200073
++#define MASK_URET  0xffffffff
++#define MATCH_SRET 0x10200073
++#define MASK_SRET  0xffffffff
++#define MATCH_HRET 0x20200073
++#define MASK_HRET  0xffffffff
++#define MATCH_MRET 0x30200073
++#define MASK_MRET  0xffffffff
++#define MATCH_DRET 0x7b200073
++#define MASK_DRET  0xffffffff
++#define MATCH_SFENCE_VM 0x10400073
++#define MASK_SFENCE_VM  0xfff07fff
++#define MATCH_WFI 0x10500073
++#define MASK_WFI  0xffffffff
++#define MATCH_CSRRW 0x1073
++#define MASK_CSRRW  0x707f
++#define MATCH_CSRRS 0x2073
++#define MASK_CSRRS  0x707f
++#define MATCH_CSRRC 0x3073
++#define MASK_CSRRC  0x707f
++#define MATCH_CSRRWI 0x5073
++#define MASK_CSRRWI  0x707f
++#define MATCH_CSRRSI 0x6073
++#define MASK_CSRRSI  0x707f
++#define MATCH_CSRRCI 0x7073
++#define MASK_CSRRCI  0x707f
++#define MATCH_FADD_S 0x53
++#define MASK_FADD_S  0xfe00007f
++#define MATCH_FSUB_S 0x8000053
++#define MASK_FSUB_S  0xfe00007f
++#define MATCH_FMUL_S 0x10000053
++#define MASK_FMUL_S  0xfe00007f
++#define MATCH_FDIV_S 0x18000053
++#define MASK_FDIV_S  0xfe00007f
++#define MATCH_FSGNJ_S 0x20000053
++#define MASK_FSGNJ_S  0xfe00707f
++#define MATCH_FSGNJN_S 0x20001053
++#define MASK_FSGNJN_S  0xfe00707f
++#define MATCH_FSGNJX_S 0x20002053
++#define MASK_FSGNJX_S  0xfe00707f
++#define MATCH_FMIN_S 0x28000053
++#define MASK_FMIN_S  0xfe00707f
++#define MATCH_FMAX_S 0x28001053
++#define MASK_FMAX_S  0xfe00707f
++#define MATCH_FSQRT_S 0x58000053
++#define MASK_FSQRT_S  0xfff0007f
++#define MATCH_FADD_D 0x2000053
++#define MASK_FADD_D  0xfe00007f
++#define MATCH_FSUB_D 0xa000053
++#define MASK_FSUB_D  0xfe00007f
++#define MATCH_FMUL_D 0x12000053
++#define MASK_FMUL_D  0xfe00007f
++#define MATCH_FDIV_D 0x1a000053
++#define MASK_FDIV_D  0xfe00007f
++#define MATCH_FSGNJ_D 0x22000053
++#define MASK_FSGNJ_D  0xfe00707f
++#define MATCH_FSGNJN_D 0x22001053
++#define MASK_FSGNJN_D  0xfe00707f
++#define MATCH_FSGNJX_D 0x22002053
++#define MASK_FSGNJX_D  0xfe00707f
++#define MATCH_FMIN_D 0x2a000053
++#define MASK_FMIN_D  0xfe00707f
++#define MATCH_FMAX_D 0x2a001053
++#define MASK_FMAX_D  0xfe00707f
++#define MATCH_FCVT_S_D 0x40100053
++#define MASK_FCVT_S_D  0xfff0007f
++#define MATCH_FCVT_D_S 0x42000053
++#define MASK_FCVT_D_S  0xfff0007f
++#define MATCH_FSQRT_D 0x5a000053
++#define MASK_FSQRT_D  0xfff0007f
++#define MATCH_FLE_S 0xa0000053
++#define MASK_FLE_S  0xfe00707f
++#define MATCH_FLT_S 0xa0001053
++#define MASK_FLT_S  0xfe00707f
++#define MATCH_FEQ_S 0xa0002053
++#define MASK_FEQ_S  0xfe00707f
++#define MATCH_FLE_D 0xa2000053
++#define MASK_FLE_D  0xfe00707f
++#define MATCH_FLT_D 0xa2001053
++#define MASK_FLT_D  0xfe00707f
++#define MATCH_FEQ_D 0xa2002053
++#define MASK_FEQ_D  0xfe00707f
++#define MATCH_FCVT_W_S 0xc0000053
++#define MASK_FCVT_W_S  0xfff0007f
++#define MATCH_FCVT_WU_S 0xc0100053
++#define MASK_FCVT_WU_S  0xfff0007f
++#define MATCH_FCVT_L_S 0xc0200053
++#define MASK_FCVT_L_S  0xfff0007f
++#define MATCH_FCVT_LU_S 0xc0300053
++#define MASK_FCVT_LU_S  0xfff0007f
++#define MATCH_FMV_X_S 0xe0000053
++#define MASK_FMV_X_S  0xfff0707f
++#define MATCH_FCLASS_S 0xe0001053
++#define MASK_FCLASS_S  0xfff0707f
++#define MATCH_FCVT_W_D 0xc2000053
++#define MASK_FCVT_W_D  0xfff0007f
++#define MATCH_FCVT_WU_D 0xc2100053
++#define MASK_FCVT_WU_D  0xfff0007f
++#define MATCH_FCVT_L_D 0xc2200053
++#define MASK_FCVT_L_D  0xfff0007f
++#define MATCH_FCVT_LU_D 0xc2300053
++#define MASK_FCVT_LU_D  0xfff0007f
++#define MATCH_FMV_X_D 0xe2000053
++#define MASK_FMV_X_D  0xfff0707f
++#define MATCH_FCLASS_D 0xe2001053
++#define MASK_FCLASS_D  0xfff0707f
++#define MATCH_FCVT_S_W 0xd0000053
++#define MASK_FCVT_S_W  0xfff0007f
++#define MATCH_FCVT_S_WU 0xd0100053
++#define MASK_FCVT_S_WU  0xfff0007f
++#define MATCH_FCVT_S_L 0xd0200053
++#define MASK_FCVT_S_L  0xfff0007f
++#define MATCH_FCVT_S_LU 0xd0300053
++#define MASK_FCVT_S_LU  0xfff0007f
++#define MATCH_FMV_S_X 0xf0000053
++#define MASK_FMV_S_X  0xfff0707f
++#define MATCH_FCVT_D_W 0xd2000053
++#define MASK_FCVT_D_W  0xfff0007f
++#define MATCH_FCVT_D_WU 0xd2100053
++#define MASK_FCVT_D_WU  0xfff0007f
++#define MATCH_FCVT_D_L 0xd2200053
++#define MASK_FCVT_D_L  0xfff0007f
++#define MATCH_FCVT_D_LU 0xd2300053
++#define MASK_FCVT_D_LU  0xfff0007f
++#define MATCH_FMV_D_X 0xf2000053
++#define MASK_FMV_D_X  0xfff0707f
++#define MATCH_FLW 0x2007
++#define MASK_FLW  0x707f
++#define MATCH_FLD 0x3007
++#define MASK_FLD  0x707f
++#define MATCH_FSW 0x2027
++#define MASK_FSW  0x707f
++#define MATCH_FSD 0x3027
++#define MASK_FSD  0x707f
++#define MATCH_FMADD_S 0x43
++#define MASK_FMADD_S  0x600007f
++#define MATCH_FMSUB_S 0x47
++#define MASK_FMSUB_S  0x600007f
++#define MATCH_FNMSUB_S 0x4b
++#define MASK_FNMSUB_S  0x600007f
++#define MATCH_FNMADD_S 0x4f
++#define MASK_FNMADD_S  0x600007f
++#define MATCH_FMADD_D 0x2000043
++#define MASK_FMADD_D  0x600007f
++#define MATCH_FMSUB_D 0x2000047
++#define MASK_FMSUB_D  0x600007f
++#define MATCH_FNMSUB_D 0x200004b
++#define MASK_FNMSUB_D  0x600007f
++#define MATCH_FNMADD_D 0x200004f
++#define MASK_FNMADD_D  0x600007f
++#define MATCH_C_ADDI4SPN 0x0
++#define MASK_C_ADDI4SPN  0xe003
++#define MATCH_C_FLD 0x2000
++#define MASK_C_FLD  0xe003
++#define MATCH_C_LW 0x4000
++#define MASK_C_LW  0xe003
++#define MATCH_C_FLW 0x6000
++#define MASK_C_FLW  0xe003
++#define MATCH_C_FSD 0xa000
++#define MASK_C_FSD  0xe003
++#define MATCH_C_SW 0xc000
++#define MASK_C_SW  0xe003
++#define MATCH_C_FSW 0xe000
++#define MASK_C_FSW  0xe003
++#define MATCH_C_ADDI 0x1
++#define MASK_C_ADDI  0xe003
++#define MATCH_C_JAL 0x2001
++#define MASK_C_JAL  0xe003
++#define MATCH_C_LI 0x4001
++#define MASK_C_LI  0xe003
++#define MATCH_C_LUI 0x6001
++#define MASK_C_LUI  0xe003
++#define MATCH_C_SRLI 0x8001
++#define MASK_C_SRLI  0xec03
++#define MATCH_C_SRAI 0x8401
++#define MASK_C_SRAI  0xec03
++#define MATCH_C_ANDI 0x8801
++#define MASK_C_ANDI  0xec03
++#define MATCH_C_SUB 0x8c01
++#define MASK_C_SUB  0xfc63
++#define MATCH_C_XOR 0x8c21
++#define MASK_C_XOR  0xfc63
++#define MATCH_C_OR 0x8c41
++#define MASK_C_OR  0xfc63
++#define MATCH_C_AND 0x8c61
++#define MASK_C_AND  0xfc63
++#define MATCH_C_SUBW 0x9c01
++#define MASK_C_SUBW  0xfc63
++#define MATCH_C_ADDW 0x9c21
++#define MASK_C_ADDW  0xfc63
++#define MATCH_C_J 0xa001
++#define MASK_C_J  0xe003
++#define MATCH_C_BEQZ 0xc001
++#define MASK_C_BEQZ  0xe003
++#define MATCH_C_BNEZ 0xe001
++#define MASK_C_BNEZ  0xe003
++#define MATCH_C_SLLI 0x2
++#define MASK_C_SLLI  0xe003
++#define MATCH_C_FLDSP 0x2002
++#define MASK_C_FLDSP  0xe003
++#define MATCH_C_LWSP 0x4002
++#define MASK_C_LWSP  0xe003
++#define MATCH_C_FLWSP 0x6002
++#define MASK_C_FLWSP  0xe003
++#define MATCH_C_MV 0x8002
++#define MASK_C_MV  0xf003
++#define MATCH_C_ADD 0x9002
++#define MASK_C_ADD  0xf003
++#define MATCH_C_FSDSP 0xa002
++#define MASK_C_FSDSP  0xe003
++#define MATCH_C_SWSP 0xc002
++#define MASK_C_SWSP  0xe003
++#define MATCH_C_FSWSP 0xe002
++#define MASK_C_FSWSP  0xe003
++#define MATCH_C_NOP 0x1
++#define MASK_C_NOP  0xffff
++#define MATCH_C_ADDI16SP 0x6101
++#define MASK_C_ADDI16SP  0xef83
++#define MATCH_C_JR 0x8002
++#define MASK_C_JR  0xf07f
++#define MATCH_C_JALR 0x9002
++#define MASK_C_JALR  0xf07f
++#define MATCH_C_EBREAK 0x9002
++#define MASK_C_EBREAK  0xffff
++#define MATCH_C_LD 0x6000
++#define MASK_C_LD  0xe003
++#define MATCH_C_SD 0xe000
++#define MASK_C_SD  0xe003
++#define MATCH_C_ADDIW 0x2001
++#define MASK_C_ADDIW  0xe003
++#define MATCH_C_LDSP 0x6002
++#define MASK_C_LDSP  0xe003
++#define MATCH_C_SDSP 0xe002
++#define MASK_C_SDSP  0xe003
++#define MATCH_CUSTOM0 0xb
++#define MASK_CUSTOM0  0x707f
++#define MATCH_CUSTOM0_RS1 0x200b
++#define MASK_CUSTOM0_RS1  0x707f
++#define MATCH_CUSTOM0_RS1_RS2 0x300b
++#define MASK_CUSTOM0_RS1_RS2  0x707f
++#define MATCH_CUSTOM0_RD 0x400b
++#define MASK_CUSTOM0_RD  0x707f
++#define MATCH_CUSTOM0_RD_RS1 0x600b
++#define MASK_CUSTOM0_RD_RS1  0x707f
++#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
++#define MASK_CUSTOM0_RD_RS1_RS2  0x707f
++#define MATCH_CUSTOM1 0x2b
++#define MASK_CUSTOM1  0x707f
++#define MATCH_CUSTOM1_RS1 0x202b
++#define MASK_CUSTOM1_RS1  0x707f
++#define MATCH_CUSTOM1_RS1_RS2 0x302b
++#define MASK_CUSTOM1_RS1_RS2  0x707f
++#define MATCH_CUSTOM1_RD 0x402b
++#define MASK_CUSTOM1_RD  0x707f
++#define MATCH_CUSTOM1_RD_RS1 0x602b
++#define MASK_CUSTOM1_RD_RS1  0x707f
++#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
++#define MASK_CUSTOM1_RD_RS1_RS2  0x707f
++#define MATCH_CUSTOM2 0x5b
++#define MASK_CUSTOM2  0x707f
++#define MATCH_CUSTOM2_RS1 0x205b
++#define MASK_CUSTOM2_RS1  0x707f
++#define MATCH_CUSTOM2_RS1_RS2 0x305b
++#define MASK_CUSTOM2_RS1_RS2  0x707f
++#define MATCH_CUSTOM2_RD 0x405b
++#define MASK_CUSTOM2_RD  0x707f
++#define MATCH_CUSTOM2_RD_RS1 0x605b
++#define MASK_CUSTOM2_RD_RS1  0x707f
++#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
++#define MASK_CUSTOM2_RD_RS1_RS2  0x707f
++#define MATCH_CUSTOM3 0x7b
++#define MASK_CUSTOM3  0x707f
++#define MATCH_CUSTOM3_RS1 0x207b
++#define MASK_CUSTOM3_RS1  0x707f
++#define MATCH_CUSTOM3_RS1_RS2 0x307b
++#define MASK_CUSTOM3_RS1_RS2  0x707f
++#define MATCH_CUSTOM3_RD 0x407b
++#define MASK_CUSTOM3_RD  0x707f
++#define MATCH_CUSTOM3_RD_RS1 0x607b
++#define MASK_CUSTOM3_RD_RS1  0x707f
++#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
++#define MASK_CUSTOM3_RD_RS1_RS2  0x707f
++#define CSR_FFLAGS 0x1
++#define CSR_FRM 0x2
++#define CSR_FCSR 0x3
++#define CSR_CYCLE 0xc00
++#define CSR_TIME 0xc01
++#define CSR_INSTRET 0xc02
++#define CSR_HPMCOUNTER3 0xc03
++#define CSR_HPMCOUNTER4 0xc04
++#define CSR_HPMCOUNTER5 0xc05
++#define CSR_HPMCOUNTER6 0xc06
++#define CSR_HPMCOUNTER7 0xc07
++#define CSR_HPMCOUNTER8 0xc08
++#define CSR_HPMCOUNTER9 0xc09
++#define CSR_HPMCOUNTER10 0xc0a
++#define CSR_HPMCOUNTER11 0xc0b
++#define CSR_HPMCOUNTER12 0xc0c
++#define CSR_HPMCOUNTER13 0xc0d
++#define CSR_HPMCOUNTER14 0xc0e
++#define CSR_HPMCOUNTER15 0xc0f
++#define CSR_HPMCOUNTER16 0xc10
++#define CSR_HPMCOUNTER17 0xc11
++#define CSR_HPMCOUNTER18 0xc12
++#define CSR_HPMCOUNTER19 0xc13
++#define CSR_HPMCOUNTER20 0xc14
++#define CSR_HPMCOUNTER21 0xc15
++#define CSR_HPMCOUNTER22 0xc16
++#define CSR_HPMCOUNTER23 0xc17
++#define CSR_HPMCOUNTER24 0xc18
++#define CSR_HPMCOUNTER25 0xc19
++#define CSR_HPMCOUNTER26 0xc1a
++#define CSR_HPMCOUNTER27 0xc1b
++#define CSR_HPMCOUNTER28 0xc1c
++#define CSR_HPMCOUNTER29 0xc1d
++#define CSR_HPMCOUNTER30 0xc1e
++#define CSR_HPMCOUNTER31 0xc1f
++#define CSR_SSTATUS 0x100
++#define CSR_SIE 0x104
++#define CSR_STVEC 0x105
++#define CSR_SSCRATCH 0x140
++#define CSR_SEPC 0x141
++#define CSR_SCAUSE 0x142
++#define CSR_SBADADDR 0x143
++#define CSR_SIP 0x144
++#define CSR_SPTBR 0x180
++#define CSR_MSTATUS 0x300
++#define CSR_MISA 0x301
++#define CSR_MEDELEG 0x302
++#define CSR_MIDELEG 0x303
++#define CSR_MIE 0x304
++#define CSR_MTVEC 0x305
++#define CSR_MSCRATCH 0x340
++#define CSR_MEPC 0x341
++#define CSR_MCAUSE 0x342
++#define CSR_MBADADDR 0x343
++#define CSR_MIP 0x344
++#define CSR_TSELECT 0x7a0
++#define CSR_TDATA1 0x7a1
++#define CSR_TDATA2 0x7a2
++#define CSR_TDATA3 0x7a3
++#define CSR_DCSR 0x7b0
++#define CSR_DPC 0x7b1
++#define CSR_DSCRATCH 0x7b2
++#define CSR_MCYCLE 0xb00
++#define CSR_MINSTRET 0xb02
++#define CSR_MHPMCOUNTER3 0xb03
++#define CSR_MHPMCOUNTER4 0xb04
++#define CSR_MHPMCOUNTER5 0xb05
++#define CSR_MHPMCOUNTER6 0xb06
++#define CSR_MHPMCOUNTER7 0xb07
++#define CSR_MHPMCOUNTER8 0xb08
++#define CSR_MHPMCOUNTER9 0xb09
++#define CSR_MHPMCOUNTER10 0xb0a
++#define CSR_MHPMCOUNTER11 0xb0b
++#define CSR_MHPMCOUNTER12 0xb0c
++#define CSR_MHPMCOUNTER13 0xb0d
++#define CSR_MHPMCOUNTER14 0xb0e
++#define CSR_MHPMCOUNTER15 0xb0f
++#define CSR_MHPMCOUNTER16 0xb10
++#define CSR_MHPMCOUNTER17 0xb11
++#define CSR_MHPMCOUNTER18 0xb12
++#define CSR_MHPMCOUNTER19 0xb13
++#define CSR_MHPMCOUNTER20 0xb14
++#define CSR_MHPMCOUNTER21 0xb15
++#define CSR_MHPMCOUNTER22 0xb16
++#define CSR_MHPMCOUNTER23 0xb17
++#define CSR_MHPMCOUNTER24 0xb18
++#define CSR_MHPMCOUNTER25 0xb19
++#define CSR_MHPMCOUNTER26 0xb1a
++#define CSR_MHPMCOUNTER27 0xb1b
++#define CSR_MHPMCOUNTER28 0xb1c
++#define CSR_MHPMCOUNTER29 0xb1d
++#define CSR_MHPMCOUNTER30 0xb1e
++#define CSR_MHPMCOUNTER31 0xb1f
++#define CSR_MUCOUNTEREN 0x320
++#define CSR_MSCOUNTEREN 0x321
++#define CSR_MHPMEVENT3 0x323
++#define CSR_MHPMEVENT4 0x324
++#define CSR_MHPMEVENT5 0x325
++#define CSR_MHPMEVENT6 0x326
++#define CSR_MHPMEVENT7 0x327
++#define CSR_MHPMEVENT8 0x328
++#define CSR_MHPMEVENT9 0x329
++#define CSR_MHPMEVENT10 0x32a
++#define CSR_MHPMEVENT11 0x32b
++#define CSR_MHPMEVENT12 0x32c
++#define CSR_MHPMEVENT13 0x32d
++#define CSR_MHPMEVENT14 0x32e
++#define CSR_MHPMEVENT15 0x32f
++#define CSR_MHPMEVENT16 0x330
++#define CSR_MHPMEVENT17 0x331
++#define CSR_MHPMEVENT18 0x332
++#define CSR_MHPMEVENT19 0x333
++#define CSR_MHPMEVENT20 0x334
++#define CSR_MHPMEVENT21 0x335
++#define CSR_MHPMEVENT22 0x336
++#define CSR_MHPMEVENT23 0x337
++#define CSR_MHPMEVENT24 0x338
++#define CSR_MHPMEVENT25 0x339
++#define CSR_MHPMEVENT26 0x33a
++#define CSR_MHPMEVENT27 0x33b
++#define CSR_MHPMEVENT28 0x33c
++#define CSR_MHPMEVENT29 0x33d
++#define CSR_MHPMEVENT30 0x33e
++#define CSR_MHPMEVENT31 0x33f
++#define CSR_MVENDORID 0xf11
++#define CSR_MARCHID 0xf12
++#define CSR_MIMPID 0xf13
++#define CSR_MHARTID 0xf14
++#define CSR_CYCLEH 0xc80
++#define CSR_TIMEH 0xc81
++#define CSR_INSTRETH 0xc82
++#define CSR_HPMCOUNTER3H 0xc83
++#define CSR_HPMCOUNTER4H 0xc84
++#define CSR_HPMCOUNTER5H 0xc85
++#define CSR_HPMCOUNTER6H 0xc86
++#define CSR_HPMCOUNTER7H 0xc87
++#define CSR_HPMCOUNTER8H 0xc88
++#define CSR_HPMCOUNTER9H 0xc89
++#define CSR_HPMCOUNTER10H 0xc8a
++#define CSR_HPMCOUNTER11H 0xc8b
++#define CSR_HPMCOUNTER12H 0xc8c
++#define CSR_HPMCOUNTER13H 0xc8d
++#define CSR_HPMCOUNTER14H 0xc8e
++#define CSR_HPMCOUNTER15H 0xc8f
++#define CSR_HPMCOUNTER16H 0xc90
++#define CSR_HPMCOUNTER17H 0xc91
++#define CSR_HPMCOUNTER18H 0xc92
++#define CSR_HPMCOUNTER19H 0xc93
++#define CSR_HPMCOUNTER20H 0xc94
++#define CSR_HPMCOUNTER21H 0xc95
++#define CSR_HPMCOUNTER22H 0xc96
++#define CSR_HPMCOUNTER23H 0xc97
++#define CSR_HPMCOUNTER24H 0xc98
++#define CSR_HPMCOUNTER25H 0xc99
++#define CSR_HPMCOUNTER26H 0xc9a
++#define CSR_HPMCOUNTER27H 0xc9b
++#define CSR_HPMCOUNTER28H 0xc9c
++#define CSR_HPMCOUNTER29H 0xc9d
++#define CSR_HPMCOUNTER30H 0xc9e
++#define CSR_HPMCOUNTER31H 0xc9f
++#define CSR_MCYCLEH 0xb80
++#define CSR_MINSTRETH 0xb82
++#define CSR_MHPMCOUNTER3H 0xb83
++#define CSR_MHPMCOUNTER4H 0xb84
++#define CSR_MHPMCOUNTER5H 0xb85
++#define CSR_MHPMCOUNTER6H 0xb86
++#define CSR_MHPMCOUNTER7H 0xb87
++#define CSR_MHPMCOUNTER8H 0xb88
++#define CSR_MHPMCOUNTER9H 0xb89
++#define CSR_MHPMCOUNTER10H 0xb8a
++#define CSR_MHPMCOUNTER11H 0xb8b
++#define CSR_MHPMCOUNTER12H 0xb8c
++#define CSR_MHPMCOUNTER13H 0xb8d
++#define CSR_MHPMCOUNTER14H 0xb8e
++#define CSR_MHPMCOUNTER15H 0xb8f
++#define CSR_MHPMCOUNTER16H 0xb90
++#define CSR_MHPMCOUNTER17H 0xb91
++#define CSR_MHPMCOUNTER18H 0xb92
++#define CSR_MHPMCOUNTER19H 0xb93
++#define CSR_MHPMCOUNTER20H 0xb94
++#define CSR_MHPMCOUNTER21H 0xb95
++#define CSR_MHPMCOUNTER22H 0xb96
++#define CSR_MHPMCOUNTER23H 0xb97
++#define CSR_MHPMCOUNTER24H 0xb98
++#define CSR_MHPMCOUNTER25H 0xb99
++#define CSR_MHPMCOUNTER26H 0xb9a
++#define CSR_MHPMCOUNTER27H 0xb9b
++#define CSR_MHPMCOUNTER28H 0xb9c
++#define CSR_MHPMCOUNTER29H 0xb9d
++#define CSR_MHPMCOUNTER30H 0xb9e
++#define CSR_MHPMCOUNTER31H 0xb9f
++#define CAUSE_MISALIGNED_FETCH 0x0
++#define CAUSE_FAULT_FETCH 0x1
++#define CAUSE_ILLEGAL_INSTRUCTION 0x2
++#define CAUSE_BREAKPOINT 0x3
++#define CAUSE_MISALIGNED_LOAD 0x4
++#define CAUSE_FAULT_LOAD 0x5
++#define CAUSE_MISALIGNED_STORE 0x6
++#define CAUSE_FAULT_STORE 0x7
++#define CAUSE_USER_ECALL 0x8
++#define CAUSE_SUPERVISOR_ECALL 0x9
++#define CAUSE_HYPERVISOR_ECALL 0xa
++#define CAUSE_MACHINE_ECALL 0xb
++#endif
++#ifdef DECLARE_INSN
++DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
++DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
++DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
++DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
++DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
++DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
++DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
++DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
++DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
++DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
++DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
++DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
++DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
++DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
++DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
++DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
++DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
++DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
++DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
++DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
++DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
++DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
++DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
++DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
++DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
++DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
++DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
++DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
++DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
++DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
++DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
++DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
++DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
++DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
++DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
++DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
++DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
++DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
++DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
++DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
++DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
++DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
++DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
++DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
++DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
++DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
++DECLARE_INSN(or, MATCH_OR, MASK_OR)
++DECLARE_INSN(and, MATCH_AND, MASK_AND)
++DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
++DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
++DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
++DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
++DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
++DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
++DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
++DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
++DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
++DECLARE_INSN(lb, MATCH_LB, MASK_LB)
++DECLARE_INSN(lh, MATCH_LH, MASK_LH)
++DECLARE_INSN(lw, MATCH_LW, MASK_LW)
++DECLARE_INSN(ld, MATCH_LD, MASK_LD)
++DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
++DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
++DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
++DECLARE_INSN(sb, MATCH_SB, MASK_SB)
++DECLARE_INSN(sh, MATCH_SH, MASK_SH)
++DECLARE_INSN(sw, MATCH_SW, MASK_SW)
++DECLARE_INSN(sd, MATCH_SD, MASK_SD)
++DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
++DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
++DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
++DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
++DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
++DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
++DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
++DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
++DECLARE_INSN(rem, MATCH_REM, MASK_REM)
++DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
++DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
++DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
++DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
++DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
++DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
++DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
++DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
++DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
++DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
++DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
++DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
++DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
++DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
++DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
++DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
++DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
++DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
++DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
++DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
++DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
++DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
++DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
++DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
++DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
++DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
++DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
++DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
++DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
++DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
++DECLARE_INSN(uret, MATCH_URET, MASK_URET)
++DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
++DECLARE_INSN(hret, MATCH_HRET, MASK_HRET)
++DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
++DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
++DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM)
++DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
++DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
++DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
++DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
++DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
++DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
++DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
++DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
++DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
++DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
++DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
++DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
++DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
++DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
++DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
++DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
++DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
++DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
++DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
++DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
++DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
++DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
++DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
++DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
++DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
++DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
++DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
++DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
++DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
++DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
++DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
++DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
++DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
++DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
++DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
++DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
++DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
++DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
++DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
++DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
++DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
++DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
++DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
++DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
++DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
++DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
++DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
++DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
++DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
++DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
++DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
++DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
++DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
++DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
++DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
++DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
++DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
++DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
++DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
++DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
++DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
++DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
++DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
++DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
++DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
++DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
++DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
++DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
++DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
++DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
++DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
++DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
++DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
++DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
++DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
++DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
++DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
++DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
++DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
++DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
++DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
++DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
++DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
++DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
++DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
++DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
++DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
++DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
++DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
++DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
++DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
++DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
++DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
++DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
++DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
++DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
++DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
++DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
++DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
++DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
++DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
++DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
++DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
++DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
++DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
++DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
++DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
++DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
++DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
++DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
++DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
++DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
++DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
++DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
++DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
++DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
++DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
++DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
++DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
++DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
++DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
++DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
++DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
++DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
++DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
++DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
++DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
++DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
++DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
++DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
++DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
++DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
++DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
++DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
++DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
++#endif
++#ifdef DECLARE_CSR
++DECLARE_CSR(fflags, CSR_FFLAGS)
++DECLARE_CSR(frm, CSR_FRM)
++DECLARE_CSR(fcsr, CSR_FCSR)
++DECLARE_CSR(cycle, CSR_CYCLE)
++DECLARE_CSR(time, CSR_TIME)
++DECLARE_CSR(instret, CSR_INSTRET)
++DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3)
++DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4)
++DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5)
++DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6)
++DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7)
++DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8)
++DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9)
++DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10)
++DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11)
++DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12)
++DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13)
++DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14)
++DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15)
++DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16)
++DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17)
++DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18)
++DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19)
++DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20)
++DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21)
++DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22)
++DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23)
++DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24)
++DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25)
++DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26)
++DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27)
++DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28)
++DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29)
++DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30)
++DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31)
++DECLARE_CSR(sstatus, CSR_SSTATUS)
++DECLARE_CSR(sie, CSR_SIE)
++DECLARE_CSR(stvec, CSR_STVEC)
++DECLARE_CSR(sscratch, CSR_SSCRATCH)
++DECLARE_CSR(sepc, CSR_SEPC)
++DECLARE_CSR(scause, CSR_SCAUSE)
++DECLARE_CSR(sbadaddr, CSR_SBADADDR)
++DECLARE_CSR(sip, CSR_SIP)
++DECLARE_CSR(sptbr, CSR_SPTBR)
++DECLARE_CSR(mstatus, CSR_MSTATUS)
++DECLARE_CSR(misa, CSR_MISA)
++DECLARE_CSR(medeleg, CSR_MEDELEG)
++DECLARE_CSR(mideleg, CSR_MIDELEG)
++DECLARE_CSR(mie, CSR_MIE)
++DECLARE_CSR(mtvec, CSR_MTVEC)
++DECLARE_CSR(mscratch, CSR_MSCRATCH)
++DECLARE_CSR(mepc, CSR_MEPC)
++DECLARE_CSR(mcause, CSR_MCAUSE)
++DECLARE_CSR(mbadaddr, CSR_MBADADDR)
++DECLARE_CSR(mip, CSR_MIP)
++DECLARE_CSR(tselect, CSR_TSELECT)
++DECLARE_CSR(tdata1, CSR_TDATA1)
++DECLARE_CSR(tdata2, CSR_TDATA2)
++DECLARE_CSR(tdata3, CSR_TDATA3)
++DECLARE_CSR(dcsr, CSR_DCSR)
++DECLARE_CSR(dpc, CSR_DPC)
++DECLARE_CSR(dscratch, CSR_DSCRATCH)
++DECLARE_CSR(mcycle, CSR_MCYCLE)
++DECLARE_CSR(minstret, CSR_MINSTRET)
++DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3)
++DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4)
++DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5)
++DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6)
++DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7)
++DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8)
++DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9)
++DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10)
++DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11)
++DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12)
++DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13)
++DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14)
++DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15)
++DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16)
++DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17)
++DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18)
++DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19)
++DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20)
++DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21)
++DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22)
++DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23)
++DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24)
++DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25)
++DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26)
++DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27)
++DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28)
++DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29)
++DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30)
++DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31)
++DECLARE_CSR(mucounteren, CSR_MUCOUNTEREN)
++DECLARE_CSR(mscounteren, CSR_MSCOUNTEREN)
++DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3)
++DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4)
++DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5)
++DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6)
++DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7)
++DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8)
++DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9)
++DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10)
++DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11)
++DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12)
++DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13)
++DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14)
++DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15)
++DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16)
++DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17)
++DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18)
++DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19)
++DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20)
++DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21)
++DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22)
++DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23)
++DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24)
++DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25)
++DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26)
++DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27)
++DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28)
++DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29)
++DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30)
++DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31)
++DECLARE_CSR(mvendorid, CSR_MVENDORID)
++DECLARE_CSR(marchid, CSR_MARCHID)
++DECLARE_CSR(mimpid, CSR_MIMPID)
++DECLARE_CSR(mhartid, CSR_MHARTID)
++DECLARE_CSR(cycleh, CSR_CYCLEH)
++DECLARE_CSR(timeh, CSR_TIMEH)
++DECLARE_CSR(instreth, CSR_INSTRETH)
++DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H)
++DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H)
++DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H)
++DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H)
++DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H)
++DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H)
++DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H)
++DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H)
++DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H)
++DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H)
++DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H)
++DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H)
++DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H)
++DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H)
++DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H)
++DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H)
++DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H)
++DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H)
++DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H)
++DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H)
++DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H)
++DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H)
++DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H)
++DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H)
++DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H)
++DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H)
++DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H)
++DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H)
++DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H)
++DECLARE_CSR(mcycleh, CSR_MCYCLEH)
++DECLARE_CSR(minstreth, CSR_MINSTRETH)
++DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H)
++DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H)
++DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H)
++DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H)
++DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H)
++DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H)
++DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H)
++DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H)
++DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H)
++DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H)
++DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H)
++DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H)
++DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H)
++DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H)
++DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H)
++DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H)
++DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H)
++DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H)
++DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H)
++DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H)
++DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H)
++DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H)
++DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H)
++DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H)
++DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H)
++DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H)
++DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H)
++DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H)
++DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H)
++#endif
++#ifdef DECLARE_CAUSE
++DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH)
++DECLARE_CAUSE("fault fetch", CAUSE_FAULT_FETCH)
++DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION)
++DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT)
++DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD)
++DECLARE_CAUSE("fault load", CAUSE_FAULT_LOAD)
++DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE)
++DECLARE_CAUSE("fault store", CAUSE_FAULT_STORE)
++DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL)
++DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL)
++DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL)
++DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL)
++#endif
+diff --git original-binutils/include/opcode/riscv.h binutils-2_27/include/opcode/riscv.h
+new file mode 100644
+index 0000000..4c2c3c3
+--- /dev/null
++++ binutils-2_27/include/opcode/riscv.h
+@@ -0,0 +1,344 @@
++/* riscv.h.  RISC-V opcode list for GDB, the GNU debugger.
++   Copyright 2011
++   Free Software Foundation, Inc.
++   Contributed by Andrew Waterman
++
++This file is part of GDB, GAS, and the GNU binutils.
++
++GDB, GAS, and the GNU binutils are free software; you can redistribute
++them and/or modify them under the terms of the GNU General Public
++License as published by the Free Software Foundation; either version
++1, or (at your option) any later version.
++
++GDB, GAS, and the GNU binutils are distributed in the hope that they
++will be useful, but WITHOUT ANY WARRANTY; without even the implied
++warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
++the GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this file; see the file COPYING.  If not, write to the Free
++Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
++
++#ifndef _RISCV_H_
++#define _RISCV_H_
++
++#include "riscv-opc.h"
++#include <stdlib.h>
++#include <stdint.h>
++
++typedef uint64_t insn_t;
++
++static inline unsigned int riscv_insn_length (insn_t insn)
++{
++  if ((insn & 0x3) != 0x3) /* RVC.  */
++    return 2;
++  if ((insn & 0x1f) != 0x1f) /* Base ISA and extensions in 32-bit space.  */
++    return 4;
++  if ((insn & 0x3f) == 0x1f) /* 48-bit extensions.  */
++    return 6;
++  if ((insn & 0x7f) == 0x3f) /* 64-bit extensions.  */
++    return 8;
++  /* Longer instructions not supported at the moment.  */
++  return 2;
++}
++
++static const char * const riscv_rm[8] = {
++  "rne", "rtz", "rdn", "rup", "rmm", 0, 0, "dyn"
++};
++static const char * const riscv_pred_succ[16] = {
++  0,   "w",  "r",  "rw",  "o",  "ow",  "or",  "orw",
++  "i", "iw", "ir", "irw", "io", "iow", "ior", "iorw",
++};
++
++#define RVC_JUMP_BITS 11
++#define RVC_JUMP_REACH ((1ULL << RVC_JUMP_BITS) * RISCV_JUMP_ALIGN)
++
++#define RVC_BRANCH_BITS 8
++#define RVC_BRANCH_REACH ((1ULL << RVC_BRANCH_BITS) * RISCV_BRANCH_ALIGN)
++
++#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
++#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
++
++#define EXTRACT_ITYPE_IMM(x) \
++  (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_STYPE_IMM(x) \
++  (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_SBTYPE_IMM(x) \
++  ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_UTYPE_IMM(x) \
++  ((RV_X(x, 12, 20) << 12) | (RV_IMM_SIGN(x) << 32))
++#define EXTRACT_UJTYPE_IMM(x) \
++  ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
++#define EXTRACT_RVC_IMM(x) \
++  (RV_X(x, 2, 5) | (-RV_X(x, 12, 1) << 5))
++#define EXTRACT_RVC_LUI_IMM(x) \
++  (EXTRACT_RVC_IMM (x) << RISCV_IMM_BITS)
++#define EXTRACT_RVC_SIMM3(x) \
++  (RV_X(x, 10, 2) | (-RV_X(x, 12, 1) << 2))
++#define EXTRACT_RVC_ADDI4SPN_IMM(x) \
++  ((RV_X(x, 6, 1) << 2) | (RV_X(x, 5, 1) << 3) | (RV_X(x, 11, 2) << 4) | (RV_X(x, 7, 4) << 6))
++#define EXTRACT_RVC_ADDI16SP_IMM(x) \
++  ((RV_X(x, 6, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 5, 1) << 6) | (RV_X(x, 3, 2) << 7) | (-RV_X(x, 12, 1) << 9))
++#define EXTRACT_RVC_LW_IMM(x) \
++  ((RV_X(x, 6, 1) << 2) | (RV_X(x, 10, 3) << 3) | (RV_X(x, 5, 1) << 6))
++#define EXTRACT_RVC_LD_IMM(x) \
++  ((RV_X(x, 10, 3) << 3) | (RV_X(x, 5, 2) << 6))
++#define EXTRACT_RVC_LWSP_IMM(x) \
++  ((RV_X(x, 4, 3) << 2) | (RV_X(x, 12, 1) << 5) | (RV_X(x, 2, 2) << 6))
++#define EXTRACT_RVC_LDSP_IMM(x) \
++  ((RV_X(x, 5, 2) << 3) | (RV_X(x, 12, 1) << 5) | (RV_X(x, 2, 3) << 6))
++#define EXTRACT_RVC_SWSP_IMM(x) \
++  ((RV_X(x, 9, 4) << 2) | (RV_X(x, 7, 2) << 6))
++#define EXTRACT_RVC_SDSP_IMM(x) \
++  ((RV_X(x, 10, 3) << 3) | (RV_X(x, 7, 3) << 6))
++#define EXTRACT_RVC_B_IMM(x) \
++  ((RV_X(x, 3, 2) << 1) | (RV_X(x, 10, 2) << 3) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 5, 2) << 6) | (-RV_X(x, 12, 1) << 8))
++#define EXTRACT_RVC_J_IMM(x) \
++  ((RV_X(x, 3, 3) << 1) | (RV_X(x, 11, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 9, 2) << 8) | (RV_X(x, 8, 1) << 10) | (-RV_X(x, 12, 1) << 11))
++
++#define ENCODE_ITYPE_IMM(x) \
++  (RV_X(x, 0, 12) << 20)
++#define ENCODE_STYPE_IMM(x) \
++  ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
++#define ENCODE_SBTYPE_IMM(x) \
++  ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
++#define ENCODE_UTYPE_IMM(x) \
++  (RV_X(x, 12, 20) << 12)
++#define ENCODE_UJTYPE_IMM(x) \
++  ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
++#define ENCODE_RVC_IMM(x) \
++  ((RV_X(x, 0, 5) << 2) | (RV_X(x, 5, 1) << 12))
++#define ENCODE_RVC_LUI_IMM(x) \
++  ENCODE_RVC_IMM ((x) >> RISCV_IMM_BITS)
++#define ENCODE_RVC_SIMM3(x) \
++  (RV_X(x, 0, 3) << 10)
++#define ENCODE_RVC_ADDI4SPN_IMM(x) \
++  ((RV_X(x, 2, 1) << 6) | (RV_X(x, 3, 1) << 5) | (RV_X(x, 4, 2) << 11) | (RV_X(x, 6, 4) << 7))
++#define ENCODE_RVC_ADDI16SP_IMM(x) \
++  ((RV_X(x, 4, 1) << 6) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 1) << 5) | (RV_X(x, 7, 2) << 3) | (RV_X(x, 9, 1) << 12))
++#define ENCODE_RVC_LW_IMM(x) \
++  ((RV_X(x, 2, 1) << 6) | (RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 1) << 5))
++#define ENCODE_RVC_LD_IMM(x) \
++  ((RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 2) << 5))
++#define ENCODE_RVC_LWSP_IMM(x) \
++  ((RV_X(x, 2, 3) << 4) | (RV_X(x, 5, 1) << 12) | (RV_X(x, 6, 2) << 2))
++#define ENCODE_RVC_LDSP_IMM(x) \
++  ((RV_X(x, 3, 2) << 5) | (RV_X(x, 5, 1) << 12) | (RV_X(x, 6, 3) << 2))
++#define ENCODE_RVC_SWSP_IMM(x) \
++  ((RV_X(x, 2, 4) << 9) | (RV_X(x, 6, 2) << 7))
++#define ENCODE_RVC_SDSP_IMM(x) \
++  ((RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 3) << 7))
++#define ENCODE_RVC_B_IMM(x) \
++  ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12))
++#define ENCODE_RVC_J_IMM(x) \
++  ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12))
++
++#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
++#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
++#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
++#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
++#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
++#define VALID_RVC_IMM(x) (EXTRACT_RVC_IMM(ENCODE_RVC_IMM(x)) == (x))
++#define VALID_RVC_LUI_IMM(x) (EXTRACT_RVC_LUI_IMM(ENCODE_RVC_LUI_IMM(x)) == (x))
++#define VALID_RVC_SIMM3(x) (EXTRACT_RVC_SIMM3(ENCODE_RVC_SIMM3(x)) == (x))
++#define VALID_RVC_ADDI4SPN_IMM(x) (EXTRACT_RVC_ADDI4SPN_IMM(ENCODE_RVC_ADDI4SPN_IMM(x)) == (x))
++#define VALID_RVC_ADDI16SP_IMM(x) (EXTRACT_RVC_ADDI16SP_IMM(ENCODE_RVC_ADDI16SP_IMM(x)) == (x))
++#define VALID_RVC_LW_IMM(x) (EXTRACT_RVC_LW_IMM(ENCODE_RVC_LW_IMM(x)) == (x))
++#define VALID_RVC_LD_IMM(x) (EXTRACT_RVC_LD_IMM(ENCODE_RVC_LD_IMM(x)) == (x))
++#define VALID_RVC_LWSP_IMM(x) (EXTRACT_RVC_LWSP_IMM(ENCODE_RVC_LWSP_IMM(x)) == (x))
++#define VALID_RVC_LDSP_IMM(x) (EXTRACT_RVC_LDSP_IMM(ENCODE_RVC_LDSP_IMM(x)) == (x))
++#define VALID_RVC_SWSP_IMM(x) (EXTRACT_RVC_SWSP_IMM(ENCODE_RVC_SWSP_IMM(x)) == (x))
++#define VALID_RVC_SDSP_IMM(x) (EXTRACT_RVC_SDSP_IMM(ENCODE_RVC_SDSP_IMM(x)) == (x))
++#define VALID_RVC_B_IMM(x) (EXTRACT_RVC_B_IMM(ENCODE_RVC_B_IMM(x)) == (x))
++#define VALID_RVC_J_IMM(x) (EXTRACT_RVC_J_IMM(ENCODE_RVC_J_IMM(x)) == (x))
++
++#define RISCV_RTYPE(insn, rd, rs1, rs2) \
++  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
++#define RISCV_ITYPE(insn, rd, rs1, imm) \
++  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
++#define RISCV_STYPE(insn, rs1, rs2, imm) \
++  ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
++#define RISCV_SBTYPE(insn, rs1, rs2, target) \
++  ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
++#define RISCV_UTYPE(insn, rd, bigimm) \
++  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
++#define RISCV_UJTYPE(insn, rd, target) \
++  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
++
++#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
++#define RVC_NOP MATCH_C_ADDI
++
++#define RISCV_CONST_HIGH_PART(VALUE) \
++  (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
++#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
++#define RISCV_PCREL_HIGH_PART(VALUE, PC) RISCV_CONST_HIGH_PART((VALUE) - (PC))
++#define RISCV_PCREL_LOW_PART(VALUE, PC) RISCV_CONST_LOW_PART((VALUE) - (PC))
++
++#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
++#define RISCV_JUMP_ALIGN_BITS 1
++#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
++#define RISCV_JUMP_REACH ((1ULL << RISCV_JUMP_BITS) * RISCV_JUMP_ALIGN)
++
++#define RISCV_IMM_BITS 12
++#define RISCV_BIGIMM_BITS (32 - RISCV_IMM_BITS)
++#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS)
++#define RISCV_BIGIMM_REACH (1LL << RISCV_BIGIMM_BITS)
++#define RISCV_RVC_IMM_REACH (1LL << 6)
++#define RISCV_BRANCH_BITS RISCV_IMM_BITS
++#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS
++#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS)
++#define RISCV_BRANCH_REACH (RISCV_IMM_REACH * RISCV_BRANCH_ALIGN)
++
++/* RV fields.  */
++
++#define OP_MASK_OP		0x7f
++#define OP_SH_OP		0
++#define OP_MASK_RS2		0x1f
++#define OP_SH_RS2		20
++#define OP_MASK_RS1		0x1f
++#define OP_SH_RS1		15
++#define OP_MASK_RS3		0x1f
++#define OP_SH_RS3		27
++#define OP_MASK_RD		0x1f
++#define OP_SH_RD		7
++#define OP_MASK_SHAMT		0x3f
++#define OP_SH_SHAMT		20
++#define OP_MASK_SHAMTW		0x1f
++#define OP_SH_SHAMTW		20
++#define OP_MASK_RM		0x7
++#define OP_SH_RM		12
++#define OP_MASK_PRED		0xf
++#define OP_SH_PRED		24
++#define OP_MASK_SUCC		0xf
++#define OP_SH_SUCC		20
++#define OP_MASK_AQ		0x1
++#define OP_SH_AQ		26
++#define OP_MASK_RL		0x1
++#define OP_SH_RL		25
++
++#define OP_MASK_CUSTOM_IMM	0x7f
++#define OP_SH_CUSTOM_IMM	25
++#define OP_MASK_CSR		0xfff
++#define OP_SH_CSR		20
++
++/* RVC fields.  */
++
++#define OP_MASK_CRS2 0x1f
++#define OP_SH_CRS2 2
++#define OP_MASK_CRS1S 0x7
++#define OP_SH_CRS1S 7
++#define OP_MASK_CRS2S 0x7
++#define OP_SH_CRS2S 2
++
++/* ABI names for selected x-registers.  */
++
++#define X_RA 1
++#define X_SP 2
++#define X_GP 3
++#define X_TP 4
++#define X_T0 5
++#define X_T1 6
++#define X_T2 7
++#define X_T3 28
++
++#define NGPR 32
++#define NFPR 32
++
++/* Replace bits MASK << SHIFT of STRUCT with the equivalent bits in
++   VALUE << SHIFT.  VALUE is evaluated exactly once.  */
++#define INSERT_BITS(STRUCT, VALUE, MASK, SHIFT) \
++  (STRUCT) = (((STRUCT) & ~((insn_t)(MASK) << (SHIFT))) \
++	      | ((insn_t)((VALUE) & (MASK)) << (SHIFT)))
++
++/* Extract bits MASK << SHIFT from STRUCT and shift them right
++   SHIFT places.  */
++#define EXTRACT_BITS(STRUCT, MASK, SHIFT) \
++  (((STRUCT) >> (SHIFT)) & (MASK))
++
++/* Extract the operand given by FIELD from integer INSN.  */
++#define EXTRACT_OPERAND(FIELD, INSN) \
++  EXTRACT_BITS ((INSN), OP_MASK_##FIELD, OP_SH_##FIELD)
++
++/* This structure holds information for a particular instruction.  */
++
++struct riscv_opcode
++{
++  /* The name of the instruction.  */
++  const char *name;
++  /* The ISA subset name (I, M, A, F, D, Xextension).  */
++  const char *subset;
++  /* A string describing the arguments for this instruction.  */
++  const char *args;
++  /* The basic opcode for the instruction.  When assembling, this
++     opcode is modified by the arguments to produce the actual opcode
++     that is used.  If pinfo is INSN_MACRO, then this is 0.  */
++  insn_t match;
++  /* If pinfo is not INSN_MACRO, then this is a bit mask for the
++     relevant portions of the opcode when disassembling.  If the
++     actual opcode anded with the match field equals the opcode field,
++     then we have found the correct instruction.  If pinfo is
++     INSN_MACRO, then this field is the macro identifier.  */
++  insn_t mask;
++  /* A function to determine if a word corresponds to this instruction.
++     Usually, this computes ((word & mask) == match).  */
++  int (*match_func) (const struct riscv_opcode *op, insn_t word);
++  /* For a macro, this is INSN_MACRO.  Otherwise, it is a collection
++     of bits describing the instruction, notably any relevant hazard
++     information.  */
++  unsigned long pinfo;
++};
++
++/* Instruction is a simple alias (e.g. "mv" for "addi").  */
++#define	INSN_ALIAS		0x00000001
++/* Instruction is actually a macro.  It should be ignored by the
++   disassembler, and requires special treatment by the assembler.  */
++#define INSN_MACRO		0xffffffff
++
++/* This is a list of macro expanded instructions.
++
++   _I appended means immediate
++   _A appended means address
++   _AB appended means address with base register
++   _D appended means 64 bit floating point constant
++   _S appended means 32 bit floating point constant.  */
++
++enum
++{
++  M_LA,
++  M_LLA,
++  M_LA_TLS_GD,
++  M_LA_TLS_IE,
++  M_LB,
++  M_LBU,
++  M_LH,
++  M_LHU,
++  M_LW,
++  M_LWU,
++  M_LD,
++  M_SB,
++  M_SH,
++  M_SW,
++  M_SD,
++  M_FLW,
++  M_FLD,
++  M_FSW,
++  M_FSD,
++  M_CALL,
++  M_J,
++  M_LI,
++  M_NUM_MACROS
++};
++
++
++extern const char * const riscv_gpr_names_numeric[NGPR];
++extern const char * const riscv_gpr_names_abi[NGPR];
++extern const char * const riscv_fpr_names_numeric[NFPR];
++extern const char * const riscv_fpr_names_abi[NFPR];
++
++extern const struct riscv_opcode riscv_builtin_opcodes[];
++extern const int bfd_riscv_num_builtin_opcodes;
++extern struct riscv_opcode *riscv_opcodes;
++extern int bfd_riscv_num_opcodes;
++#define NUMOPCODES bfd_riscv_num_opcodes
++
++#endif /* _RISCV_H_ */
+diff --git original-binutils/ld/Makefile.am binutils-2_27/ld/Makefile.am
+index 0598923..802c237 100644
+--- original-binutils/ld/Makefile.am
++++ binutils-2_27/ld/Makefile.am
+@@ -274,6 +274,7 @@ ALL_EMULATION_SOURCES = \
+ 	eelf32ppcsim.c \
+ 	eelf32ppcvxworks.c \
+ 	eelf32ppcwindiss.c \
++	eelf32lriscv.c \
+ 	eelf32rl78.c \
+ 	eelf32rx.c \
+ 	eelf32tilegx.c \
+@@ -490,6 +491,7 @@ ALL_64_EMULATION_SOURCES = \
+ 	eelf64btsmip_fbsd.c \
+ 	eelf64hppa.c \
+ 	eelf64lppc.c \
++	eelf64lriscv.c \
+ 	eelf64ltsmip.c \
+ 	eelf64ltsmip_fbsd.c \
+ 	eelf64mmix.c \
+@@ -1161,6 +1163,11 @@ eelf32lppcsim.c: $(srcdir)/emulparams/elf32lppcsim.sh \
+   $(srcdir)/emultempl/ppc32elf.em ldemul-list.h \
+   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+ 
++eelf32lriscv.c: $(srcdir)/emulparams/elf32lriscv.sh \
++  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++  ${GEN_DEPENDS}
++
+ eelf32lsmip.c: $(srcdir)/emulparams/elf32lsmip.sh \
+   $(srcdir)/emulparams/elf32lmip.sh $(srcdir)/emulparams/elf32bmip.sh \
+   $(ELF_DEPS) $(srcdir)/emultempl/mipself.em $(srcdir)/scripttempl/elf.sc \
+@@ -1955,6 +1962,12 @@ eelf64lppc.c: $(srcdir)/emulparams/elf64lppc.sh \
+   ldemul-list.h \
+   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+ 
++eelf64lriscv.c: $(srcdir)/emulparams/elf64lriscv.sh \
++  $(srcdir)/emulparams/elf64lriscv-defs.sh \
++  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++  ${GEN_DEPENDS}
++
+ eelf64ltsmip.c: $(srcdir)/emulparams/elf64ltsmip.sh \
+   $(srcdir)/emulparams/elf64btsmip.sh $(srcdir)/emulparams/elf64bmip-defs.sh \
+   $(srcdir)/emulparams/elf32bmipn32-defs.sh $(ELF_DEPS) \
+diff --git original-binutils/ld/Makefile.in binutils-2_27/ld/Makefile.in
+index 7c78198..69a495e 100644
+--- original-binutils/ld/Makefile.in
++++ binutils-2_27/ld/Makefile.in
+@@ -622,6 +622,7 @@ ALL_EMULATION_SOURCES = \
+ 	eelf32lppclinux.c \
+ 	eelf32lppcnto.c \
+ 	eelf32lppcsim.c \
++	eelf32lriscv.c \
+ 	eelf32m32c.c \
+ 	eelf32mb_linux.c \
+ 	eelf32mbel_linux.c \
+@@ -857,6 +858,7 @@ ALL_64_EMULATION_SOURCES = \
+ 	eelf64btsmip_fbsd.c \
+ 	eelf64hppa.c \
+ 	eelf64lppc.c \
++	eelf64lriscv.c \
+ 	eelf64ltsmip.c \
+ 	eelf64ltsmip_fbsd.c \
+ 	eelf64mmix.c \
+@@ -1275,6 +1277,7 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppclinux.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppcnto.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppcsim.Po@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lriscv.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lr5900.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lr5900n32.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lsmip.Po@am__quote@
+@@ -1330,6 +1333,7 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64btsmip_fbsd.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64hppa.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64lppc.Po@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64lriscv.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64ltsmip.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64ltsmip_fbsd.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64mmix.Po@am__quote@
+@@ -2749,6 +2753,11 @@ eelf32lppcsim.c: $(srcdir)/emulparams/elf32lppcsim.sh \
+   $(srcdir)/emultempl/ppc32elf.em ldemul-list.h \
+   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+ 
++eelf32lriscv.c: $(srcdir)/emulparams/elf32lriscv.sh \
++  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++  ${GEN_DEPENDS}
++
+ eelf32lsmip.c: $(srcdir)/emulparams/elf32lsmip.sh \
+   $(srcdir)/emulparams/elf32lmip.sh $(srcdir)/emulparams/elf32bmip.sh \
+   $(ELF_DEPS) $(srcdir)/emultempl/mipself.em $(srcdir)/scripttempl/elf.sc \
+@@ -3543,6 +3552,12 @@ eelf64lppc.c: $(srcdir)/emulparams/elf64lppc.sh \
+   ldemul-list.h \
+   $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+ 
++eelf64lriscv.c: $(srcdir)/emulparams/elf64lriscv.sh \
++  $(srcdir)/emulparams/elf64lriscv-defs.sh \
++  $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++  $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++  ${GEN_DEPENDS}
++
+ eelf64ltsmip.c: $(srcdir)/emulparams/elf64ltsmip.sh \
+   $(srcdir)/emulparams/elf64btsmip.sh $(srcdir)/emulparams/elf64bmip-defs.sh \
+   $(srcdir)/emulparams/elf32bmipn32-defs.sh $(ELF_DEPS) \
+diff --git original-binutils/ld/configure.tgt binutils-2_27/ld/configure.tgt
+index a3db909..48e2009 100644
+--- original-binutils/ld/configure.tgt
++++ binutils-2_27/ld/configure.tgt
+@@ -644,6 +644,12 @@ powerpc-*-aix*)		targ_emul=aixppc ;;
+ powerpc-*-beos*)	targ_emul=aixppc ;;
+ powerpc-*-windiss*)	targ_emul=elf32ppcwindiss ;;
+ powerpc-*-lynxos*)	targ_emul=ppclynx ;;
++riscv32*-*-*)		targ_emul=elf32lriscv
++			targ_extra_emuls="elf64lriscv"
++			targ_extra_libpath=$targ_extra_emuls ;;
++riscv64*-*-*)		targ_emul=elf64lriscv
++			targ_extra_emuls="elf32lriscv"
++			targ_extra_libpath=$targ_extra_emuls ;;
+ rs6000-*-aix[5-9]*)	targ_emul=aix5rs6 ;;
+ rs6000-*-aix*)		targ_emul=aixrs6
+ 			;;
+diff --git original-binutils/ld/emulparams/elf32lriscv-defs.sh binutils-2_27/ld/emulparams/elf32lriscv-defs.sh
+new file mode 100644
+index 0000000..0e4b723
+--- /dev/null
++++ binutils-2_27/ld/emulparams/elf32lriscv-defs.sh
+@@ -0,0 +1,42 @@
++# This is an ELF platform.
++SCRIPT_NAME=elf
++ARCH=riscv
++NO_REL_RELOCS=yes
++
++TEMPLATE_NAME=elf32
++EXTRA_EM_FILE=riscvelf
++
++ELFSIZE=32
++
++if test `echo "$host" | sed -e s/64//` = `echo "$target" | sed -e s/64//`; then
++  case " $EMULATION_LIBPATH " in
++    *" ${EMULATION_NAME} "*)
++      NATIVE=yes
++      ;;
++  esac
++fi
++
++GENERATE_SHLIB_SCRIPT=yes
++GENERATE_PIE_SCRIPT=yes
++
++TEXT_START_ADDR=0x10000
++MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
++COMMONPAGESIZE="CONSTANT (COMMONPAGESIZE)"
++
++SDATA_START_SYMBOLS="_gp = . + 0x800;
++    *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2) *(.srodata .srodata.*)"
++
++# Place the data section before the text section.  This enables more compact
++# global variable access for RVC code via linker relaxation.
++INITIAL_READONLY_SECTIONS="
++  .data           : { *(.data) *(.data.*) *(.gnu.linkonce.d.*) }
++  .rodata         : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r.*) }
++  .srodata        : { ${SDATA_START_SYMBOLS} }
++  .sdata          : { *(.sdata .sdata.* .gnu.linkonce.s.*) }
++  .sbss           : { *(.dynsbss) *(.sbss .sbss.* .gnu.linkonce.sb.*) }
++  .bss            : { *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(COMMON) }
++  . = ALIGN(${SEGMENT_SIZE}) + (. & (${MAXPAGESIZE} - 1));"
++INITIAL_READONLY_SECTIONS=".interp         : { *(.interp) } ${CREATE_PIE-${INITIAL_READONLY_SECTIONS}}"
++INITIAL_READONLY_SECTIONS="${RELOCATING+${CREATE_SHLIB-${INITIAL_READONLY_SECTIONS}}}"
++
++SDATA_START_SYMBOLS="${CREATE_PIE+${SDATA_START_SYMBOLS}}"
+diff --git original-binutils/ld/emulparams/elf32lriscv.sh binutils-2_27/ld/emulparams/elf32lriscv.sh
+new file mode 100644
+index 0000000..aac08e7
+--- /dev/null
++++ binutils-2_27/ld/emulparams/elf32lriscv.sh
+@@ -0,0 +1,2 @@
++. ${srcdir}/emulparams/elf32lriscv-defs.sh
++OUTPUT_FORMAT="elf32-littleriscv"
+diff --git original-binutils/ld/emulparams/elf64lriscv-defs.sh binutils-2_27/ld/emulparams/elf64lriscv-defs.sh
+new file mode 100644
+index 0000000..6308714
+--- /dev/null
++++ binutils-2_27/ld/emulparams/elf64lriscv-defs.sh
+@@ -0,0 +1,13 @@
++. ${srcdir}/emulparams/elf32lriscv-defs.sh
++ELFSIZE=64
++
++# Look for 64 bit target libraries in /lib64, /usr/lib64 etc., first
++# on Linux.
++case "$target" in
++  riscv64*-linux*)
++    case "$EMULATION_NAME" in
++      *64*)
++	LIBPATH_SUFFIX=64 ;;
++    esac
++    ;;
++esac
+diff --git original-binutils/ld/emulparams/elf64lriscv.sh binutils-2_27/ld/emulparams/elf64lriscv.sh
+new file mode 100644
+index 0000000..3a6a652
+--- /dev/null
++++ binutils-2_27/ld/emulparams/elf64lriscv.sh
+@@ -0,0 +1,2 @@
++. ${srcdir}/emulparams/elf64lriscv-defs.sh
++OUTPUT_FORMAT="elf64-littleriscv"
+diff --git original-binutils/ld/emultempl/riscvelf.em binutils-2_27/ld/emultempl/riscvelf.em
+new file mode 100644
+index 0000000..de35cc1
+--- /dev/null
++++ binutils-2_27/ld/emultempl/riscvelf.em
+@@ -0,0 +1,68 @@
++# This shell script emits a C file. -*- C -*-
++#   Copyright 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
++#
++# This file is part of the GNU Binutils.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++# MA 02110-1301, USA.
++
++fragment <<EOF
++
++#include "ldmain.h"
++#include "ldctor.h"
++#include "elf/riscv.h"
++#include "elfxx-riscv.h"
++
++static void
++riscv_elf_before_allocation (void)
++{
++  gld${EMULATION_NAME}_before_allocation ();
++
++  if (link_info.discard == discard_sec_merge)
++    link_info.discard = discard_l;
++
++  /* We always need at least some relaxation to handle code alignment.  */
++  if (RELAXATION_DISABLED_BY_USER)
++    TARGET_ENABLE_RELAXATION;
++  else
++    ENABLE_RELAXATION;
++
++  link_info.relax_pass = 2;
++}
++
++static void
++gld${EMULATION_NAME}_after_allocation (void)
++{
++  int need_layout = 0;
++
++  /* Don't attempt to discard unused .eh_frame sections until the final link,
++     as we can't reliably tell if they're used until after relaxation.  */
++  if (!bfd_link_relocatable (&link_info))
++    {
++      need_layout = bfd_elf_discard_info (link_info.output_bfd, &link_info);
++      if (need_layout < 0)
++	{
++	  einfo ("%X%P: .eh_frame/.stab edit: %E\n");
++	  return;
++	}
++    }
++
++  gld${EMULATION_NAME}_map_segments (need_layout);
++}
++
++EOF
++
++LDEMUL_BEFORE_ALLOCATION=riscv_elf_before_allocation
++LDEMUL_AFTER_ALLOCATION=gld${EMULATION_NAME}_after_allocation
+diff --git original-binutils/opcodes/configure binutils-2_27/opcodes/configure
+index 5a4da06..8a38c39 100755
+--- original-binutils/opcodes/configure
++++ binutils-2_27/opcodes/configure
+@@ -12640,6 +12640,7 @@ if test x${all_targets} = xfalse ; then
+ 	bfd_powerpc_arch)	ta="$ta ppc-dis.lo ppc-opc.lo" ;;
+ 	bfd_powerpc_64_arch)	ta="$ta ppc-dis.lo ppc-opc.lo" ;;
+ 	bfd_pyramid_arch)	;;
++	bfd_riscv_arch)		ta="$ta riscv-dis.lo riscv-opc.lo" ;;
+ 	bfd_romp_arch)		;;
+ 	bfd_rs6000_arch)	ta="$ta ppc-dis.lo ppc-opc.lo" ;;
+ 	bfd_rl78_arch)		ta="$ta rl78-dis.lo rl78-decode.lo";;
+diff --git original-binutils/opcodes/disassemble.c binutils-2_27/opcodes/disassemble.c
+index b818d8b..dce6f71 100644
+--- original-binutils/opcodes/disassemble.c
++++ binutils-2_27/opcodes/disassemble.c
+@@ -375,6 +375,11 @@ disassembler (bfd *abfd)
+ 	disassemble = print_insn_little_powerpc;
+       break;
+ #endif
++#ifdef ARCH_riscv
++    case bfd_arch_riscv:
++      disassemble = print_insn_riscv;
++      break;
++#endif
+ #ifdef ARCH_rs6000
+     case bfd_arch_rs6000:
+       if (bfd_get_mach (abfd) == bfd_mach_ppc_620)
+@@ -556,6 +561,9 @@ disassembler_usage (FILE *stream ATTRIBUTE_UNUSED)
+ #ifdef ARCH_powerpc
+   print_ppc_disassembler_options (stream);
+ #endif
++#ifdef ARCH_riscv
++  print_riscv_disassembler_options (stream);
++#endif
+ #ifdef ARCH_i386
+   print_i386_disassembler_options (stream);
+ #endif
+diff --git original-binutils/opcodes/riscv-dis.c binutils-2_27/opcodes/riscv-dis.c
+new file mode 100644
+index 0000000..535a265
+--- /dev/null
++++ binutils-2_27/opcodes/riscv-dis.c
+@@ -0,0 +1,521 @@
++/* RISC-V disassembler
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target.
++
++   This file is part of the GNU opcodes library.
++
++   This library is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   It is distributed in the hope that it will be useful, but WITHOUT
++   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++   License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#include "sysdep.h"
++#include "dis-asm.h"
++#include "libiberty.h"
++#include "opcode/riscv.h"
++#include "opintl.h"
++#include "elf-bfd.h"
++#include "elf/riscv.h"
++
++#include <stdint.h>
++#include <ctype.h>
++
++struct riscv_private_data
++{
++  bfd_vma gp;
++  bfd_vma print_addr;
++  bfd_vma hi_addr[OP_MASK_RD + 1];
++};
++
++static const char * const *riscv_gpr_names;
++static const char * const *riscv_fpr_names;
++
++/* Other options */
++static int no_aliases;	/* If set disassemble as most general inst.  */
++
++static void
++set_default_riscv_dis_options (void)
++{
++  riscv_gpr_names = riscv_gpr_names_abi;
++  riscv_fpr_names = riscv_fpr_names_abi;
++  no_aliases = 0;
++}
++
++static void
++parse_riscv_dis_option (const char *option)
++{
++  if (CONST_STRNEQ (option, "no-aliases"))
++    no_aliases = 1;
++  else if (CONST_STRNEQ (option, "numeric"))
++    {
++      riscv_gpr_names = riscv_gpr_names_numeric;
++      riscv_fpr_names = riscv_fpr_names_numeric;
++    }
++  else
++    {
++      /* Invalid option.  */
++      fprintf (stderr, _("Unrecognized disassembler option: %s\n"), option);
++    }
++}
++
++static void
++parse_riscv_dis_options (const char *opts_in)
++{
++  char *opts = xstrdup (opts_in), *opt = opts, *opt_end = opts;
++
++  set_default_riscv_dis_options ();
++
++  for ( ; opt_end != NULL; opt = opt_end + 1)
++    {
++      if ((opt_end = strchr (opt, ',')) != NULL)
++	*opt_end = 0;
++      parse_riscv_dis_option (opt);
++    }
++
++  free (opts);
++}
++
++/* Print one argument from an array.  */
++
++static void
++arg_print (struct disassemble_info *info, unsigned long val,
++	   const char* const* array, size_t size)
++{
++  const char *s = val >= size || array[val] == NULL ? "unknown" : array[val];
++  (*info->fprintf_func) (info->stream, "%s", s);
++}
++
++static void
++maybe_print_address (struct riscv_private_data *pd, int base_reg, int offset)
++{
++  if (pd->hi_addr[base_reg] != (bfd_vma)-1)
++    {
++      pd->print_addr = pd->hi_addr[base_reg] + offset;
++      pd->hi_addr[base_reg] = -1;
++    }
++  else if (base_reg == X_GP && pd->gp != (bfd_vma)-1)
++    pd->print_addr = pd->gp + offset;
++  else if (base_reg == X_TP || base_reg == 0)
++    pd->print_addr = offset;
++}
++
++/* Print insn arguments for 32/64-bit code.  */
++
++static void
++print_insn_args (const char *d, insn_t l, bfd_vma pc, disassemble_info *info)
++{
++  struct riscv_private_data *pd = info->private_data;
++  int rs1 = (l >> OP_SH_RS1) & OP_MASK_RS1;
++  int rd = (l >> OP_SH_RD) & OP_MASK_RD;
++  fprintf_ftype print = info->fprintf_func;
++
++  if (*d != '\0')
++    print (info->stream, "\t");
++
++  for (; *d != '\0'; d++)
++    {
++      switch (*d)
++	{
++	/* Xcustom */
++	case '^':
++	  switch (*++d)
++	    {
++	    case 'd':
++	      print (info->stream, "%d", rd);
++	      break;
++	    case 's':
++	      print (info->stream, "%d", rs1);
++	      break;
++	    case 't':
++	      print (info->stream, "%d", (int) EXTRACT_OPERAND (RS2, l));
++	      break;
++	    case 'j':
++	      print (info->stream, "%d", (int) EXTRACT_OPERAND (CUSTOM_IMM, l));
++	      break;
++	    }
++	  break;
++
++	case 'C': /* RVC */
++	  switch (*++d)
++	    {
++	    case 's': /* RS1 x8-x15 */
++	    case 'w': /* RS1 x8-x15 */
++	      print (info->stream, "%s",
++		     riscv_gpr_names[EXTRACT_OPERAND (CRS1S, l) + 8]);
++	      break;
++	    case 't': /* RS2 x8-x15 */
++	    case 'x': /* RS2 x8-x15 */
++	      print (info->stream, "%s",
++		     riscv_gpr_names[EXTRACT_OPERAND (CRS2S, l) + 8]);
++	      break;
++	    case 'U': /* RS1, constrained to equal RD */
++	      print (info->stream, "%s", riscv_gpr_names[rd]);
++	      break;
++	    case 'c': /* RS1, constrained to equal sp */
++	      print (info->stream, "%s", riscv_gpr_names[X_SP]);
++	      break;
++	    case 'V': /* RS2 */
++	      print (info->stream, "%s",
++		     riscv_gpr_names[EXTRACT_OPERAND (CRS2, l)]);
++	      break;
++	    case 'i':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_SIMM3 (l));
++	      break;
++	    case 'j':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_IMM (l));
++	      break;
++	    case 'k':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_LW_IMM (l));
++	      break;
++	    case 'l':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_LD_IMM (l));
++	      break;
++	    case 'm':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_LWSP_IMM (l));
++	      break;
++	    case 'n':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_LDSP_IMM (l));
++	      break;
++	    case 'K':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_ADDI4SPN_IMM (l));
++	      break;
++	    case 'L':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_ADDI16SP_IMM (l));
++	      break;
++	    case 'M':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_SWSP_IMM (l));
++	      break;
++	    case 'N':
++	      print (info->stream, "%d", (int)EXTRACT_RVC_SDSP_IMM (l));
++	      break;
++	    case 'p':
++	      info->target = EXTRACT_RVC_B_IMM (l) + pc;
++	      (*info->print_address_func) (info->target, info);
++	      break;
++	    case 'a':
++	      info->target = EXTRACT_RVC_J_IMM (l) + pc;
++	      (*info->print_address_func) (info->target, info);
++	      break;
++	    case 'u':
++	      print (info->stream, "0x%x",
++		     (int) (EXTRACT_RVC_IMM (l) & (RISCV_BIGIMM_REACH-1)));
++	      break;
++	    case '>':
++	      print (info->stream, "0x%x", (int) EXTRACT_RVC_IMM (l) & 0x3f);
++	      break;
++	    case '<':
++	      print (info->stream, "0x%x", (int) EXTRACT_RVC_IMM (l) & 0x1f);
++	      break;
++	    case 'T': /* floating-point RS2 */
++	      print (info->stream, "%s",
++		     riscv_fpr_names[EXTRACT_OPERAND (CRS2, l)]);
++	      break;
++	    case 'D': /* floating-point RS2 x8-x15 */
++	      print (info->stream, "%s",
++		     riscv_fpr_names[EXTRACT_OPERAND (CRS2S, l) + 8]);
++	      break;
++	    }
++	  break;
++
++	case ',':
++	case '(':
++	case ')':
++	case '[':
++	case ']':
++	  print (info->stream, "%c", *d);
++	  break;
++
++	case '0':
++	  /* Only print constant 0 if it is the last argument */
++	  if (!d[1])
++	    print (info->stream, "0");
++	  break;
++
++	case 'b':
++	case 's':
++	  print (info->stream, "%s", riscv_gpr_names[rs1]);
++	  break;
++
++	case 't':
++	  print (info->stream, "%s",
++		 riscv_gpr_names[EXTRACT_OPERAND (RS2, l)]);
++	  break;
++
++	case 'u':
++	  print (info->stream, "0x%x",
++		 (unsigned) EXTRACT_UTYPE_IMM (l) >> RISCV_IMM_BITS);
++	  break;
++
++	case 'm':
++	  arg_print (info, EXTRACT_OPERAND (RM, l),
++		     riscv_rm, ARRAY_SIZE (riscv_rm));
++	  break;
++
++	case 'P':
++	  arg_print (info, EXTRACT_OPERAND (PRED, l),
++		     riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ));
++	  break;
++
++	case 'Q':
++	  arg_print (info, EXTRACT_OPERAND (SUCC, l),
++		     riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ));
++	  break;
++
++	case 'o':
++	  maybe_print_address (pd, rs1, EXTRACT_ITYPE_IMM (l));
++	case 'j':
++	  if (((l & MASK_ADDI) == MATCH_ADDI && rs1 != 0)
++	      || (l & MASK_JALR) == MATCH_JALR)
++	    maybe_print_address (pd, rs1, EXTRACT_ITYPE_IMM (l));
++	  print (info->stream, "%d", (int) EXTRACT_ITYPE_IMM (l));
++	  break;
++
++	case 'q':
++	  maybe_print_address (pd, rs1, EXTRACT_STYPE_IMM (l));
++	  print (info->stream, "%d", (int) EXTRACT_STYPE_IMM (l));
++	  break;
++
++	case 'a':
++	  info->target = EXTRACT_UJTYPE_IMM (l) + pc;
++	  (*info->print_address_func) (info->target, info);
++	  break;
++
++	case 'p':
++	  info->target = EXTRACT_SBTYPE_IMM (l) + pc;
++	  (*info->print_address_func) (info->target, info);
++	  break;
++
++	case 'd':
++	  if ((l & MASK_AUIPC) == MATCH_AUIPC)
++	    pd->hi_addr[rd] = pc + EXTRACT_UTYPE_IMM (l);
++	  else if ((l & MASK_LUI) == MATCH_LUI)
++	    pd->hi_addr[rd] = EXTRACT_UTYPE_IMM (l);
++	  else if ((l & MASK_C_LUI) == MATCH_C_LUI)
++	    pd->hi_addr[rd] = EXTRACT_RVC_LUI_IMM (l);
++	  print (info->stream, "%s", riscv_gpr_names[rd]);
++	  break;
++
++	case 'z':
++	  print (info->stream, "%s", riscv_gpr_names[0]);
++	  break;
++
++	case '>':
++	  print (info->stream, "0x%x", (int) EXTRACT_OPERAND (SHAMT, l));
++	  break;
++
++	case '<':
++	  print (info->stream, "0x%x", (int) EXTRACT_OPERAND (SHAMTW, l));
++	  break;
++
++	case 'S':
++	case 'U':
++	  print (info->stream, "%s", riscv_fpr_names[rs1]);
++	  break;
++
++	case 'T':
++	  print (info->stream, "%s", riscv_fpr_names[EXTRACT_OPERAND (RS2, l)]);
++	  break;
++
++	case 'D':
++	  print (info->stream, "%s", riscv_fpr_names[rd]);
++	  break;
++
++	case 'R':
++	  print (info->stream, "%s", riscv_fpr_names[EXTRACT_OPERAND (RS3, l)]);
++	  break;
++
++	case 'E':
++	  {
++	    const char* csr_name = NULL;
++	    unsigned int csr = EXTRACT_OPERAND (CSR, l);
++	    switch (csr)
++	      {
++	      #define DECLARE_CSR(name, num) case num: csr_name = #name; break;
++	      #include "opcode/riscv-opc.h"
++	      #undef DECLARE_CSR
++	      }
++	    if (csr_name)
++	      print (info->stream, "%s", csr_name);
++	    else
++	      print (info->stream, "0x%x", csr);
++	    break;
++	  }
++
++	case 'Z':
++	  print (info->stream, "%d", rs1);
++	  break;
++
++	default:
++	  /* xgettext:c-format */
++	  print (info->stream, _("# internal error, undefined modifier (%c)"),
++		 *d);
++	  return;
++	}
++    }
++}
++
++/* Print the RISC-V instruction at address MEMADDR in debugged memory,
++   using INFO.  Returns the length of the instruction, in bytes.
++   BIGENDIAN must be 1 if this is big-endian code, 0 if
++   this is little-endian code.  */
++
++static int
++riscv_disassemble_insn (bfd_vma memaddr, insn_t word, disassemble_info *info)
++{
++  const struct riscv_opcode *op;
++  static bfd_boolean init = 0;
++  static const struct riscv_opcode *riscv_hash[OP_MASK_OP + 1];
++  struct riscv_private_data *pd;
++  int insnlen;
++
++#define OP_HASH_IDX(i) ((i) & (riscv_insn_length (i) == 2 ? 0x3 : OP_MASK_OP))
++
++  /* Build a hash table to shorten the search time.  */
++  if (! init)
++    {
++      for (op = riscv_opcodes; op < &riscv_opcodes[NUMOPCODES]; op++)
++	if (!riscv_hash[OP_HASH_IDX (op->match)])
++	  riscv_hash[OP_HASH_IDX (op->match)] = op;
++
++      init = 1;
++    }
++
++  if (info->private_data == NULL)
++    {
++      int i;
++
++      pd = info->private_data = xcalloc (1, sizeof (struct riscv_private_data));
++      pd->gp = -1;
++      pd->print_addr = -1;
++      for (i = 0; i < (int) ARRAY_SIZE (pd->hi_addr); i++)
++	pd->hi_addr[i] = -1;
++
++      for (i = 0; i < info->symtab_size; i++)
++	if (strcmp (bfd_asymbol_name (info->symtab[i]), "_gp") == 0)
++	  pd->gp = bfd_asymbol_value (info->symtab[i]);
++    }
++  else
++    pd = info->private_data;
++
++  insnlen = riscv_insn_length (word);
++
++  info->bytes_per_chunk = insnlen % 4 == 0 ? 4 : 2;
++  info->bytes_per_line = 8;
++  info->display_endian = info->endian;
++  info->insn_info_valid = 1;
++  info->branch_delay_insns = 0;
++  info->data_size = 0;
++  info->insn_type = dis_nonbranch;
++  info->target = 0;
++  info->target2 = 0;
++
++  op = riscv_hash[OP_HASH_IDX (word)];
++  if (op != NULL)
++    {
++      int xlen = 0;
++
++      /* The incoming section might not always be complete.  */
++      if (info->section != NULL)
++	{
++	  Elf_Internal_Ehdr *ehdr = elf_elfheader (info->section->owner);
++	  xlen = ehdr->e_ident[EI_CLASS] == ELFCLASS64 ? 64 : 32;
++	}
++
++      for (; op < &riscv_opcodes[NUMOPCODES]; op++)
++	{
++	  /* Does the opcode match?  */
++	  if (! (op->match_func) (op, word))
++	    continue;
++	  /* Is this a pseudo-instruction and may we print it as such?  */
++	  if (no_aliases && (op->pinfo & INSN_ALIAS))
++	    continue;
++	  /* Is this instruction restricted to a certain value of XLEN?  */
++	  if (isdigit (op->subset[0]) && atoi (op->subset) != xlen)
++	    continue;
++
++	  /* It's a match.  */
++	  (*info->fprintf_func) (info->stream, "%s", op->name);
++	  print_insn_args (op->args, word, memaddr, info);
++
++	  /* Try to disassemble multi-instruction addressing sequences.  */
++	  if (pd->print_addr != (bfd_vma)-1)
++	    {
++	      info->target = pd->print_addr;
++	      (*info->fprintf_func) (info->stream, " # ");
++	      (*info->print_address_func) (info->target, info);
++	      pd->print_addr = -1;
++	    }
++
++	  return insnlen;
++	}
++    }
++
++  /* We did not find a match, so just print the instruction bits.  */
++  info->insn_type = dis_noninsn;
++  (*info->fprintf_func) (info->stream, "0x%llx", (unsigned long long)word);
++  return insnlen;
++}
++
++int
++print_insn_riscv (bfd_vma memaddr, struct disassemble_info *info)
++{
++  bfd_byte packet[2];
++  insn_t insn = 0;
++  bfd_vma n;
++  int status;
++
++  if (info->disassembler_options != NULL)
++    {
++      parse_riscv_dis_options (info->disassembler_options);
++      /* Avoid repeatedly parsing the options.  */
++      info->disassembler_options = NULL;
++    }
++  else if (riscv_gpr_names == NULL)
++    set_default_riscv_dis_options ();
++
++  /* Instructions are a sequence of 2-byte packets in little-endian order.  */
++  for (n = 0; n < sizeof (insn) && n < riscv_insn_length (insn); n += 2)
++    {
++      status = (*info->read_memory_func) (memaddr + n, packet, 2, info);
++      if (status != 0)
++	{
++	  /* Don't fail just because we fell off the end.  */
++	  if (n > 0)
++	    break;
++	  (*info->memory_error_func) (status, memaddr, info);
++	  return status;
++	}
++
++      insn |= ((insn_t) bfd_getl16 (packet)) << (8 * n);
++    }
++
++  return riscv_disassemble_insn (memaddr, insn, info);
++}
++
++void
++print_riscv_disassembler_options (FILE *stream)
++{
++  fprintf (stream, _("\n\
++The following RISC-V-specific disassembler options are supported for use\n\
++with the -M switch (multiple options should be separated by commas):\n"));
++
++  fprintf (stream, _("\n\
++  numeric       Print numeric register names, rather than ABI names.\n"));
++
++  fprintf (stream, _("\n\
++  no-aliases    Disassemble only into canonical instructions, rather\n\
++                than into pseudoinstructions.\n"));
++
++  fprintf (stream, _("\n"));
++}
+diff --git original-binutils/opcodes/riscv-opc.c binutils-2_27/opcodes/riscv-opc.c
+new file mode 100644
+index 0000000..7f0ca4a
+--- /dev/null
++++ binutils-2_27/opcodes/riscv-opc.c
+@@ -0,0 +1,650 @@
++/* RISC-V opcode list
++   Copyright 2011-2015 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target.
++
++   This file is part of the GNU opcodes library.
++
++   This library is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   It is distributed in the hope that it will be useful, but WITHOUT
++   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++   License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; see the file COPYING3. If not,
++   see <http://www.gnu.org/licenses/>.  */
++
++#include "sysdep.h"
++#include "opcode/riscv.h"
++#include <stdio.h>
++
++/* Register names used by gas and objdump.  */
++
++const char * const riscv_gpr_names_numeric[NGPR] =
++{
++  "x0",   "x1",   "x2",   "x3",   "x4",   "x5",   "x6",   "x7",
++  "x8",   "x9",   "x10",  "x11",  "x12",  "x13",  "x14",  "x15",
++  "x16",  "x17",  "x18",  "x19",  "x20",  "x21",  "x22",  "x23",
++  "x24",  "x25",  "x26",  "x27",  "x28",  "x29",  "x30",  "x31"
++};
++
++const char * const riscv_gpr_names_abi[NGPR] = {
++  "zero", "ra", "sp",  "gp",  "tp", "t0",  "t1",  "t2",
++  "s0",   "s1", "a0",  "a1",  "a2", "a3",  "a4",  "a5",
++  "a6",   "a7", "s2",  "s3",  "s4", "s5",  "s6",  "s7",
++  "s8",   "s9", "s10", "s11", "t3", "t4",  "t5",  "t6"
++};
++
++const char * const riscv_fpr_names_numeric[NFPR] =
++{
++  "f0",   "f1",   "f2",   "f3",   "f4",   "f5",   "f6",   "f7",
++  "f8",   "f9",   "f10",  "f11",  "f12",  "f13",  "f14",  "f15",
++  "f16",  "f17",  "f18",  "f19",  "f20",  "f21",  "f22",  "f23",
++  "f24",  "f25",  "f26",  "f27",  "f28",  "f29",  "f30",  "f31"
++};
++
++const char * const riscv_fpr_names_abi[NFPR] = {
++  "ft0", "ft1", "ft2",  "ft3",  "ft4", "ft5", "ft6",  "ft7",
++  "fs0", "fs1", "fa0",  "fa1",  "fa2", "fa3", "fa4",  "fa5",
++  "fa6", "fa7", "fs2",  "fs3",  "fs4", "fs5", "fs6",  "fs7",
++  "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"
++};
++
++/* The order of overloaded instructions matters.  Label arguments and
++   register arguments look the same. Instructions that can have either
++   for their arguments must appear in the correct order in this table
++   for the assembler to pick the right one. In other words, entries with
++   immediate operands must appear after the same instruction with
++   registers.
++
++   Because of the lookup algorithm used, entries with the same opcode
++   name must be contiguous.  */
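++
++/* For example, the "add" entries below list the register form "d,s,t"
++   (MATCH_ADD) before the immediate form "d,s,j" (MATCH_ADDI), so an
++   "add" with a register last operand matches ADD and one with an
++   immediate falls through to the ADDI alias.  */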
++
++#define MASK_RS1 (OP_MASK_RS1 << OP_SH_RS1)
++#define MASK_RS2 (OP_MASK_RS2 << OP_SH_RS2)
++#define MASK_RD (OP_MASK_RD << OP_SH_RD)
++#define MASK_CRS2 (OP_MASK_CRS2 << OP_SH_CRS2)
++#define MASK_IMM ENCODE_ITYPE_IMM(-1U)
++#define MASK_RVC_IMM ENCODE_RVC_IMM(-1U)
++#define MASK_UIMM ENCODE_UTYPE_IMM(-1U)
++#define MASK_RM (OP_MASK_RM << OP_SH_RM)
++#define MASK_PRED (OP_MASK_PRED << OP_SH_PRED)
++#define MASK_SUCC (OP_MASK_SUCC << OP_SH_SUCC)
++#define MASK_AQ (OP_MASK_AQ << OP_SH_AQ)
++#define MASK_RL (OP_MASK_RL << OP_SH_RL)
++#define MASK_AQRL (MASK_AQ | MASK_RL)
++
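++/* Match when INSN equals OP's match value under OP's mask.  */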
++static int match_opcode(const struct riscv_opcode *op, insn_t insn)
++{
++  return ((insn ^ op->match) & op->mask) == 0;
++}
++
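++/* Never match; used by the assembler-macro (INSN_MACRO) entries.  */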
++static int match_never(const struct riscv_opcode *op ATTRIBUTE_UNUSED,
++		       insn_t insn ATTRIBUTE_UNUSED)
++{
++  return 0;
++}
++
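++/* Match only when rs1 and rs2 name the same register, as in the
++   fmv/fneg/fabs aliases below.  */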
++static int match_rs1_eq_rs2(const struct riscv_opcode *op, insn_t insn)
++{
++  int rs1 = (insn & MASK_RS1) >> OP_SH_RS1;
++  int rs2 = (insn & MASK_RS2) >> OP_SH_RS2;
++  return match_opcode (op, insn) && rs1 == rs2;
++}
++
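++/* Match only when the destination register field is non-zero.  */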
++static int match_rd_nonzero(const struct riscv_opcode *op, insn_t insn)
++{
++  return match_opcode (op, insn) && ((insn & MASK_RD) != 0);
++}
++
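++/* c.add and c.mv require a non-zero rd and a non-zero rs2.  */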
++static int match_c_add(const struct riscv_opcode *op, insn_t insn)
++{
++  return match_rd_nonzero (op, insn) && ((insn & MASK_CRS2) != 0);
++}
++
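++/* c.lui requires a non-zero rd other than x2 (the stack pointer).  */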
++static int match_c_lui(const struct riscv_opcode *op, insn_t insn)
++{
++  return match_rd_nonzero (op, insn) && (((insn & MASK_RD) >> OP_SH_RD) != 2);
++}
++
++const struct riscv_opcode riscv_builtin_opcodes[] =
++{
++/* name,      isa,   operands, match, mask, match_func, pinfo */
++{"unimp",     "C",   "",  0, 0xffffU,  match_opcode, 0 },
++{"unimp",     "I",   "",  MATCH_CSRRW | (CSR_CYCLE << OP_SH_CSR), 0xffffffffU,  match_opcode, 0 }, /* csrw cycle, x0 */
++{"ebreak",    "C",   "",  MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, INSN_ALIAS },
++{"ebreak",    "I",   "",    MATCH_EBREAK, MASK_EBREAK, match_opcode, 0 },
++{"sbreak",    "C",   "",  MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, INSN_ALIAS },
++{"sbreak",    "I",   "",    MATCH_EBREAK, MASK_EBREAK, match_opcode, INSN_ALIAS },
++{"ret",       "C",   "",  MATCH_C_JR | (X_RA << OP_SH_RD), MASK_C_JR | MASK_RD, match_opcode, INSN_ALIAS },
++{"ret",       "I",   "",  MATCH_JALR | (X_RA << OP_SH_RS1), MASK_JALR | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
++{"jr",        "C",   "d",  MATCH_C_JR, MASK_C_JR, match_rd_nonzero, INSN_ALIAS },
++{"jr",        "I",   "s",  MATCH_JALR, MASK_JALR | MASK_RD | MASK_IMM, match_opcode, INSN_ALIAS },
++{"jr",        "I",   "s,j",  MATCH_JALR, MASK_JALR | MASK_RD, match_opcode, INSN_ALIAS },
++{"jalr",      "C",   "d",  MATCH_C_JALR, MASK_C_JALR, match_rd_nonzero, INSN_ALIAS },
++{"jalr",      "I",   "s",  MATCH_JALR | (X_RA << OP_SH_RD), MASK_JALR | MASK_RD | MASK_IMM, match_opcode, INSN_ALIAS },
++{"jalr",      "I",   "s,j",  MATCH_JALR | (X_RA << OP_SH_RD), MASK_JALR | MASK_RD, match_opcode, INSN_ALIAS },
++{"jalr",      "I",   "d,s",  MATCH_JALR, MASK_JALR | MASK_IMM, match_opcode, INSN_ALIAS },
++{"jalr",      "I",   "d,s,j",  MATCH_JALR, MASK_JALR, match_opcode, 0 },
++{"j",         "C",   "Ca",  MATCH_C_J, MASK_C_J, match_opcode, INSN_ALIAS },
++{"j",         "I",   "a",  MATCH_JAL, MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS },
++{"jal",       "32C", "Ca",  MATCH_C_JAL, MASK_C_JAL, match_opcode, INSN_ALIAS },
++{"jal",       "I",   "a",  MATCH_JAL | (X_RA << OP_SH_RD), MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS },
++{"jal",       "I",   "d,a",  MATCH_JAL, MASK_JAL, match_opcode, 0 },
++{"call",      "I",   "c", (X_T1 << OP_SH_RS1) | (X_RA << OP_SH_RD), (int) M_CALL,  match_never, INSN_MACRO },
++{"call",      "I",   "d,c", (X_T1 << OP_SH_RS1), (int) M_CALL,  match_never, INSN_MACRO },
++{"tail",      "I",   "c", (X_T1 << OP_SH_RS1), (int) M_CALL,  match_never, INSN_MACRO },
++{"jump",      "I",   "c,s", 0, (int) M_CALL,  match_never, INSN_MACRO },
++{"nop",       "C",   "",  MATCH_C_ADDI, 0xffff, match_opcode, INSN_ALIAS },
++{"nop",       "I",   "",         MATCH_ADDI, MASK_ADDI | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
++{"lui",       "C",   "d,Cu",  MATCH_C_LUI, MASK_C_LUI, match_c_lui, INSN_ALIAS },
++{"lui",       "I",   "d,u",  MATCH_LUI, MASK_LUI, match_opcode, 0 },
++{"li",        "C",   "d,Cv",  MATCH_C_LUI, MASK_C_LUI, match_c_lui, INSN_ALIAS },
++{"li",        "C",   "d,Cj",  MATCH_C_LI, MASK_C_LI, match_rd_nonzero, INSN_ALIAS },
++{"li",        "C",   "d,0",  MATCH_C_LI, MASK_C_LI | MASK_RVC_IMM, match_rd_nonzero, INSN_ALIAS },
++{"li",        "I",   "d,j",      MATCH_ADDI, MASK_ADDI | MASK_RS1, match_opcode, INSN_ALIAS }, /* addi */
++{"li",        "I",   "d,I",  0,    (int) M_LI,  match_never, INSN_MACRO },
++{"mv",        "C",   "d,CV",  MATCH_C_MV, MASK_C_MV, match_c_add, INSN_ALIAS },
++{"mv",        "I",   "d,s",  MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS },
++{"move",      "C",   "d,CV",  MATCH_C_MV, MASK_C_MV, match_c_add, INSN_ALIAS },
++{"move",      "I",   "d,s",  MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS },
++{"andi",      "C",   "Cs,Cw,Cj",  MATCH_C_ANDI, MASK_C_ANDI, match_opcode, INSN_ALIAS },
++{"andi",      "I",   "d,s,j",  MATCH_ANDI, MASK_ANDI, match_opcode, 0 },
++{"and",       "C",   "Cs,Cw,Ct",  MATCH_C_AND, MASK_C_AND, match_opcode, INSN_ALIAS },
++{"and",       "C",   "Cs,Ct,Cw",  MATCH_C_AND, MASK_C_AND, match_opcode, INSN_ALIAS },
++{"and",       "C",   "Cs,Cw,Cj",  MATCH_C_ANDI, MASK_C_ANDI, match_opcode, INSN_ALIAS },
++{"and",       "I",   "d,s,t",  MATCH_AND, MASK_AND, match_opcode, 0 },
++{"and",       "I",   "d,s,j",  MATCH_ANDI, MASK_ANDI, match_opcode, INSN_ALIAS },
++{"beqz",      "C",   "Cs,Cp",  MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, INSN_ALIAS },
++{"beqz",      "I",   "s,p",  MATCH_BEQ, MASK_BEQ | MASK_RS2, match_opcode, INSN_ALIAS },
++{"beq",       "I",   "s,t,p",  MATCH_BEQ, MASK_BEQ, match_opcode, 0 },
++{"blez",      "I",   "t,p",  MATCH_BGE, MASK_BGE | MASK_RS1, match_opcode, INSN_ALIAS },
++{"bgez",      "I",   "s,p",  MATCH_BGE, MASK_BGE | MASK_RS2, match_opcode, INSN_ALIAS },
++{"ble",       "I",   "t,s,p",  MATCH_BGE, MASK_BGE, match_opcode, INSN_ALIAS },
++{"bleu",      "I",   "t,s,p",  MATCH_BGEU, MASK_BGEU, match_opcode, INSN_ALIAS },
++{"bge",       "I",   "s,t,p",  MATCH_BGE, MASK_BGE, match_opcode, 0 },
++{"bgeu",      "I",   "s,t,p",  MATCH_BGEU, MASK_BGEU, match_opcode, 0 },
++{"bltz",      "I",   "s,p",  MATCH_BLT, MASK_BLT | MASK_RS2, match_opcode, INSN_ALIAS },
++{"bgtz",      "I",   "t,p",  MATCH_BLT, MASK_BLT | MASK_RS1, match_opcode, INSN_ALIAS },
++{"blt",       "I",   "s,t,p",  MATCH_BLT, MASK_BLT, match_opcode, 0 },
++{"bltu",      "I",   "s,t,p",  MATCH_BLTU, MASK_BLTU, match_opcode, 0 },
++{"bgt",       "I",   "t,s,p",  MATCH_BLT, MASK_BLT, match_opcode, INSN_ALIAS },
++{"bgtu",      "I",   "t,s,p",  MATCH_BLTU, MASK_BLTU, match_opcode, INSN_ALIAS },
++{"bnez",      "C",   "Cs,Cp",  MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, INSN_ALIAS },
++{"bnez",      "I",   "s,p",  MATCH_BNE, MASK_BNE | MASK_RS2, match_opcode, INSN_ALIAS },
++{"bne",       "I",   "s,t,p",  MATCH_BNE, MASK_BNE, match_opcode, 0 },
++{"addi",      "C",   "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_opcode, INSN_ALIAS },
++{"addi",      "C",   "d,CU,Cj",  MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, INSN_ALIAS },
++{"addi",      "C",   "Cc,Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_opcode, INSN_ALIAS },
++{"addi",      "I",   "d,s,j",  MATCH_ADDI, MASK_ADDI, match_opcode, 0 },
++{"add",       "C",   "d,CU,CV",  MATCH_C_ADD, MASK_C_ADD, match_c_add, INSN_ALIAS },
++{"add",       "C",   "d,CV,CU",  MATCH_C_ADD, MASK_C_ADD, match_c_add, INSN_ALIAS },
++{"add",       "C",   "d,CU,Cj",  MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, INSN_ALIAS },
++{"add",       "C",   "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_opcode, INSN_ALIAS },
++{"add",       "C",   "Cc,Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_opcode, INSN_ALIAS },
++{"add",       "I",   "d,s,t",  MATCH_ADD, MASK_ADD, match_opcode, 0 },
++{"add",       "I",   "d,s,t,0",MATCH_ADD, MASK_ADD, match_opcode, 0 },
++{"add",       "I",   "d,s,j",  MATCH_ADDI, MASK_ADDI, match_opcode, INSN_ALIAS },
++{"la",        "I",   "d,A",  0,    (int) M_LA,  match_never, INSN_MACRO },
++{"lla",       "I",   "d,A",  0,    (int) M_LLA,  match_never, INSN_MACRO },
++{"la.tls.gd", "I",   "d,A",  0,    (int) M_LA_TLS_GD,  match_never, INSN_MACRO },
++{"la.tls.ie", "I",   "d,A",  0,    (int) M_LA_TLS_IE,  match_never, INSN_MACRO },
++{"neg",       "I",   "d,t",  MATCH_SUB, MASK_SUB | MASK_RS1, match_opcode, INSN_ALIAS }, /* sub 0 */
++{"slli",      "C",   "d,CU,C>",  MATCH_C_SLLI, MASK_C_SLLI, match_rd_nonzero, INSN_ALIAS },
++{"slli",      "I",   "d,s,>",   MATCH_SLLI, MASK_SLLI, match_opcode, 0 },
++{"sll",       "C",   "d,CU,C>",  MATCH_C_SLLI, MASK_C_SLLI, match_rd_nonzero, INSN_ALIAS },
++{"sll",       "I",   "d,s,t",   MATCH_SLL, MASK_SLL, match_opcode, 0 },
++{"sll",       "I",   "d,s,>",   MATCH_SLLI, MASK_SLLI, match_opcode, INSN_ALIAS },
++{"srli",      "C",   "Cs,Cw,C>",  MATCH_C_SRLI, MASK_C_SRLI, match_rd_nonzero, INSN_ALIAS },
++{"srli",      "I",   "d,s,>",   MATCH_SRLI, MASK_SRLI, match_opcode, 0 },
++{"srl",       "C",   "Cs,Cw,C>",  MATCH_C_SRLI, MASK_C_SRLI, match_rd_nonzero, INSN_ALIAS },
++{"srl",       "I",   "d,s,t",   MATCH_SRL, MASK_SRL, match_opcode, 0 },
++{"srl",       "I",   "d,s,>",   MATCH_SRLI, MASK_SRLI, match_opcode, INSN_ALIAS },
++{"srai",      "C",   "Cs,Cw,C>",  MATCH_C_SRAI, MASK_C_SRAI, match_rd_nonzero, INSN_ALIAS },
++{"srai",      "I",   "d,s,>",   MATCH_SRAI, MASK_SRAI, match_opcode, 0 },
++{"sra",       "C",   "Cs,Cw,C>",  MATCH_C_SRAI, MASK_C_SRAI, match_rd_nonzero, INSN_ALIAS },
++{"sra",       "I",   "d,s,t",   MATCH_SRA, MASK_SRA, match_opcode, 0 },
++{"sra",       "I",   "d,s,>",   MATCH_SRAI, MASK_SRAI, match_opcode, INSN_ALIAS },
++{"sub",       "C",   "Cs,Cw,Ct",  MATCH_C_SUB, MASK_C_SUB, match_opcode, INSN_ALIAS },
++{"sub",       "I",   "d,s,t",  MATCH_SUB, MASK_SUB, match_opcode, 0 },
++{"lb",        "I",   "d,o(s)",  MATCH_LB, MASK_LB, match_opcode, 0 },
++{"lb",        "I",   "d,A",  0, (int) M_LB, match_never, INSN_MACRO },
++{"lbu",       "I",   "d,o(s)",  MATCH_LBU, MASK_LBU, match_opcode, 0 },
++{"lbu",       "I",   "d,A",  0, (int) M_LBU, match_never, INSN_MACRO },
++{"lh",        "I",   "d,o(s)",  MATCH_LH, MASK_LH, match_opcode, 0 },
++{"lh",        "I",   "d,A",  0, (int) M_LH, match_never, INSN_MACRO },
++{"lhu",       "I",   "d,o(s)",  MATCH_LHU, MASK_LHU, match_opcode, 0 },
++{"lhu",       "I",   "d,A",  0, (int) M_LHU, match_never, INSN_MACRO },
++{"lw",        "C",   "d,Cm(Cc)",  MATCH_C_LWSP, MASK_C_LWSP, match_rd_nonzero, INSN_ALIAS },
++{"lw",        "C",   "Ct,Ck(Cs)",  MATCH_C_LW, MASK_C_LW, match_opcode, INSN_ALIAS },
++{"lw",        "I",   "d,o(s)",  MATCH_LW, MASK_LW, match_opcode, 0 },
++{"lw",        "I",   "d,A",  0, (int) M_LW, match_never, INSN_MACRO },
++{"not",       "I",   "d,s",  MATCH_XORI | MASK_IMM, MASK_XORI | MASK_IMM, match_opcode, INSN_ALIAS },
++{"ori",       "I",   "d,s,j",  MATCH_ORI, MASK_ORI, match_opcode, 0 },
++{"or",       "C",   "Cs,Cw,Ct",  MATCH_C_OR, MASK_C_OR, match_opcode, INSN_ALIAS },
++{"or",       "C",   "Cs,Ct,Cw",  MATCH_C_OR, MASK_C_OR, match_opcode, INSN_ALIAS },
++{"or",        "I",   "d,s,t",  MATCH_OR, MASK_OR, match_opcode, 0 },
++{"or",        "I",   "d,s,j",  MATCH_ORI, MASK_ORI, match_opcode, INSN_ALIAS },
++{"auipc",     "I",   "d,u",  MATCH_AUIPC, MASK_AUIPC, match_opcode, 0 },
++{"seqz",      "I",   "d,s",  MATCH_SLTIU | ENCODE_ITYPE_IMM(1), MASK_SLTIU | MASK_IMM, match_opcode, INSN_ALIAS },
++{"snez",      "I",   "d,t",  MATCH_SLTU, MASK_SLTU | MASK_RS1, match_opcode, INSN_ALIAS },
++{"sltz",      "I",   "d,s",  MATCH_SLT, MASK_SLT | MASK_RS2, match_opcode, INSN_ALIAS },
++{"sgtz",      "I",   "d,t",  MATCH_SLT, MASK_SLT | MASK_RS1, match_opcode, INSN_ALIAS },
++{"slti",      "I",   "d,s,j",  MATCH_SLTI, MASK_SLTI, match_opcode, INSN_ALIAS },
++{"slt",       "I",   "d,s,t",  MATCH_SLT, MASK_SLT, match_opcode, 0 },
++{"slt",       "I",   "d,s,j",  MATCH_SLTI, MASK_SLTI, match_opcode, 0 },
++{"sltiu",     "I",   "d,s,j",  MATCH_SLTIU, MASK_SLTIU, match_opcode, 0 },
++{"sltu",      "I",   "d,s,t",  MATCH_SLTU, MASK_SLTU, match_opcode, 0 },
++{"sltu",      "I",   "d,s,j",  MATCH_SLTIU, MASK_SLTIU, match_opcode, INSN_ALIAS },
++{"sgt",       "I",   "d,t,s",  MATCH_SLT, MASK_SLT, match_opcode, INSN_ALIAS },
++{"sgtu",      "I",   "d,t,s",  MATCH_SLTU, MASK_SLTU, match_opcode, INSN_ALIAS },
++{"sb",        "I",   "t,q(s)",  MATCH_SB, MASK_SB, match_opcode, 0 },
++{"sb",        "I",   "t,A,s",  0, (int) M_SB, match_never, INSN_MACRO },
++{"sh",        "I",   "t,q(s)",  MATCH_SH, MASK_SH, match_opcode, 0 },
++{"sh",        "I",   "t,A,s",  0, (int) M_SH, match_never, INSN_MACRO },
++{"sw",        "C",   "CV,CM(Cc)",  MATCH_C_SWSP, MASK_C_SWSP, match_opcode, INSN_ALIAS },
++{"sw",        "C",   "Ct,Ck(Cs)",  MATCH_C_SW, MASK_C_SW, match_opcode, INSN_ALIAS },
++{"sw",        "I",   "t,q(s)",  MATCH_SW, MASK_SW, match_opcode, 0 },
++{"sw",        "I",   "t,A,s",  0, (int) M_SW, match_never, INSN_MACRO },
++{"fence",     "I",   "",  MATCH_FENCE | MASK_PRED | MASK_SUCC, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
++{"fence",     "I",   "P,Q",  MATCH_FENCE, MASK_FENCE | MASK_RD | MASK_RS1 | (MASK_IMM & ~MASK_PRED & ~MASK_SUCC), match_opcode, 0 },
++{"fence.i",   "I",   "",  MATCH_FENCE_I, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, 0 },
++{"rdcycle",   "I",   "d",  MATCH_RDCYCLE, MASK_RDCYCLE, match_opcode, 0 },
++{"rdinstret", "I",   "d",  MATCH_RDINSTRET, MASK_RDINSTRET, match_opcode, 0 },
++{"rdtime",    "I",   "d",  MATCH_RDTIME, MASK_RDTIME, match_opcode, 0 },
++{"rdcycleh",  "32I", "d",  MATCH_RDCYCLEH, MASK_RDCYCLEH, match_opcode, 0 },
++{"rdinstreth","32I", "d",  MATCH_RDINSTRETH, MASK_RDINSTRETH, match_opcode, 0 },
++{"rdtimeh",   "32I", "d",  MATCH_RDTIMEH, MASK_RDTIMEH, match_opcode, 0 },
++{"ecall",     "I",   "",    MATCH_SCALL, MASK_SCALL, match_opcode, 0 },
++{"scall",     "I",   "",    MATCH_SCALL, MASK_SCALL, match_opcode, 0 },
++{"xori",      "I",   "d,s,j",  MATCH_XORI, MASK_XORI, match_opcode, 0 },
++{"xor",       "C",   "Cs,Cw,Ct",  MATCH_C_XOR, MASK_C_XOR, match_opcode, INSN_ALIAS },
++{"xor",       "C",   "Cs,Ct,Cw",  MATCH_C_XOR, MASK_C_XOR, match_opcode, INSN_ALIAS },
++{"xor",       "I",   "d,s,t",  MATCH_XOR, MASK_XOR, match_opcode, 0 },
++{"xor",       "I",   "d,s,j",  MATCH_XORI, MASK_XORI, match_opcode, INSN_ALIAS },
++{"lwu",       "64I", "d,o(s)",  MATCH_LWU, MASK_LWU, match_opcode, 0 },
++{"lwu",       "64I", "d,A",  0, (int) M_LWU, match_never, INSN_MACRO },
++{"ld",        "64C", "d,Cn(Cc)",  MATCH_C_LDSP, MASK_C_LDSP, match_rd_nonzero, INSN_ALIAS },
++{"ld",        "64C", "Ct,Cl(Cs)",  MATCH_C_LD, MASK_C_LD, match_opcode, INSN_ALIAS },
++{"ld",        "64I", "d,o(s)", MATCH_LD, MASK_LD, match_opcode, 0 },
++{"ld",        "64I", "d,A",  0, (int) M_LD, match_never, INSN_MACRO },
++{"sd",        "64C", "CV,CN(Cc)",  MATCH_C_SDSP, MASK_C_SDSP, match_opcode, INSN_ALIAS },
++{"sd",        "64C", "Ct,Cl(Cs)",  MATCH_C_SD, MASK_C_SD, match_opcode, INSN_ALIAS },
++{"sd",        "64I", "t,q(s)",  MATCH_SD, MASK_SD, match_opcode, 0 },
++{"sd",        "64I", "t,A,s",  0, (int) M_SD, match_never, INSN_MACRO },
++{"sext.w",    "64C", "d,CU",  MATCH_C_ADDIW, MASK_C_ADDIW | MASK_RVC_IMM, match_rd_nonzero, INSN_ALIAS },
++{"sext.w",    "64I", "d,s",  MATCH_ADDIW, MASK_ADDIW | MASK_IMM, match_opcode, INSN_ALIAS },
++{"addiw",     "64C", "d,CU,Cj",  MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, INSN_ALIAS },
++{"addiw",     "64I", "d,s,j",  MATCH_ADDIW, MASK_ADDIW, match_opcode, 0 },
++{"addw",      "64C", "Cs,Cw,Ct",  MATCH_C_ADDW, MASK_C_ADDW, match_opcode, INSN_ALIAS },
++{"addw",      "64C", "Cs,Ct,Cw",  MATCH_C_ADDW, MASK_C_ADDW, match_opcode, INSN_ALIAS },
++{"addw",      "64C", "d,CU,Cj",  MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, INSN_ALIAS },
++{"addw",      "64I", "d,s,t",  MATCH_ADDW, MASK_ADDW, match_opcode, 0 },
++{"addw",      "64I", "d,s,j",  MATCH_ADDIW, MASK_ADDIW, match_opcode, INSN_ALIAS },
++{"negw",      "64I", "d,t",  MATCH_SUBW, MASK_SUBW | MASK_RS1, match_opcode, INSN_ALIAS }, /* sub 0 */
++{"slliw",     "64I", "d,s,<",   MATCH_SLLIW, MASK_SLLIW, match_opcode, 0 },
++{"sllw",      "64I", "d,s,t",   MATCH_SLLW, MASK_SLLW, match_opcode, 0 },
++{"sllw",      "64I", "d,s,<",   MATCH_SLLIW, MASK_SLLIW, match_opcode, INSN_ALIAS },
++{"srliw",     "64I", "d,s,<",   MATCH_SRLIW, MASK_SRLIW, match_opcode, 0 },
++{"srlw",      "64I", "d,s,t",   MATCH_SRLW, MASK_SRLW, match_opcode, 0 },
++{"srlw",      "64I", "d,s,<",   MATCH_SRLIW, MASK_SRLIW, match_opcode, INSN_ALIAS },
++{"sraiw",     "64I", "d,s,<",   MATCH_SRAIW, MASK_SRAIW, match_opcode, 0 },
++{"sraw",      "64I", "d,s,t",   MATCH_SRAW, MASK_SRAW, match_opcode, 0 },
++{"sraw",      "64I", "d,s,<",   MATCH_SRAIW, MASK_SRAIW, match_opcode, INSN_ALIAS },
++{"subw",      "64C", "Cs,Cw,Ct",  MATCH_C_SUBW, MASK_C_SUBW, match_opcode, INSN_ALIAS },
++{"subw",      "64I", "d,s,t",  MATCH_SUBW, MASK_SUBW, match_opcode, 0 },
++
++/* Atomic memory operation instruction subset */
++{"lr.w",         "A",   "d,0(s)",    MATCH_LR_W, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
++{"sc.w",         "A",   "d,t,0(s)",  MATCH_SC_W, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
++{"amoadd.w",     "A",   "d,t,0(s)",  MATCH_AMOADD_W, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
++{"amoswap.w",    "A",   "d,t,0(s)",  MATCH_AMOSWAP_W, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
++{"amoand.w",     "A",   "d,t,0(s)",  MATCH_AMOAND_W, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
++{"amoor.w",      "A",   "d,t,0(s)",  MATCH_AMOOR_W, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
++{"amoxor.w",     "A",   "d,t,0(s)",  MATCH_AMOXOR_W, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
++{"amomax.w",     "A",   "d,t,0(s)",  MATCH_AMOMAX_W, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.w",    "A",   "d,t,0(s)",  MATCH_AMOMAXU_W, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
++{"amomin.w",     "A",   "d,t,0(s)",  MATCH_AMOMIN_W, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
++{"amominu.w",    "A",   "d,t,0(s)",  MATCH_AMOMINU_W, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
++{"lr.w.aq",      "A",   "d,0(s)",    MATCH_LR_W | MASK_AQ, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
++{"sc.w.aq",      "A",   "d,t,0(s)",  MATCH_SC_W | MASK_AQ, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
++{"amoadd.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOADD_W | MASK_AQ, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
++{"amoswap.w.aq", "A",   "d,t,0(s)",  MATCH_AMOSWAP_W | MASK_AQ, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
++{"amoand.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOAND_W | MASK_AQ, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
++{"amoor.w.aq",   "A",   "d,t,0(s)",  MATCH_AMOOR_W | MASK_AQ, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
++{"amoxor.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOXOR_W | MASK_AQ, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
++{"amomax.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOMAX_W | MASK_AQ, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.w.aq", "A",   "d,t,0(s)",  MATCH_AMOMAXU_W | MASK_AQ, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
++{"amomin.w.aq",  "A",   "d,t,0(s)",  MATCH_AMOMIN_W | MASK_AQ, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
++{"amominu.w.aq", "A",   "d,t,0(s)",  MATCH_AMOMINU_W | MASK_AQ, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
++{"lr.w.rl",      "A",   "d,0(s)",    MATCH_LR_W | MASK_RL, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
++{"sc.w.rl",      "A",   "d,t,0(s)",  MATCH_SC_W | MASK_RL, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
++{"amoadd.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOADD_W | MASK_RL, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
++{"amoswap.w.rl", "A",   "d,t,0(s)",  MATCH_AMOSWAP_W | MASK_RL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
++{"amoand.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOAND_W | MASK_RL, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
++{"amoor.w.rl",   "A",   "d,t,0(s)",  MATCH_AMOOR_W | MASK_RL, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
++{"amoxor.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOXOR_W | MASK_RL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
++{"amomax.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOMAX_W | MASK_RL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.w.rl", "A",   "d,t,0(s)",  MATCH_AMOMAXU_W | MASK_RL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
++{"amomin.w.rl",  "A",   "d,t,0(s)",  MATCH_AMOMIN_W | MASK_RL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
++{"amominu.w.rl", "A",   "d,t,0(s)",  MATCH_AMOMINU_W | MASK_RL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
++{"lr.w.sc",      "A",   "d,0(s)",    MATCH_LR_W | MASK_AQRL, MASK_LR_W | MASK_AQRL, match_opcode, 0 },
++{"sc.w.sc",      "A",   "d,t,0(s)",  MATCH_SC_W | MASK_AQRL, MASK_SC_W | MASK_AQRL, match_opcode, 0 },
++{"amoadd.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOADD_W | MASK_AQRL, MASK_AMOADD_W | MASK_AQRL, match_opcode, 0 },
++{"amoswap.w.sc", "A",   "d,t,0(s)",  MATCH_AMOSWAP_W | MASK_AQRL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, 0 },
++{"amoand.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOAND_W | MASK_AQRL, MASK_AMOAND_W | MASK_AQRL, match_opcode, 0 },
++{"amoor.w.sc",   "A",   "d,t,0(s)",  MATCH_AMOOR_W | MASK_AQRL, MASK_AMOOR_W | MASK_AQRL, match_opcode, 0 },
++{"amoxor.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOXOR_W | MASK_AQRL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 0 },
++{"amomax.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOMAX_W | MASK_AQRL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.w.sc", "A",   "d,t,0(s)",  MATCH_AMOMAXU_W | MASK_AQRL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, 0 },
++{"amomin.w.sc",  "A",   "d,t,0(s)",  MATCH_AMOMIN_W | MASK_AQRL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, 0 },
++{"amominu.w.sc", "A",   "d,t,0(s)",  MATCH_AMOMINU_W | MASK_AQRL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, 0 },
++{"lr.d",         "64A", "d,0(s)",    MATCH_LR_D, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
++{"sc.d",         "64A", "d,t,0(s)",  MATCH_SC_D, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
++{"amoadd.d",     "64A", "d,t,0(s)",  MATCH_AMOADD_D, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
++{"amoswap.d",    "64A", "d,t,0(s)",  MATCH_AMOSWAP_D, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
++{"amoand.d",     "64A", "d,t,0(s)",  MATCH_AMOAND_D, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
++{"amoor.d",      "64A", "d,t,0(s)",  MATCH_AMOOR_D, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
++{"amoxor.d",     "64A", "d,t,0(s)",  MATCH_AMOXOR_D, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
++{"amomax.d",     "64A", "d,t,0(s)",  MATCH_AMOMAX_D, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.d",    "64A", "d,t,0(s)",  MATCH_AMOMAXU_D, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
++{"amomin.d",     "64A", "d,t,0(s)",  MATCH_AMOMIN_D, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
++{"amominu.d",    "64A", "d,t,0(s)",  MATCH_AMOMINU_D, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
++{"lr.d.aq",      "64A", "d,0(s)",    MATCH_LR_D | MASK_AQ, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
++{"sc.d.aq",      "64A", "d,t,0(s)",  MATCH_SC_D | MASK_AQ, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
++{"amoadd.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOADD_D | MASK_AQ, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
++{"amoswap.d.aq", "64A", "d,t,0(s)",  MATCH_AMOSWAP_D | MASK_AQ, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
++{"amoand.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOAND_D | MASK_AQ, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
++{"amoor.d.aq",   "64A", "d,t,0(s)",  MATCH_AMOOR_D | MASK_AQ, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
++{"amoxor.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOXOR_D | MASK_AQ, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
++{"amomax.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOMAX_D | MASK_AQ, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.d.aq", "64A", "d,t,0(s)",  MATCH_AMOMAXU_D | MASK_AQ, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
++{"amomin.d.aq",  "64A", "d,t,0(s)",  MATCH_AMOMIN_D | MASK_AQ, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
++{"amominu.d.aq", "64A", "d,t,0(s)",  MATCH_AMOMINU_D | MASK_AQ, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
++{"lr.d.rl",      "64A", "d,0(s)",    MATCH_LR_D | MASK_RL, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
++{"sc.d.rl",      "64A", "d,t,0(s)",  MATCH_SC_D | MASK_RL, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
++{"amoadd.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOADD_D | MASK_RL, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
++{"amoswap.d.rl", "64A", "d,t,0(s)",  MATCH_AMOSWAP_D | MASK_RL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
++{"amoand.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOAND_D | MASK_RL, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
++{"amoor.d.rl",   "64A", "d,t,0(s)",  MATCH_AMOOR_D | MASK_RL, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
++{"amoxor.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOXOR_D | MASK_RL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
++{"amomax.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOMAX_D | MASK_RL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.d.rl", "64A", "d,t,0(s)",  MATCH_AMOMAXU_D | MASK_RL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
++{"amomin.d.rl",  "64A", "d,t,0(s)",  MATCH_AMOMIN_D | MASK_RL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
++{"amominu.d.rl", "64A", "d,t,0(s)",  MATCH_AMOMINU_D | MASK_RL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
++{"lr.d.sc",      "64A", "d,0(s)",    MATCH_LR_D | MASK_AQRL, MASK_LR_D | MASK_AQRL, match_opcode, 0 },
++{"sc.d.sc",      "64A", "d,t,0(s)",  MATCH_SC_D | MASK_AQRL, MASK_SC_D | MASK_AQRL, match_opcode, 0 },
++{"amoadd.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOADD_D | MASK_AQRL, MASK_AMOADD_D | MASK_AQRL, match_opcode, 0 },
++{"amoswap.d.sc", "64A", "d,t,0(s)",  MATCH_AMOSWAP_D | MASK_AQRL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, 0 },
++{"amoand.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOAND_D | MASK_AQRL, MASK_AMOAND_D | MASK_AQRL, match_opcode, 0 },
++{"amoor.d.sc",   "64A", "d,t,0(s)",  MATCH_AMOOR_D | MASK_AQRL, MASK_AMOOR_D | MASK_AQRL, match_opcode, 0 },
++{"amoxor.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOXOR_D | MASK_AQRL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, 0 },
++{"amomax.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOMAX_D | MASK_AQRL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, 0 },
++{"amomaxu.d.sc", "64A", "d,t,0(s)",  MATCH_AMOMAXU_D | MASK_AQRL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, 0 },
++{"amomin.d.sc",  "64A", "d,t,0(s)",  MATCH_AMOMIN_D | MASK_AQRL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, 0 },
++{"amominu.d.sc", "64A", "d,t,0(s)",  MATCH_AMOMINU_D | MASK_AQRL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, 0 },
++
++/* Multiply/Divide instruction subset */
++{"mul",       "M",   "d,s,t",  MATCH_MUL, MASK_MUL, match_opcode, 0 },
++{"mulh",      "M",   "d,s,t",  MATCH_MULH, MASK_MULH, match_opcode, 0 },
++{"mulhu",     "M",   "d,s,t",  MATCH_MULHU, MASK_MULHU, match_opcode, 0 },
++{"mulhsu",    "M",   "d,s,t",  MATCH_MULHSU, MASK_MULHSU, match_opcode, 0 },
++{"div",       "M",   "d,s,t",  MATCH_DIV, MASK_DIV, match_opcode, 0 },
++{"divu",      "M",   "d,s,t",  MATCH_DIVU, MASK_DIVU, match_opcode, 0 },
++{"rem",       "M",   "d,s,t",  MATCH_REM, MASK_REM, match_opcode, 0 },
++{"remu",      "M",   "d,s,t",  MATCH_REMU, MASK_REMU, match_opcode, 0 },
++{"mulw",      "64M", "d,s,t",  MATCH_MULW, MASK_MULW, match_opcode, 0 },
++{"divw",      "64M", "d,s,t",  MATCH_DIVW, MASK_DIVW, match_opcode, 0 },
++{"divuw",     "64M", "d,s,t",  MATCH_DIVUW, MASK_DIVUW, match_opcode, 0 },
++{"remw",      "64M", "d,s,t",  MATCH_REMW, MASK_REMW, match_opcode, 0 },
++{"remuw",     "64M", "d,s,t",  MATCH_REMUW, MASK_REMUW, match_opcode, 0 },
++
++/* Single-precision floating-point instruction subset */
++{"frsr",      "F",   "d",  MATCH_FRCSR, MASK_FRCSR, match_opcode, 0 },
++{"fssr",      "F",   "s",  MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, 0 },
++{"fssr",      "F",   "d,s",  MATCH_FSCSR, MASK_FSCSR, match_opcode, 0 },
++{"frcsr",     "F",   "d",  MATCH_FRCSR, MASK_FRCSR, match_opcode, 0 },
++{"fscsr",     "F",   "s",  MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, 0 },
++{"fscsr",     "F",   "d,s",  MATCH_FSCSR, MASK_FSCSR, match_opcode, 0 },
++{"frrm",      "F",   "d",  MATCH_FRRM, MASK_FRRM, match_opcode, 0 },
++{"fsrm",      "F",   "s",  MATCH_FSRM, MASK_FSRM | MASK_RD, match_opcode, 0 },
++{"fsrm",      "F",   "d,s",  MATCH_FSRM, MASK_FSRM, match_opcode, 0 },
++{"frflags",   "F",   "d",  MATCH_FRFLAGS, MASK_FRFLAGS, match_opcode, 0 },
++{"fsflags",   "F",   "s",  MATCH_FSFLAGS, MASK_FSFLAGS | MASK_RD, match_opcode, 0 },
++{"fsflags",   "F",   "d,s",  MATCH_FSFLAGS, MASK_FSFLAGS, match_opcode, 0 },
++{"flw",       "32C", "D,Cm(Cc)",  MATCH_C_FLWSP, MASK_C_FLWSP, match_opcode, INSN_ALIAS },
++{"flw",       "32C", "CD,Ck(Cs)",  MATCH_C_FLW, MASK_C_FLW, match_opcode, INSN_ALIAS },
++{"flw",       "F",   "D,o(s)",  MATCH_FLW, MASK_FLW, match_opcode, 0 },
++{"flw",       "F",   "D,A,s",  0, (int) M_FLW, match_never, INSN_MACRO },
++{"fsw",       "32C", "CT,CM(Cc)",  MATCH_C_FSWSP, MASK_C_FSWSP, match_opcode, INSN_ALIAS },
++{"fsw",       "32C", "CD,Ck(Cs)",  MATCH_C_FSW, MASK_C_FSW, match_opcode, INSN_ALIAS },
++{"fsw",       "F",   "T,q(s)",  MATCH_FSW, MASK_FSW, match_opcode, 0 },
++{"fsw",       "F",   "T,A,s",  0, (int) M_FSW, match_never, INSN_MACRO },
++{"fmv.x.s",   "F",   "d,S",  MATCH_FMV_X_S, MASK_FMV_X_S, match_opcode, 0 },
++{"fmv.s.x",   "F",   "D,s",  MATCH_FMV_S_X, MASK_FMV_S_X, match_opcode, 0 },
++{"fmv.s",     "F",   "D,U",  MATCH_FSGNJ_S, MASK_FSGNJ_S, match_rs1_eq_rs2, INSN_ALIAS },
++{"fneg.s",    "F",   "D,U",  MATCH_FSGNJN_S, MASK_FSGNJN_S, match_rs1_eq_rs2, INSN_ALIAS },
++{"fabs.s",    "F",   "D,U",  MATCH_FSGNJX_S, MASK_FSGNJX_S, match_rs1_eq_rs2, INSN_ALIAS },
++{"fsgnj.s",   "F",   "D,S,T",  MATCH_FSGNJ_S, MASK_FSGNJ_S, match_opcode, 0 },
++{"fsgnjn.s",  "F",   "D,S,T",  MATCH_FSGNJN_S, MASK_FSGNJN_S, match_opcode, 0 },
++{"fsgnjx.s",  "F",   "D,S,T",  MATCH_FSGNJX_S, MASK_FSGNJX_S, match_opcode, 0 },
++{"fadd.s",    "F",   "D,S,T",  MATCH_FADD_S | MASK_RM, MASK_FADD_S | MASK_RM, match_opcode, 0 },
++{"fadd.s",    "F",   "D,S,T,m",  MATCH_FADD_S, MASK_FADD_S, match_opcode, 0 },
++{"fsub.s",    "F",   "D,S,T",  MATCH_FSUB_S | MASK_RM, MASK_FSUB_S | MASK_RM, match_opcode, 0 },
++{"fsub.s",    "F",   "D,S,T,m",  MATCH_FSUB_S, MASK_FSUB_S, match_opcode, 0 },
++{"fmul.s",    "F",   "D,S,T",  MATCH_FMUL_S | MASK_RM, MASK_FMUL_S | MASK_RM, match_opcode, 0 },
++{"fmul.s",    "F",   "D,S,T,m",  MATCH_FMUL_S, MASK_FMUL_S, match_opcode, 0 },
++{"fdiv.s",    "F",   "D,S,T",  MATCH_FDIV_S | MASK_RM, MASK_FDIV_S | MASK_RM, match_opcode, 0 },
++{"fdiv.s",    "F",   "D,S,T,m",  MATCH_FDIV_S, MASK_FDIV_S, match_opcode, 0 },
++{"fsqrt.s",   "F",   "D,S",  MATCH_FSQRT_S | MASK_RM, MASK_FSQRT_S | MASK_RM, match_opcode, 0 },
++{"fsqrt.s",   "F",   "D,S,m",  MATCH_FSQRT_S, MASK_FSQRT_S, match_opcode, 0 },
++{"fmin.s",    "F",   "D,S,T",  MATCH_FMIN_S, MASK_FMIN_S, match_opcode, 0 },
++{"fmax.s",    "F",   "D,S,T",  MATCH_FMAX_S, MASK_FMAX_S, match_opcode, 0 },
++{"fmadd.s",   "F",   "D,S,T,R",  MATCH_FMADD_S | MASK_RM, MASK_FMADD_S | MASK_RM, match_opcode, 0 },
++{"fmadd.s",   "F",   "D,S,T,R,m",  MATCH_FMADD_S, MASK_FMADD_S, match_opcode, 0 },
++{"fnmadd.s",  "F",   "D,S,T,R",  MATCH_FNMADD_S | MASK_RM, MASK_FNMADD_S | MASK_RM, match_opcode, 0 },
++{"fnmadd.s",  "F",   "D,S,T,R,m",  MATCH_FNMADD_S, MASK_FNMADD_S, match_opcode, 0 },
++{"fmsub.s",   "F",   "D,S,T,R",  MATCH_FMSUB_S | MASK_RM, MASK_FMSUB_S | MASK_RM, match_opcode, 0 },
++{"fmsub.s",   "F",   "D,S,T,R,m",  MATCH_FMSUB_S, MASK_FMSUB_S, match_opcode, 0 },
++{"fnmsub.s",  "F",   "D,S,T,R",  MATCH_FNMSUB_S | MASK_RM, MASK_FNMSUB_S | MASK_RM, match_opcode, 0 },
++{"fnmsub.s",  "F",   "D,S,T,R,m",  MATCH_FNMSUB_S, MASK_FNMSUB_S, match_opcode, 0 },
++{"fcvt.w.s",  "F",   "d,S",  MATCH_FCVT_W_S | MASK_RM, MASK_FCVT_W_S | MASK_RM, match_opcode, 0 },
++{"fcvt.w.s",  "F",   "d,S,m",  MATCH_FCVT_W_S, MASK_FCVT_W_S, match_opcode, 0 },
++{"fcvt.wu.s", "F",   "d,S",  MATCH_FCVT_WU_S | MASK_RM, MASK_FCVT_WU_S | MASK_RM, match_opcode, 0 },
++{"fcvt.wu.s", "F",   "d,S,m",  MATCH_FCVT_WU_S, MASK_FCVT_WU_S, match_opcode, 0 },
++{"fcvt.s.w",  "F",   "D,s",  MATCH_FCVT_S_W | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, 0 },
++{"fcvt.s.w",  "F",   "D,s,m",  MATCH_FCVT_S_W, MASK_FCVT_S_W, match_opcode, 0 },
++{"fcvt.s.wu", "F",   "D,s",  MATCH_FCVT_S_WU | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, 0 },
++{"fcvt.s.wu", "F",   "D,s,m",  MATCH_FCVT_S_WU, MASK_FCVT_S_WU, match_opcode, 0 },
++{"fclass.s",  "F",   "d,S",  MATCH_FCLASS_S, MASK_FCLASS_S, match_opcode, 0 },
++{"feq.s",     "F",   "d,S,T",    MATCH_FEQ_S, MASK_FEQ_S, match_opcode, 0 },
++{"flt.s",     "F",   "d,S,T",    MATCH_FLT_S, MASK_FLT_S, match_opcode, 0 },
++{"fle.s",     "F",   "d,S,T",    MATCH_FLE_S, MASK_FLE_S, match_opcode, 0 },
++{"fgt.s",     "F",   "d,T,S",    MATCH_FLT_S, MASK_FLT_S, match_opcode, 0 },
++{"fge.s",     "F",   "d,T,S",    MATCH_FLE_S, MASK_FLE_S, match_opcode, 0 },
++{"fcvt.l.s",  "64F", "d,S",  MATCH_FCVT_L_S | MASK_RM, MASK_FCVT_L_S | MASK_RM, match_opcode, 0 },
++{"fcvt.l.s",  "64F", "d,S,m",  MATCH_FCVT_L_S, MASK_FCVT_L_S, match_opcode, 0 },
++{"fcvt.lu.s", "64F", "d,S",  MATCH_FCVT_LU_S | MASK_RM, MASK_FCVT_LU_S | MASK_RM, match_opcode, 0 },
++{"fcvt.lu.s", "64F", "d,S,m",  MATCH_FCVT_LU_S, MASK_FCVT_LU_S, match_opcode, 0 },
++{"fcvt.s.l",  "64F", "D,s",  MATCH_FCVT_S_L | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, 0 },
++{"fcvt.s.l",  "64F", "D,s,m",  MATCH_FCVT_S_L, MASK_FCVT_S_L, match_opcode, 0 },
++{"fcvt.s.lu", "64F", "D,s",  MATCH_FCVT_S_LU | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, 0 },
++{"fcvt.s.lu", "64F", "D,s,m",  MATCH_FCVT_S_LU, MASK_FCVT_S_LU, match_opcode, 0 },
++
++/* Double-precision floating-point instruction subset */
++{"fld",       "C",   "D,Cn(Cc)",  MATCH_C_FLDSP, MASK_C_FLDSP, match_opcode, INSN_ALIAS },
++{"fld",       "C",   "CD,Cl(Cs)",  MATCH_C_FLD, MASK_C_FLD, match_opcode, INSN_ALIAS },
++{"fld",       "D",   "D,o(s)",  MATCH_FLD, MASK_FLD, match_opcode, 0 },
++{"fld",       "D",   "D,A,s",  0, (int) M_FLD, match_never, INSN_MACRO },
++{"fsd",       "C",   "CT,CN(Cc)",  MATCH_C_FSDSP, MASK_C_FSDSP, match_opcode, INSN_ALIAS },
++{"fsd",       "C",   "CD,Cl(Cs)",  MATCH_C_FSD, MASK_C_FSD, match_opcode, INSN_ALIAS },
++{"fsd",       "D",   "T,q(s)",  MATCH_FSD, MASK_FSD, match_opcode, 0 },
++{"fsd",       "D",   "T,A,s",  0, (int) M_FSD, match_never, INSN_MACRO },
++{"fmv.d",     "D",   "D,U",  MATCH_FSGNJ_D, MASK_FSGNJ_D, match_rs1_eq_rs2, INSN_ALIAS },
++{"fneg.d",    "D",   "D,U",  MATCH_FSGNJN_D, MASK_FSGNJN_D, match_rs1_eq_rs2, INSN_ALIAS },
++{"fabs.d",    "D",   "D,U",  MATCH_FSGNJX_D, MASK_FSGNJX_D, match_rs1_eq_rs2, INSN_ALIAS },
++{"fsgnj.d",   "D",   "D,S,T",  MATCH_FSGNJ_D, MASK_FSGNJ_D, match_opcode, 0 },
++{"fsgnjn.d",  "D",   "D,S,T",  MATCH_FSGNJN_D, MASK_FSGNJN_D, match_opcode, 0 },
++{"fsgnjx.d",  "D",   "D,S,T",  MATCH_FSGNJX_D, MASK_FSGNJX_D, match_opcode, 0 },
++{"fadd.d",    "D",   "D,S,T",  MATCH_FADD_D | MASK_RM, MASK_FADD_D | MASK_RM, match_opcode, 0 },
++{"fadd.d",    "D",   "D,S,T,m",  MATCH_FADD_D, MASK_FADD_D, match_opcode, 0 },
++{"fsub.d",    "D",   "D,S,T",  MATCH_FSUB_D | MASK_RM, MASK_FSUB_D | MASK_RM, match_opcode, 0 },
++{"fsub.d",    "D",   "D,S,T,m",  MATCH_FSUB_D, MASK_FSUB_D, match_opcode, 0 },
++{"fmul.d",    "D",   "D,S,T",  MATCH_FMUL_D | MASK_RM, MASK_FMUL_D | MASK_RM, match_opcode, 0 },
++{"fmul.d",    "D",   "D,S,T,m",  MATCH_FMUL_D, MASK_FMUL_D, match_opcode, 0 },
++{"fdiv.d",    "D",   "D,S,T",  MATCH_FDIV_D | MASK_RM, MASK_FDIV_D | MASK_RM, match_opcode, 0 },
++{"fdiv.d",    "D",   "D,S,T,m",  MATCH_FDIV_D, MASK_FDIV_D, match_opcode, 0 },
++{"fsqrt.d",   "D",   "D,S",  MATCH_FSQRT_D | MASK_RM, MASK_FSQRT_D | MASK_RM, match_opcode, 0 },
++{"fsqrt.d",   "D",   "D,S,m",  MATCH_FSQRT_D, MASK_FSQRT_D, match_opcode, 0 },
++{"fmin.d",    "D",   "D,S,T",  MATCH_FMIN_D, MASK_FMIN_D, match_opcode, 0 },
++{"fmax.d",    "D",   "D,S,T",  MATCH_FMAX_D, MASK_FMAX_D, match_opcode, 0 },
++{"fmadd.d",   "D",   "D,S,T,R",  MATCH_FMADD_D | MASK_RM, MASK_FMADD_D | MASK_RM, match_opcode, 0 },
++{"fmadd.d",   "D",   "D,S,T,R,m",  MATCH_FMADD_D, MASK_FMADD_D, match_opcode, 0 },
++{"fnmadd.d",  "D",   "D,S,T,R",  MATCH_FNMADD_D | MASK_RM, MASK_FNMADD_D | MASK_RM, match_opcode, 0 },
++{"fnmadd.d",  "D",   "D,S,T,R,m",  MATCH_FNMADD_D, MASK_FNMADD_D, match_opcode, 0 },
++{"fmsub.d",   "D",   "D,S,T,R",  MATCH_FMSUB_D | MASK_RM, MASK_FMSUB_D | MASK_RM, match_opcode, 0 },
++{"fmsub.d",   "D",   "D,S,T,R,m",  MATCH_FMSUB_D, MASK_FMSUB_D, match_opcode, 0 },
++{"fnmsub.d",  "D",   "D,S,T,R",  MATCH_FNMSUB_D | MASK_RM, MASK_FNMSUB_D | MASK_RM, match_opcode, 0 },
++{"fnmsub.d",  "D",   "D,S,T,R,m",  MATCH_FNMSUB_D, MASK_FNMSUB_D, match_opcode, 0 },
++{"fcvt.w.d",  "D",   "d,S",  MATCH_FCVT_W_D | MASK_RM, MASK_FCVT_W_D | MASK_RM, match_opcode, 0 },
++{"fcvt.w.d",  "D",   "d,S,m",  MATCH_FCVT_W_D, MASK_FCVT_W_D, match_opcode, 0 },
++{"fcvt.wu.d", "D",   "d,S",  MATCH_FCVT_WU_D | MASK_RM, MASK_FCVT_WU_D | MASK_RM, match_opcode, 0 },
++{"fcvt.wu.d", "D",   "d,S,m",  MATCH_FCVT_WU_D, MASK_FCVT_WU_D, match_opcode, 0 },
++{"fcvt.d.w",  "D",   "D,s",  MATCH_FCVT_D_W, MASK_FCVT_D_W | MASK_RM, match_opcode, 0 },
++{"fcvt.d.wu", "D",   "D,s",  MATCH_FCVT_D_WU, MASK_FCVT_D_WU | MASK_RM, match_opcode, 0 },
++{"fcvt.d.s",  "D",   "D,S",  MATCH_FCVT_D_S, MASK_FCVT_D_S | MASK_RM, match_opcode, 0 },
++{"fcvt.s.d",  "D",   "D,S",  MATCH_FCVT_S_D | MASK_RM, MASK_FCVT_S_D | MASK_RM, match_opcode, 0 },
++{"fcvt.s.d",  "D",   "D,S,m",  MATCH_FCVT_S_D, MASK_FCVT_S_D, match_opcode, 0 },
++{"fclass.d",  "D",   "d,S",  MATCH_FCLASS_D, MASK_FCLASS_D, match_opcode, 0 },
++{"feq.d",     "D",   "d,S,T",    MATCH_FEQ_D, MASK_FEQ_D, match_opcode, 0 },
++{"flt.d",     "D",   "d,S,T",    MATCH_FLT_D, MASK_FLT_D, match_opcode, 0 },
++{"fle.d",     "D",   "d,S,T",    MATCH_FLE_D, MASK_FLE_D, match_opcode, 0 },
++{"fgt.d",     "D",   "d,T,S",    MATCH_FLT_D, MASK_FLT_D, match_opcode, 0 },
++{"fge.d",     "D",   "d,T,S",    MATCH_FLE_D, MASK_FLE_D, match_opcode, 0 },
++{"fmv.x.d",   "64D", "d,S",  MATCH_FMV_X_D, MASK_FMV_X_D, match_opcode, 0 },
++{"fmv.d.x",   "64D", "D,s",  MATCH_FMV_D_X, MASK_FMV_D_X, match_opcode, 0 },
++{"fcvt.l.d",  "64D", "d,S",  MATCH_FCVT_L_D | MASK_RM, MASK_FCVT_L_D | MASK_RM, match_opcode, 0 },
++{"fcvt.l.d",  "64D", "d,S,m",  MATCH_FCVT_L_D, MASK_FCVT_L_D, match_opcode, 0 },
++{"fcvt.lu.d", "64D", "d,S",  MATCH_FCVT_LU_D | MASK_RM, MASK_FCVT_LU_D | MASK_RM, match_opcode, 0 },
++{"fcvt.lu.d", "64D", "d,S,m",  MATCH_FCVT_LU_D, MASK_FCVT_LU_D, match_opcode, 0 },
++{"fcvt.d.l",  "64D", "D,s",  MATCH_FCVT_D_L | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, 0 },
++{"fcvt.d.l",  "64D", "D,s,m",  MATCH_FCVT_D_L, MASK_FCVT_D_L, match_opcode, 0 },
++{"fcvt.d.lu", "64D", "D,s",  MATCH_FCVT_D_LU | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, 0 },
++{"fcvt.d.lu", "64D", "D,s,m",  MATCH_FCVT_D_LU, MASK_FCVT_D_LU, match_opcode, 0 },
++
++/* Compressed instructions */
++{"c.ebreak",  "C",   "",  MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, 0 },
++{"c.jr",      "C",   "d",  MATCH_C_JR, MASK_C_JR, match_rd_nonzero, 0 },
++{"c.jalr",    "C",   "d",  MATCH_C_JALR, MASK_C_JALR, match_rd_nonzero, 0 },
++{"c.j",       "C",   "Ca",  MATCH_C_J, MASK_C_J, match_opcode, 0 },
++{"c.jal",     "32C", "Ca",  MATCH_C_JAL, MASK_C_JAL, match_opcode, 0 },
++{"c.beqz",    "C",   "Cs,Cp",  MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, 0 },
++{"c.bnez",    "C",   "Cs,Cp",  MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, 0 },
++{"c.lwsp",    "C",   "d,Cm(Cc)",  MATCH_C_LWSP, MASK_C_LWSP, match_rd_nonzero, 0 },
++{"c.lw",      "C",   "Ct,Ck(Cs)",  MATCH_C_LW, MASK_C_LW, match_opcode, 0 },
++{"c.swsp",    "C",   "CV,CM(Cc)",  MATCH_C_SWSP, MASK_C_SWSP, match_opcode, 0 },
++{"c.sw",      "C",   "Ct,Ck(Cs)",  MATCH_C_SW, MASK_C_SW, match_opcode, 0 },
++{"c.nop",     "C",   "",  MATCH_C_ADDI, 0xffff, match_opcode, 0 },
++{"c.mv",      "C",   "d,CV",  MATCH_C_MV, MASK_C_MV, match_c_add, 0 },
++{"c.lui",     "C",   "d,Cu",  MATCH_C_LUI, MASK_C_LUI, match_c_lui, 0 },
++{"c.li",      "C",   "d,Cj",  MATCH_C_LI, MASK_C_LI, match_rd_nonzero, 0 },
++{"c.addi4spn","C",   "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_opcode, 0 },
++{"c.addi16sp","C",   "Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_opcode, 0 },
++{"c.addi",    "C",   "d,Cj",  MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, 0 },
++{"c.add",     "C",   "d,CV",  MATCH_C_ADD, MASK_C_ADD, match_c_add, 0 },
++{"c.sub",     "C",   "Cs,Ct",  MATCH_C_SUB, MASK_C_SUB, match_opcode, 0 },
++{"c.and",     "C",   "Cs,Ct",  MATCH_C_AND, MASK_C_AND, match_opcode, 0 },
++{"c.or",      "C",   "Cs,Ct",  MATCH_C_OR, MASK_C_OR, match_opcode, 0 },
++{"c.xor",     "C",   "Cs,Ct",  MATCH_C_XOR, MASK_C_XOR, match_opcode, 0 },
++{"c.slli",    "C",   "d,C>",  MATCH_C_SLLI, MASK_C_SLLI, match_rd_nonzero, 0 },
++{"c.srli",    "C",   "Cs,C>",  MATCH_C_SRLI, MASK_C_SRLI, match_opcode, 0 },
++{"c.srai",    "C",   "Cs,C>",  MATCH_C_SRAI, MASK_C_SRAI, match_opcode, 0 },
++{"c.andi",    "C",   "Cs,Cj",  MATCH_C_ANDI, MASK_C_ANDI, match_opcode, 0 },
++{"c.addiw",   "64C", "d,Cj",  MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, 0 },
++{"c.addw",    "64C", "Cs,Ct",  MATCH_C_ADDW, MASK_C_ADDW, match_opcode, 0 },
++{"c.subw",    "64C", "Cs,Ct",  MATCH_C_SUBW, MASK_C_SUBW, match_opcode, 0 },
++{"c.ldsp",    "64C", "d,Cn(Cc)",  MATCH_C_LDSP, MASK_C_LDSP, match_rd_nonzero, 0 },
++{"c.ld",      "64C", "Ct,Cl(Cs)",  MATCH_C_LD, MASK_C_LD, match_opcode, 0 },
++{"c.sdsp",    "64C", "CV,CN(Cc)",  MATCH_C_SDSP, MASK_C_SDSP, match_opcode, 0 },
++{"c.sd",      "64C", "Ct,Cl(Cs)",  MATCH_C_SD, MASK_C_SD, match_opcode, 0 },
++{"c.fldsp",   "C",   "D,Cn(Cc)",  MATCH_C_FLDSP, MASK_C_FLDSP, match_opcode, 0 },
++{"c.fld",     "C",   "CD,Cl(Cs)",  MATCH_C_FLD, MASK_C_FLD, match_opcode, 0 },
++{"c.fsdsp",   "C",   "CT,CN(Cc)",  MATCH_C_FSDSP, MASK_C_FSDSP, match_opcode, 0 },
++{"c.fsd",     "C",   "CD,Cl(Cs)",  MATCH_C_FSD, MASK_C_FSD, match_opcode, 0 },
++{"c.flwsp",   "32C", "D,Cm(Cc)",  MATCH_C_FLWSP, MASK_C_FLWSP, match_opcode, 0 },
++{"c.flw",     "32C", "CD,Ck(Cs)",  MATCH_C_FLW, MASK_C_FLW, match_opcode, 0 },
++{"c.fswsp",   "32C", "CT,CM(Cc)",  MATCH_C_FSWSP, MASK_C_FSWSP, match_opcode, 0 },
++{"c.fsw",     "32C", "CD,Ck(Cs)",  MATCH_C_FSW, MASK_C_FSW, match_opcode, 0 },
++
++/* Supervisor instructions */
++{"csrr",      "I",   "d,E",  MATCH_CSRRS, MASK_CSRRS | MASK_RS1, match_opcode, 0 },
++{"csrwi",     "I",   "E,Z",  MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, 0 },
++{"csrw",      "I",   "E,s",  MATCH_CSRRW, MASK_CSRRW | MASK_RD, match_opcode, 0 },
++{"csrw",      "I",   "E,Z",  MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, 0 },
++{"csrsi",     "I",   "E,Z",  MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, 0 },
++{"csrs",      "I",   "E,s",  MATCH_CSRRS, MASK_CSRRS | MASK_RD, match_opcode, 0 },
++{"csrs",      "I",   "E,Z",  MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, 0 },
++{"csrci",     "I",   "E,Z",  MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, 0 },
++{"csrc",      "I",   "E,s",  MATCH_CSRRC, MASK_CSRRC | MASK_RD, match_opcode, 0 },
++{"csrc",      "I",   "E,Z",  MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, 0 },
++{"csrrw",     "I",   "d,E,s",  MATCH_CSRRW, MASK_CSRRW, match_opcode, 0 },
++{"csrrw",     "I",   "d,E,Z",  MATCH_CSRRWI, MASK_CSRRWI, match_opcode, 0 },
++{"csrrs",     "I",   "d,E,s",  MATCH_CSRRS, MASK_CSRRS, match_opcode, 0 },
++{"csrrs",     "I",   "d,E,Z",  MATCH_CSRRSI, MASK_CSRRSI, match_opcode, 0 },
++{"csrrc",     "I",   "d,E,s",  MATCH_CSRRC, MASK_CSRRC, match_opcode, 0 },
++{"csrrc",     "I",   "d,E,Z",  MATCH_CSRRCI, MASK_CSRRCI, match_opcode, 0 },
++{"csrrwi",    "I",   "d,E,Z",  MATCH_CSRRWI, MASK_CSRRWI, match_opcode, 0 },
++{"csrrsi",    "I",   "d,E,Z",  MATCH_CSRRSI, MASK_CSRRSI, match_opcode, 0 },
++{"csrrci",    "I",   "d,E,Z",  MATCH_CSRRCI, MASK_CSRRCI, match_opcode, 0 },
++{"uret",      "I",   "",     MATCH_URET, MASK_URET, match_opcode, 0 },
++{"sret",      "I",   "",     MATCH_SRET, MASK_SRET, match_opcode, 0 },
++{"hret",      "I",   "",     MATCH_HRET, MASK_HRET, match_opcode, 0 },
++{"mret",      "I",   "",     MATCH_MRET, MASK_MRET, match_opcode, 0 },
++{"dret",      "I",   "",     MATCH_DRET, MASK_DRET, match_opcode, 0 },
++{"sfence.vm", "I",   "",     MATCH_SFENCE_VM, MASK_SFENCE_VM | MASK_RS1, match_opcode, 0 },
++{"sfence.vm", "I",   "s",    MATCH_SFENCE_VM, MASK_SFENCE_VM, match_opcode, 0 },
++{"wfi",       "I",   "",     MATCH_WFI, MASK_WFI, match_opcode, 0 },
++
++/* Rocket Custom Coprocessor extension */
++{"custom0",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2, match_opcode, 0},
++{"custom0",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1, match_opcode, 0},
++{"custom0",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD, match_opcode, 0},
++{"custom0",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2, match_opcode, 0},
++{"custom0",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1, match_opcode, 0},
++{"custom0",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM0, MASK_CUSTOM0, match_opcode, 0},
++{"custom1",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2, match_opcode, 0},
++{"custom1",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1, match_opcode, 0},
++{"custom1",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD, match_opcode, 0},
++{"custom1",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2, match_opcode, 0},
++{"custom1",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1, match_opcode, 0},
++{"custom1",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM1, MASK_CUSTOM1, match_opcode, 0},
++{"custom2",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2, match_opcode, 0},
++{"custom2",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1, match_opcode, 0},
++{"custom2",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD, match_opcode, 0},
++{"custom2",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2, match_opcode, 0},
++{"custom2",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1, match_opcode, 0},
++{"custom2",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM2, MASK_CUSTOM2, match_opcode, 0},
++{"custom3",   "Xcustom", "d,s,t,^j", MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2, match_opcode, 0},
++{"custom3",   "Xcustom", "d,s,^t,^j", MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1, match_opcode, 0},
++{"custom3",   "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD, match_opcode, 0},
++{"custom3",   "Xcustom", "^d,s,t,^j", MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2, match_opcode, 0},
++{"custom3",   "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1, match_opcode, 0},
++{"custom3",   "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM3, MASK_CUSTOM3, match_opcode, 0},
++};
++
++#define RISCV_NUM_OPCODES \
++  ((sizeof riscv_builtin_opcodes) / (sizeof (riscv_builtin_opcodes[0])))
++const int bfd_riscv_num_builtin_opcodes = RISCV_NUM_OPCODES;
++
++/* Removed const from the following to allow for dynamic extensions to the
++   built-in instruction set.  */
++struct riscv_opcode *riscv_opcodes =
++  (struct riscv_opcode *) riscv_builtin_opcodes;
++int bfd_riscv_num_opcodes = RISCV_NUM_OPCODES;
++#undef RISCV_NUM_OPCODES
diff --git a/util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch b/util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch
deleted file mode 100644
index 574e151..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-diff -urN gcc-4.9.2/gcc/config/i386/t-elf64 gcc-4.9.2/gcc/config/i386/t-elf64
---- gcc-4.9.2/gcc/config/i386/t-elf64	1969-12-31 16:00:00.000000000 -0800
-+++ gcc-5.3.0/gcc/config/i386/t-elf64	2015-06-17 11:20:08.032513005 -0700
-@@ -0,0 +1,38 @@
-+# Copyright (C) 2002-2014 Free Software Foundation, Inc.
-+#
-+# This file is part of GCC.
-+#
-+# GCC is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 3, or (at your option)
-+# any later version.
-+#
-+# GCC is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with GCC; see the file COPYING3.  If not see
-+# <http://www.gnu.org/licenses/>.
-+
-+# On Debian, Ubuntu and other derivative distributions, the 32bit libraries
-+# are found in /lib32 and /usr/lib32, /lib64 and /usr/lib64 are symlinks to
-+# /lib and /usr/lib, while other distributions install libraries into /lib64
-+# and /usr/lib64.  The LSB does not enforce the use of /lib64 and /usr/lib64,
-+# it doesn't tell anything about the 32bit libraries on those systems.  Set
-+# MULTILIB_OSDIRNAMES according to what is found on the target.
-+
-+# To support i386, x86-64 and x32 libraries, the directory structrue
-+# should be:
-+#
-+# 	/lib has i386 libraries.
-+# 	/lib64 has x86-64 libraries.
-+# 	/libx32 has x32 libraries.
-+#
-+comma=,
-+MULTILIB_OPTIONS    = $(subst $(comma),/,$(TM_MULTILIB_CONFIG))
-+MULTILIB_DIRNAMES   = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS)))
-+MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-elf)
-+MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-elf)
-+MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-elf-x32)
-diff -urN gcc-4.9.2/gcc/config.gcc gcc-4.9.2/gcc/config.gcc
---- gcc-4.9.2/gcc/config.gcc	2015-06-17 11:20:57.841008182 -0700
-+++ gcc-5.3.0/gcc/config.gcc	2015-06-17 11:17:24.818890200 -0700
-@@ -1353,6 +1353,30 @@
- 	;;
- x86_64-*-elf*)
- 	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h"
-+	tmake_file="${tmake_file} i386/t-elf64"
-+	x86_multilibs="${with_multilib_list}"
-+	if test "$x86_multilibs" = "default"; then
-+		case ${with_abi} in
-+		x32 | mx32)
-+			x86_multilibs="mx32"
-+			;;
-+		*)
-+			x86_multilibs="m64,m32"
-+			;;
-+		esac
-+	fi
-+	x86_multilibs=`echo $x86_multilibs | sed -e 's/,/ /g'`
-+	for x86_multilib in ${x86_multilibs}; do
-+		case ${x86_multilib} in
-+		m32 | m64 | mx32)
-+			TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${x86_multilib}"
-+			;;
-+		*)
-+			echo "--with-multilib-list=${x86_with_multilib} not supported."
-+			exit 1
-+		esac
-+	done
-+	TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'`
- 	;;
- i[34567]86-*-rdos*)
-     tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/rdos.h"
---- gcc-5.3.0/gcc/config/i386/x86-64.h.orig	2015-08-20 17:17:34.555919593 +0200
-+++ gcc-5.3.0/gcc/config/i386/x86-64.h	2015-08-20 17:17:42.615908670 +0200
-@@ -49,7 +49,7 @@
- #define WCHAR_TYPE_SIZE 32
- 
- #undef ASM_SPEC
--#define ASM_SPEC "%{m32:--32} %{m64:--64} %{mx32:--x32}"
-+#define ASM_SPEC "%{m16|m32:--32} %{m64:--64} %{mx32:--x32}"
- 
- #undef ASM_OUTPUT_ALIGNED_BSS
- #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
diff --git a/util/crossgcc/patches/gcc-5.3.0_gnat.patch b/util/crossgcc/patches/gcc-5.3.0_gnat.patch
deleted file mode 100644
index 167d118..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_gnat.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- gcc-5.3.0/gcc/ada/gcc-interface/Make-lang.in.bak	2015-08-24 16:23:25.004493665 +0200
-+++ gcc-5.3.0/gcc/ada/gcc-interface/Make-lang.in	2015-08-24 17:53:52.496636113 +0200
-@@ -45,7 +45,7 @@
- 
- 
- # Extra flags to pass to recursive makes.
--COMMON_ADAFLAGS= -gnatpg
-+COMMON_ADAFLAGS= -gnatpg -gnatwG
- ifeq ($(TREECHECKING),)
- CHECKING_ADAFLAGS=
- else
diff --git a/util/crossgcc/patches/gcc-5.3.0_libc_name_p.patch b/util/crossgcc/patches/gcc-5.3.0_libc_name_p.patch
deleted file mode 100644
index 98e1802..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_libc_name_p.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-diff -urp gcc-5.3.0.bak/gcc/cp/cfns.h gcc-5.3.0/gcc/cp/cfns.h
---- gcc-5.3.0.bak/gcc/cp/cfns.h	2015-01-05 13:33:28.000000000 +0100
-+++ gcc-5.3.0/gcc/cp/cfns.h	2016-12-04 01:33:47.568537831 +0100
-@@ -51,8 +51,12 @@ along with GCC; see the file COPYING3.
- __inline
- #endif
- static unsigned int hash (const char *, unsigned int);
-+static
- #ifdef __GNUC__
- __inline
-+#ifdef __GNUC_STDC_INLINE__
-+__attribute__ ((__gnu_inline__))
-+#endif
- #endif
- const char * libc_name_p (const char *, unsigned int);
- /* maximum key range = 391, duplicates = 0 */
-@@ -122,6 +126,7 @@ hash (register const char *str, register
-   return hval + asso_values[(unsigned char)str[len - 1]];
- }
- 
-+static
- #ifdef __GNUC__
- __inline
- #ifdef __GNUC_STDC_INLINE__
diff --git a/util/crossgcc/patches/gcc-5.3.0_libgcc.patch b/util/crossgcc/patches/gcc-5.3.0_libgcc.patch
deleted file mode 100644
index fd4b254..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_libgcc.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-diff -urN gcc-5.2.0.orig/libgcc/config/t-hardfp gcc-5.2.0/libgcc/config/t-hardfp
---- gcc-5.2.0.orig/libgcc/config/t-hardfp	2015-01-05 04:33:28.000000000 -0800
-+++ gcc-5.3.0/libgcc/config/t-hardfp	2016-04-06 12:04:51.000000000 -0700
-@@ -59,21 +59,52 @@
- 
- hardfp_func_list := $(filter-out $(hardfp_exclusions),$(hardfp_func_list))
- 
-+HOST_OS ?= $(shell uname)
-+
- # Regexp for matching a floating-point mode.
-+ifeq ($(HOST_OS), Darwin)
-+hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
-+else
-+ifeq ($(HOST_OS), FreeBSD)
-+hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
-+else
- hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /\\|/g')
-+endif
-+endif
- 
- # Regexp for matching the end of a function name, after the last
- # floating-point mode.
-+ifeq ($(HOST_OS), Darwin)
-+hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
-+else
-+ifeq ($(HOST_OS), FreeBSD)
-+hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
-+else
- hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /\\|/g')
-+endif
-+endif
- 
- # Add -D options to define:
- #   FUNC: the function name (e.g. __addsf3)
- #   OP:   the function name without the leading __ and with the last
- #            floating-point mode removed (e.g. add3)
- #   TYPE: the last floating-point mode (e.g. sf)
-+
-+ifeq ($(HOST_OS), Darwin)
- hardfp_defines_for = \
-   $(shell echo $1 | \
--    sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+    sed -E 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+else
-+ifeq ($(HOST_OS), FreeBSD)
-+hardfp_defines_for = \
-+  $(shell echo $1 | \
-+    sed -r 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+else
-+hardfp_defines_for = \
-+  $(shell echo $1 | \
-+    sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+endif
-+endif
- 
- hardfp-o = $(patsubst %,%$(objext),$(hardfp_func_list))
- $(hardfp-o): %$(objext): $(srcdir)/config/hardfp.c
diff --git a/util/crossgcc/patches/gcc-5.3.0_nds32.patch b/util/crossgcc/patches/gcc-5.3.0_nds32.patch
deleted file mode 100644
index 34f2573..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_nds32.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-diff -urN gcc-5.3.0.orig/gcc/config/nds32/nds32.md gcc-5.3.0/gcc/config/nds32/nds32.md
---- gcc-5.3.0.orig/gcc/config/nds32/nds32.md	2015-01-15 22:45:09.000000000 -0800
-+++ gcc-5.3.0/gcc/config/nds32/nds32.md	2016-04-14 22:09:09.000000000 -0700
-@@ -2289,11 +2289,11 @@
-   emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2],
- 				  operands[4]));
- 
--  operands[5] = gen_reg_rtx (SImode);
-+  rtx tmp = gen_reg_rtx (SImode);
-   /* Step C, D, E, and F, using another temporary register operands[5].  */
-   emit_jump_insn (gen_casesi_internal (operands[0],
- 				       operands[3],
--				       operands[5]));
-+				       tmp));
-   DONE;
- })
- 
diff --git a/util/crossgcc/patches/gcc-5.3.0_riscv.patch b/util/crossgcc/patches/gcc-5.3.0_riscv.patch
deleted file mode 100644
index 7e2e828..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_riscv.patch
+++ /dev/null
@@ -1,10122 +0,0 @@
---- original-gcc/gcc/config.gcc
-+++ gcc-5.3.0/gcc/config.gcc
-@@ -439,6 +439,10 @@ powerpc*-*-*)
- 	esac
- 	extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
- 	;;
-+riscv*)
-+	cpu_type=riscv
-+	need_64bit_hwint=yes
-+	;;
- rs6000*-*-*)
- 	extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
- 	;;
-@@ -1982,6 +1986,34 @@ microblaze*-*-elf)
- 	cxx_target_objs="${cxx_target_objs} microblaze-c.o"
- 	tmake_file="${tmake_file} microblaze/t-microblaze"
-         ;;
-+riscv32*-*-linux*)
-+	tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h riscv/default-32.h ${tm_file} riscv/linux.h riscv/linux64.h"
-+	tmake_file="${tmake_file} riscv/t-linux64"
-+	gnu_ld=yes
-+	gas=yes
-+	gcc_cv_initfini_array=yes
-+	;;
-+riscv*-*-linux*)
-+	tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h riscv/linux64.h"
-+	tmake_file="${tmake_file} riscv/t-linux64"
-+	gnu_ld=yes
-+	gas=yes
-+	gcc_cv_initfini_array=yes
-+	;;
-+riscv32*-*-elf*)
-+	tm_file="elfos.h newlib-stdint.h riscv/default-32.h ${tm_file} riscv/elf.h"
-+	tmake_file="${tmake_file} riscv/t-elf"
-+	gnu_ld=yes
-+	gas=yes
-+	gcc_cv_initfini_array=yes
-+	;;
-+riscv*-*-elf*)
-+	tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
-+	tmake_file="${tmake_file} riscv/t-elf"
-+	gnu_ld=yes
-+	gas=yes
-+	gcc_cv_initfini_array=yes
-+	;;
- mips*-*-netbsd*)			# NetBSD/mips, either endian.
- 	target_cpu_default="MASK_ABICALLS"
- 	tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
-@@ -3866,6 +3898,31 @@ case "${target}" in
- 		done
- 		;;
- 
-+	riscv*-*-*)
-+		supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64"
-+
-+		case ${with_float} in
-+		"" | soft | hard)
-+			# OK
-+			;;
-+		*)
-+			echo "Unknown floating point type used in --with-float=$with_float" 1>&2
-+			exit 1
-+			;;
-+		esac
-+
-+		case ${with_abi} in
-+		"" | 32 | 64)
-+			# OK
-+			;;
-+		*)
-+			echo "Unknown ABI used in --with-abi=$with_abi" 1>&2
-+			exit 1
-+			;;
-+		esac
-+
-+    ;;
-+
- 	mips*-*-*)
- 		supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
- 
---- original-gcc/gcc/configure
-+++ gcc-5.3.0/gcc/configure
-@@ -23717,6 +23717,25 @@ x3:	.space 4
- 	tls_first_minor=14
- 	tls_as_opt="-a32 --fatal-warnings"
- 	;;
-+  riscv*-*-*)
-+    conftest_s='
-+	.section .tdata,"awT",@progbits
-+x:
-+	.word 2
-+	.text
-+	la.tls.gd a0,x
-+	la.tls.ie a1,x
-+	lui a0,%tls_ie_pcrel_hi(x)
-+	lw a0,%pcrel_lo(x)(a0)
-+	add a0,a0,tp
-+	lw a0,0(a0)
-+	lui a0,%tprel_hi(x)
-+	add a0,a0,tp,%tprel_add(x)
-+	lw a0,%tprel_lo(x)(a0)'
-+	tls_first_major=2
-+	tls_first_minor=21
-+	tls_as_opt='-m32 --fatal-warnings'
-+	;;
-   s390-*-*)
-     conftest_s='
- 	.section ".tdata","awT",@progbits
---- original-gcc/gcc/configure.ac
-+++ gcc-5.3.0/gcc/configure.ac
-@@ -3263,6 +3263,25 @@ x3:	.space 4
- 	tls_first_minor=14
- 	tls_as_opt="-a32 --fatal-warnings"
- 	;;
-+  riscv*-*-*)
-+    conftest_s='
-+	.section .tdata,"awT",@progbits
-+x:
-+	.word 2
-+	.text
-+	la.tls.gd a0,x
-+	la.tls.ie a1,x
-+	lui a0,%tls_ie_pcrel_hi(x)
-+	lw a0,%pcrel_lo(x)(a0)
-+	add a0,a0,tp
-+	lw a0,0(a0)
-+	lui a0,%tprel_hi(x)
-+	add a0,a0,tp,%tprel_add(x)
-+	lw a0,%tprel_lo(x)(a0)'
-+	tls_first_major=2
-+	tls_first_minor=21
-+	tls_as_opt='-m32 --fatal-warnings'
-+	;;
-   s390-*-*)
-     conftest_s='
- 	.section ".tdata","awT",@progbits
---- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
-@@ -6,6 +6,9 @@
- #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
-   /* On PPC division by zero does not trap.  */
- # define DO_TEST 0
-+#elif defined (__riscv__)
-+  /* On RISC-V division by zero does not trap.  */
-+# define DO_TEST 0
- #elif defined (__SPU__)
-   /* On SPU division by zero does not trap.  */
- # define DO_TEST 0
---- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.dg/20020312-2.c
-@@ -66,6 +66,8 @@ extern void abort (void);
- # else
- #  define PIC_REG  "30"
- # endif
-+#elif defined(__riscv__)
-+/* No pic register.  */
- #elif defined(__RX__)
- /* No pic register.  */
- #elif defined(__s390__)
---- original-gcc/gcc/testsuite/gcc.dg/20040813-1.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.dg/20040813-1.c
-@@ -2,7 +2,7 @@
- /* Contributed by Devang Patel  <dpatel at apple.com>  */
- 
- /* { dg-do compile } */
--/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* nios2-*-* *-*-vxworks* nvptx-*-* } { "*" } { "" } } */
-+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* riscv*-*-* tile*-*-* nios2-*-* *-*-vxworks* nvptx-*-* } { "*" } { "" } } */
- /* { dg-options "-gstabs" } */
- 
- int
---- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.dg/stack-usage-1.c
-@@ -61,6 +61,8 @@
- #  else
- #    define SIZE 240
- #  endif
-+#elif defined (__riscv__)
-+#  define SIZE 240
- #elif defined (__AVR__)
- #  define SIZE 254
- #elif defined (__s390x__)
---- original-gcc/libatomic/configure.tgt
-+++ gcc-5.3.0/libatomic/configure.tgt
-@@ -33,6 +33,7 @@ case "${target_cpu}" in
- 	ARCH=alpha
- 	;;
-   rs6000 | powerpc*)	ARCH=powerpc ;;
-+  riscv*)		ARCH=riscv ;;
-   sh*)			ARCH=sh ;;
- 
-   arm*)
---- original-gcc/libgcc/config.host
-+++ gcc-5.3.0/libgcc/config.host
-@@ -167,6 +167,9 @@ powerpc*-*-*)
- 	;;
- rs6000*-*-*)
- 	;;
-+riscv*)
-+	cpu_type=riscv
-+	;;
- sparc64*-*-*)
- 	cpu_type=sparc
- 	;;
-@@ -1064,6 +1067,14 @@ powerpcle-*-eabi*)
- 	tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
- 	extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
- 	;;
-+riscv*-*-linux*)
-+	tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-elf riscv/t-elf${host_address}"
-+	extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
-+	;;
-+riscv*-*-*)
-+	tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-elf riscv/t-elf${host_address}"
-+	extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
-+	;;
- rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
- 	md_unwind_header=rs6000/aix-unwind.h
- 	tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
---- original-gcc/libsanitizer/asan/asan_linux.cc
-+++ gcc-5.3.0/libsanitizer/asan/asan_linux.cc
-@@ -213,6 +213,11 @@ void GetPcSpBp(void *context, uptr *pc,
-   *pc = ucontext->uc_mcontext.gregs[31];
-   *bp = ucontext->uc_mcontext.gregs[30];
-   *sp = ucontext->uc_mcontext.gregs[29];
-+# elif defined(__riscv__)
-+  ucontext_t *ucontext = (ucontext_t*)context;
-+  *pc = ucontext->uc_mcontext.gregs[REG_PC];
-+  *bp = ucontext->uc_mcontext.gregs[REG_S0];
-+  *sp = ucontext->uc_mcontext.gregs[REG_SP];
- #else
- # error "Unsupported arch"
- #endif
---- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
-@@ -61,7 +61,8 @@ namespace __sanitizer {
- }  // namespace __sanitizer
- 
- #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
--                            && !defined(__mips__) && !defined(__sparc__)
-+                            && !defined(__mips__) && !defined(__sparc__)\
-+                            && !defined(__riscv__)
- COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
- #endif
- 
---- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
-@@ -72,6 +72,10 @@ namespace __sanitizer {
-   const unsigned struct_kernel_stat_sz = 144;
-   #endif
-   const unsigned struct_kernel_stat64_sz = 104;
-+#elif defined(__riscv__)
-+  const unsigned struct___old_kernel_stat_sz = 0;
-+  const unsigned struct_kernel_stat_sz = 128;
-+  const unsigned struct_kernel_stat64_sz = 128;
- #elif defined(__sparc__) && defined(__arch64__)
-   const unsigned struct___old_kernel_stat_sz = 0;
-   const unsigned struct_kernel_stat_sz = 104;
-@@ -511,7 +515,7 @@ namespace __sanitizer {
-   typedef long __sanitizer___kernel_off_t;
- #endif
- 
--#if defined(__powerpc__) || defined(__mips__)
-+#if defined(__powerpc__) || defined(__mips__) || defined(__riscv__)
-   typedef unsigned int __sanitizer___kernel_old_uid_t;
-   typedef unsigned int __sanitizer___kernel_old_gid_t;
- #else
-diff -ru gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform.h gcc-5.1.0/libsanitizer/sanitizer_common/sanitizer_platform.h
---- gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform.h	2015-05-13 19:36:27.061421043 -0700
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform.h	2015-05-13 19:44:19.274355577 -0700
-@@ -98,9 +98,9 @@
- 
- // The AArch64 linux port uses the canonical syscall set as mandated by
- // the upstream linux community for all new ports. Other ports may still
--// use legacy syscalls.
-+// use legacy syscalls.  The RISC-V port also does this.
- #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
--# if defined(__aarch64__) && SANITIZER_LINUX
-+# if (defined(__aarch64__) || defined(__riscv__)) && SANITIZER_LINUX
- # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
- # else
- # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
-diff -ru gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc-5.1.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
---- gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h	2015-05-13 19:36:27.061421043 -0700
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h	2015-05-13 19:39:13.515487834 -0700
-@@ -73,7 +73,6 @@
-   #endif
-   const unsigned struct_kernel_stat64_sz = 104;
- #elif defined(__riscv__)
--  const unsigned struct___old_kernel_stat_sz = 0;
-   const unsigned struct_kernel_stat_sz = 128;
-   const unsigned struct_kernel_stat64_sz = 128;
- #elif defined(__sparc__) && defined(__arch64__)
-@@ -104,7 +103,7 @@
- 
- #if SANITIZER_LINUX || SANITIZER_FREEBSD
- 
--#if defined(__powerpc64__)
-+#if defined(__powerpc64__) || defined(__riscv__)
-   const unsigned struct___old_kernel_stat_sz = 0;
- #elif !defined(__sparc__)
-   const unsigned struct___old_kernel_stat_sz = 32;
---- original-gcc/libstdc++-v3/configure
-+++ gcc-5.3.0/libstdc++-v3/configure
-@@ -16646,7 +16646,7 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
-   # Long term, -std=c++0x could be even better, could manage to explicitly
-   # request C99 facilities to the underlying C headers.
-   ac_save_CXXFLAGS="$CXXFLAGS"
--  CXXFLAGS="$CXXFLAGS -std=c++98"
-+  CXXFLAGS="$CXXFLAGS -std=gnu++98"
-   ac_save_LIBS="$LIBS"
-   ac_save_gcc_no_link="$gcc_no_link"
- 
-@@ -17268,9 +17268,11 @@ rm -f core conftest.err conftest.$ac_obj
- $as_echo "$glibcxx_cv_c99_wchar" >&6; }
-   fi
- 
-+  # For newlib, don't check complex since missing c99 functions, but
-+  #   rest of c99 stuff is there so don't loose it
-   # Option parsed, now set things appropriately.
-   if test x"$glibcxx_cv_c99_math" = x"no" ||
--     test x"$glibcxx_cv_c99_complex" = x"no" ||
-+     # test x"$glibcxx_cv_c99_complex" = x"no" ||
-      test x"$glibcxx_cv_c99_stdio" = x"no" ||
-      test x"$glibcxx_cv_c99_stdlib" = x"no" ||
-      test x"$glibcxx_cv_c99_wchar" = x"no"; then
-diff -urN empty/gcc/common/config/riscv/riscv-common.c gcc-5.3.0/gcc/common/config/riscv/riscv-common.c
---- empty/gcc/common/config/riscv/riscv-common.c	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/common/config/riscv/riscv-common.c	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,139 @@
-+/* Common hooks for RISC-V.
-+   Copyright (C) 1989-2014 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "common/common-target.h"
-+#include "common/common-target-def.h"
-+#include "opts.h"
-+#include "flags.h"
-+#include "errors.h"
-+
-+/* Parse a RISC-V ISA string into an option mask.  */
-+
-+static void
-+riscv_parse_arch_string (const char *isa, int *flags)
-+{
-+  const char *p = isa;
-+
-+  if (strncmp (p, "RV32", 4) == 0)
-+    *flags |= MASK_32BIT, p += 4;
-+  else if (strncmp (p, "RV64", 4) == 0)
-+    *flags &= ~MASK_32BIT, p += 4;
-+
-+  if (*p++ != 'I')
-+    {
-+      error ("-march=%s: ISA strings must begin with I, RV32I, or RV64I", isa);
-+      return;
-+    }
-+
-+  *flags &= ~MASK_MULDIV;
-+  if (*p == 'M')
-+    *flags |= MASK_MULDIV, p++;
-+
-+  *flags &= ~MASK_ATOMIC;
-+  if (*p == 'A')
-+    *flags |= MASK_ATOMIC, p++;
-+
-+  *flags |= MASK_SOFT_FLOAT_ABI;
-+  if (*p == 'F')
-+    *flags &= ~MASK_SOFT_FLOAT_ABI, p++;
-+
-+  if (*p == 'D')
-+    {
-+      p++;
-+      if (!TARGET_HARD_FLOAT)
-+	{
-+	  error ("-march=%s: the D extension requires the F extension", isa);
-+	  return;
-+	}
-+    }
-+  else if (TARGET_HARD_FLOAT)
-+    {
-+      error ("-march=%s: single-precision-only is not yet supported", isa);
-+      return;
-+    }
-+
-+  *flags &= ~MASK_RVC;
-+  if (*p == 'C')
-+    *flags |= MASK_RVC, p++;
-+
-+  /* FIXME: For now we just stop parsing when faced with a
-+     non-standard RISC-V ISA extension, partially becauses of a
-+     problem with the naming scheme. */
-+  if (*p == 'X')
-+    return;
-+
-+  if (*p)
-+    {
-+      error ("-march=%s: unsupported ISA substring %s", isa, p);
-+      return;
-+    }
-+}
-+
-+static int
-+riscv_flags_from_arch_string (const char *isa)
-+{
-+  int flags = 0;
-+  riscv_parse_arch_string (isa, &flags);
-+  return flags;
-+}
-+
-+/* Implement TARGET_HANDLE_OPTION.  */
-+
-+static bool
-+riscv_handle_option (struct gcc_options *opts,
-+		     struct gcc_options *opts_set ATTRIBUTE_UNUSED,
-+		     const struct cl_decoded_option *decoded,
-+		     location_t loc ATTRIBUTE_UNUSED)
-+{
-+  switch (decoded->opt_index)
-+    {
-+    case OPT_march_:
-+      riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
-+      return true;
-+
-+    default:
-+      return true;
-+    }
-+}
-+
-+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
-+static const struct default_options riscv_option_optimization_table[] =
-+  {
-+    { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
-+    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
-+    { OPT_LEVELS_NONE, 0, NULL, 0 }
-+  };
-+
-+#undef TARGET_OPTION_OPTIMIZATION_TABLE
-+#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
-+
-+#undef TARGET_DEFAULT_TARGET_FLAGS
-+#define TARGET_DEFAULT_TARGET_FLAGS				\
-+  (TARGET_DEFAULT						\
-+   | riscv_flags_from_arch_string (RISCV_ARCH_STRING_DEFAULT)	\
-+   | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
-+
-+#undef TARGET_HANDLE_OPTION
-+#define TARGET_HANDLE_OPTION riscv_handle_option
-+
-+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
-diff -urN empty/gcc/config/riscv/constraints.md gcc-5.3.0/gcc/config/riscv/constraints.md
---- empty/gcc/config/riscv/constraints.md	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/constraints.md	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,93 @@
-+;; Constraint definitions for RISC-V target.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+;; GNU General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3.  If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+;; Register constraints
-+
-+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
-+  "A floating-point register (if available).")
-+
-+(define_register_constraint "b" "ALL_REGS"
-+  "@internal")
-+
-+(define_register_constraint "j" "T_REGS"
-+  "@internal")
-+
-+(define_register_constraint "l" "JALR_REGS"
-+  "@internal")
-+
-+;; Integer constraints
-+
-+(define_constraint "Z"
-+  "@internal"
-+  (and (match_code "const_int")
-+       (match_test "1")))
-+
-+(define_constraint "I"
-+  "An I-type 12-bit signed immediate."
-+  (and (match_code "const_int")
-+       (match_test "SMALL_OPERAND (ival)")))
-+
-+(define_constraint "J"
-+  "Integer zero."
-+  (and (match_code "const_int")
-+       (match_test "ival == 0")))
-+
-+;; Floating-point constraints
-+
-+(define_constraint "G"
-+  "Floating-point zero."
-+  (and (match_code "const_double")
-+       (match_test "op == CONST0_RTX (mode)")))
-+
-+;; General constraints
-+
-+(define_constraint "Q"
-+  "@internal"
-+  (match_operand 0 "const_arith_operand"))
-+
-+(define_memory_constraint "A"
-+  "An address that is held in a general-purpose register."
-+  (and (match_code "mem")
-+       (match_test "GET_CODE(XEXP(op,0)) == REG")))
-+
-+(define_constraint "S"
-+  "@internal
-+   A constant call address."
-+  (and (match_operand 0 "call_insn_operand")
-+       (match_test "CONSTANT_P (op)")))
-+
-+(define_constraint "T"
-+  "@internal
-+   A constant @code{move_operand}."
-+  (and (match_operand 0 "move_operand")
-+       (match_test "CONSTANT_P (op)")))
-+
-+(define_memory_constraint "W"
-+  "@internal
-+   A memory address based on a member of @code{BASE_REG_CLASS}."
-+  (and (match_code "mem")
-+       (match_operand 0 "memory_operand")))
-+
-+(define_constraint "YG"
-+  "@internal
-+   A vector zero."
-+  (and (match_code "const_vector")
-+       (match_test "op == CONST0_RTX (mode)")))
-diff -urN empty/gcc/config/riscv/default-32.h gcc-5.3.0/gcc/config/riscv/default-32.h
---- empty/gcc/config/riscv/default-32.h	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/default-32.h	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,22 @@
-+/* Definitions of target machine for GCC, for RISC-V,
-+   defaulting to 32-bit code generation.
-+
-+   Copyright (C) 1999-2014 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#define TARGET_64BIT_DEFAULT 0
-diff -urN empty/gcc/config/riscv/elf.h gcc-5.3.0/gcc/config/riscv/elf.h
---- empty/gcc/config/riscv/elf.h	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/elf.h	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,31 @@
-+/* Target macros for riscv*-elf targets.
-+   Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
-+   Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+/* Leave the linker script to choose the appropriate libraries.  */
-+#undef  LIB_SPEC
-+#define LIB_SPEC ""
-+
-+#undef  STARTFILE_SPEC
-+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
-+
-+#undef  ENDFILE_SPEC
-+#define ENDFILE_SPEC "crtend%O%s"
-+
-+#define NO_IMPLICIT_EXTERN_C 1
-diff -urN empty/gcc/config/riscv/generic.md gcc-5.3.0/gcc/config/riscv/generic.md
---- empty/gcc/config/riscv/generic.md	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/generic.md	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,78 @@
-+;; Generic DFA-based pipeline description for RISC-V targets.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+
-+;; This file is part of GCC.
-+
-+;; GCC is free software; you can redistribute it and/or modify it
-+;; under the terms of the GNU General Public License as published
-+;; by the Free Software Foundation; either version 3, or (at your
-+;; option) any later version.
-+
-+;; GCC is distributed in the hope that it will be useful, but WITHOUT
-+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
-+;; License for more details.
-+
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3.  If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+
-+(define_automaton "pipe0")
-+(define_cpu_unit "alu" "pipe0")
-+(define_cpu_unit "imuldiv" "pipe0")
-+(define_cpu_unit "fdivsqrt" "pipe0")
-+
-+(define_insn_reservation "generic_alu" 1
-+  (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
-+  "alu")
-+
-+(define_insn_reservation "generic_load" 3
-+  (eq_attr "type" "load,fpload")
-+  "alu")
-+
-+(define_insn_reservation "generic_store" 1
-+  (eq_attr "type" "store,fpstore")
-+  "alu")
-+
-+(define_insn_reservation "generic_xfer" 3
-+  (eq_attr "type" "mfc,mtc,fcvt,fmove,fcmp")
-+  "alu")
-+
-+(define_insn_reservation "generic_branch" 1
-+  (eq_attr "type" "branch,jump,call")
-+  "alu")
-+
-+(define_insn_reservation "generic_imul" 10
-+  (eq_attr "type" "imul")
-+  "imuldiv*10")
-+
-+(define_insn_reservation "generic_idivsi" 34
-+  (and (eq_attr "type" "idiv")
-+       (eq_attr "mode" "SI"))
-+  "imuldiv*34")
-+
-+(define_insn_reservation "generic_idivdi" 66
-+  (and (eq_attr "type" "idiv")
-+       (eq_attr "mode" "DI"))
-+  "imuldiv*66")
-+
-+(define_insn_reservation "generic_fmul_single" 5
-+  (and (eq_attr "type" "fadd,fmul,fmadd")
-+       (eq_attr "mode" "SF"))
-+  "alu")
-+
-+(define_insn_reservation "generic_fmul_double" 7
-+  (and (eq_attr "type" "fadd,fmul,fmadd")
-+       (eq_attr "mode" "DF"))
-+  "alu")
-+
-+(define_insn_reservation "generic_fdiv" 20
-+  (eq_attr "type" "fdiv")
-+  "fdivsqrt*20")
-+
-+(define_insn_reservation "generic_fsqrt" 25
-+  (eq_attr "type" "fsqrt")
-+  "fdivsqrt*25")
-diff -urN empty/gcc/config/riscv/linux64.h gcc-5.3.0/gcc/config/riscv/linux64.h
---- empty/gcc/config/riscv/linux64.h	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/linux64.h	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,43 @@
-+/* Definitions for 64-bit RISC-V GNU/Linux systems with ELF format.
-+   Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
-+   Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+/* Force the default ABI flags onto the command line
-+   in order to make the other specs easier to write.  */
-+#undef LIB_SPEC
-+#define LIB_SPEC "\
-+%{pthread:-lpthread} \
-+%{shared:-lc} \
-+%{!shared: \
-+  %{profile:-lc_p} %{!profile:-lc}}"
-+
-+#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
-+#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
-+
-+#undef LINK_SPEC
-+#define LINK_SPEC "\
-+%{shared} \
-+  %{!shared: \
-+    %{!static: \
-+      %{rdynamic:-export-dynamic} \
-+      %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
-+      %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
-+    %{static:-static}} \
-+%{" OPT_ARCH64 ":-melf64lriscv} \
-+%{" OPT_ARCH32 ":-melf32lriscv}"
-diff -urN empty/gcc/config/riscv/linux.h gcc-5.3.0/gcc/config/riscv/linux.h
---- empty/gcc/config/riscv/linux.h	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/linux.h	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,60 @@
-+/* Definitions for RISC-V GNU/Linux systems with ELF format.
-+   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
-+   2007, 2008, 2010, 2011 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#undef WCHAR_TYPE
-+#define WCHAR_TYPE "int"
-+
-+#undef WCHAR_TYPE_SIZE
-+#define WCHAR_TYPE_SIZE 32
-+
-+#define TARGET_OS_CPP_BUILTINS()				\
-+  do {								\
-+    GNU_USER_TARGET_OS_CPP_BUILTINS();				\
-+    /* The GNU C++ standard library requires this.  */		\
-+    if (c_dialect_cxx ())					\
-+      builtin_define ("_GNU_SOURCE");				\
-+  } while (0)
-+
-+#undef SUBTARGET_CPP_SPEC
-+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
-+
-+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
-+
-+/* Borrowed from sparc/linux.h */
-+#undef LINK_SPEC
-+#define LINK_SPEC \
-+  "%{shared:-shared} \
-+  %{!shared: \
-+    %{!static: \
-+      %{rdynamic:-export-dynamic} \
-+      -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
-+      %{static:-static}}"
-+
-+#undef LIB_SPEC
-+#define LIB_SPEC "\
-+%{pthread:-lpthread} \
-+%{shared:-lc} \
-+%{!shared: \
-+  %{profile:-lc_p} %{!profile:-lc}}"
-+
-+/* Similar to standard Linux, but adding -ffast-math support.  */
-+#undef  ENDFILE_SPEC
-+#define ENDFILE_SPEC \
-+   "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
-diff -urN empty/gcc/config/riscv/peephole.md gcc-5.3.0/gcc/config/riscv/peephole.md
---- empty/gcc/config/riscv/peephole.md	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/peephole.md	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,121 @@
-+;;........................
-+;; DI -> SI optimizations
-+;;........................
-+
-+;; Simplify (int)(a + 1), etc.
-+(define_peephole2
-+  [(set (match_operand:DI 0 "register_operand")
-+	(match_operator:DI 4 "modular_operator"
-+	  [(match_operand:DI 1 "register_operand")
-+	   (match_operand:DI 2 "arith_operand")]))
-+   (set (match_operand:SI 3 "register_operand")
-+	(truncate:SI (match_dup 0)))]
-+  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
-+   && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
-+  [(set (match_dup 3)
-+	  (truncate:SI
-+	     (match_op_dup:DI 4 
-+	       [(match_operand:DI 1 "register_operand")
-+		(match_operand:DI 2 "arith_operand")])))])
-+
-+;; Simplify (int)a + 1, etc.
-+(define_peephole2
-+  [(set (match_operand:SI 0 "register_operand")
-+	(truncate:SI (match_operand:DI 1 "register_operand")))
-+   (set (match_operand:SI 3 "register_operand")
-+	(match_operator:SI 4 "modular_operator"
-+	  [(match_dup 0)
-+	   (match_operand:SI 2 "arith_operand")]))]
-+  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
-+  [(set (match_dup 3)
-+	(match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
-+
-+;; Simplify -(int)a, etc.
-+(define_peephole2
-+  [(set (match_operand:SI 0 "register_operand")
-+	(truncate:SI (match_operand:DI 2 "register_operand")))
-+   (set (match_operand:SI 3 "register_operand")
-+	(match_operator:SI 4 "modular_operator"
-+	  [(match_operand:SI 1 "reg_or_0_operand")
-+	   (match_dup 0)]))]
-+  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
-+  [(set (match_dup 3)
-+	(match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
-+
-+;; Simplify (unsigned long)(unsigned int)a << const
-+(define_peephole2
-+  [(set (match_operand:DI 0 "register_operand")
-+	(ashift:DI (match_operand:DI 1 "register_operand")
-+		   (match_operand 2 "const_int_operand")))
-+   (set (match_operand:DI 3 "register_operand")
-+	(lshiftrt:DI (match_dup 0) (match_dup 2)))
-+   (set (match_operand:DI 4 "register_operand")
-+	(ashift:DI (match_dup 3) (match_operand 5 "const_int_operand")))]
-+  "TARGET_64BIT
-+   && INTVAL (operands[5]) < INTVAL (operands[2])
-+   && (REGNO (operands[3]) == REGNO (operands[4])
-+       || peep2_reg_dead_p (3, operands[3]))"
-+  [(set (match_dup 0)
-+	(ashift:DI (match_dup 1) (match_dup 2)))
-+   (set (match_dup 4)
-+	(lshiftrt:DI (match_dup 0) (match_operand 5)))]
-+{
-+  operands[5] = GEN_INT (INTVAL (operands[2]) - INTVAL (operands[5]));
-+})
-+
-+;; Simplify PIC loads to static variables.
-+;; These will go away once we figure out how to emit auipc discretely.
-+(define_insn "*local_pic_load<mode>"
-+  [(set (match_operand:ANYI 0 "register_operand" "=r")
-+	(mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
-+  "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+  "<load>\t%0,%1"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_load<mode>"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
-+   (clobber (match_scratch:DI 2 "=&r"))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+  "<load>\t%0,%1,%2"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_load<mode>"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
-+   (clobber (match_scratch:SI 2 "=&r"))]
-+  "TARGET_HARD_FLOAT && !TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+  "<load>\t%0,%1,%2"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_loadu<mode>"
-+  [(set (match_operand:SUPERQI 0 "register_operand" "=r")
-+	(zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
-+  "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+  "<load>u\t%0,%1"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storedi<mode>"
-+  [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
-+	(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
-+   (clobber (match_scratch:DI 2 "=&r"))]
-+  "TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+  "<store>\t%z1,%0,%2"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storesi<mode>"
-+  [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
-+	(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
-+   (clobber (match_scratch:SI 2 "=&r"))]
-+  "!TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+  "<store>\t%z1,%0,%2"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storedi<mode>"
-+  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
-+	(match_operand:ANYF 1 "register_operand" "f"))
-+   (clobber (match_scratch:DI 2 "=&r"))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+  "<store>\t%1,%0,%2"
-+  [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storesi<mode>"
-+  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
-+	(match_operand:ANYF 1 "register_operand" "f"))
-+   (clobber (match_scratch:SI 2 "=&r"))]
-+  "TARGET_HARD_FLOAT && !TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+  "<store>\t%1,%0,%2"
-+  [(set (attr "length") (const_int 8))])
-diff -urN empty/gcc/config/riscv/predicates.md gcc-5.3.0/gcc/config/riscv/predicates.md
---- empty/gcc/config/riscv/predicates.md	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/predicates.md	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,184 @@
-+;; Predicate description for RISC-V target.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+;; GNU General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3.  If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+(define_predicate "const_arith_operand"
-+  (and (match_code "const_int")
-+       (match_test "SMALL_OPERAND (INTVAL (op))")))
-+
-+(define_predicate "arith_operand"
-+  (ior (match_operand 0 "const_arith_operand")
-+       (match_operand 0 "register_operand")))
-+
-+(define_predicate "sle_operand"
-+  (and (match_code "const_int")
-+       (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
-+
-+(define_predicate "sleu_operand"
-+  (and (match_operand 0 "sle_operand")
-+       (match_test "INTVAL (op) + 1 != 0")))
-+
-+(define_predicate "const_0_operand"
-+  (and (match_code "const_int,const_double,const_vector")
-+       (match_test "op == CONST0_RTX (GET_MODE (op))")))
-+
-+(define_predicate "reg_or_0_operand"
-+  (ior (match_operand 0 "const_0_operand")
-+       (match_operand 0 "register_operand")))
-+
-+(define_predicate "const_1_operand"
-+  (and (match_code "const_int,const_double,const_vector")
-+       (match_test "op == CONST1_RTX (GET_MODE (op))")))
-+
-+(define_predicate "reg_or_1_operand"
-+  (ior (match_operand 0 "const_1_operand")
-+       (match_operand 0 "register_operand")))
-+
-+;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
-+(define_predicate "branch_on_bit_operand"
-+  (and (match_code "const_int")
-+       (match_test "INTVAL (op) >= IMM_BITS - 1")))
-+
-+;; This is used for indexing into vectors, and hence only accepts const_int.
-+(define_predicate "const_0_or_1_operand"
-+  (and (match_code "const_int")
-+       (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
-+	    (match_test "op == CONST1_RTX (GET_MODE (op))"))))
-+
-+(define_special_predicate "pc_or_label_operand"
-+  (match_code "pc,label_ref"))
-+
-+;; A legitimate CONST_INT operand that takes more than one instruction
-+;; to load.
-+(define_predicate "splittable_const_int_operand"
-+  (match_code "const_int")
-+{
-+  /* Don't handle multi-word moves this way; we don't want to introduce
-+     the individual word-mode moves until after reload.  */
-+  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-+    return false;
-+
-+  /* Otherwise check whether the constant can be loaded in a single
-+     instruction.  */
-+  return !LUI_OPERAND (INTVAL (op)) && !SMALL_OPERAND (INTVAL (op));
-+})
-+
-+(define_predicate "move_operand"
-+  (match_operand 0 "general_operand")
-+{
-+  enum riscv_symbol_type symbol_type;
-+
-+  /* The thinking here is as follows:
-+
-+     (1) The move expanders should split complex load sequences into
-+	 individual instructions.  Those individual instructions can
-+	 then be optimized by all rtl passes.
-+
-+     (2) The target of pre-reload load sequences should not be used
-+	 to store temporary results.  If the target register is only
-+	 assigned one value, reload can rematerialize that value
-+	 on demand, rather than spill it to the stack.
-+
-+     (3) If we allowed pre-reload passes like combine and cse to recreate
-+	 complex load sequences, we would want to be able to split the
-+	 sequences before reload as well, so that the pre-reload scheduler
-+	 can see the individual instructions.  This falls foul of (2);
-+	 the splitter would be forced to reuse the target register for
-+	 intermediate results.
-+
-+     (4) We want to define complex load splitters for combine.  These
-+	 splitters can request a temporary scratch register, which avoids
-+	 the problem in (2).  They allow things like:
-+
-+	      (set (reg T1) (high SYM))
-+	      (set (reg T2) (low (reg T1) SYM))
-+	      (set (reg X) (plus (reg T2) (const_int OFFSET)))
-+
-+	 to be combined into:
-+
-+	      (set (reg T3) (high SYM+OFFSET))
-+	      (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
-+
-+	 if T2 is only used this once.  */
-+  switch (GET_CODE (op))
-+    {
-+    case CONST_INT:
-+      return !splittable_const_int_operand (op, mode);
-+
-+    case CONST:
-+    case SYMBOL_REF:
-+    case LABEL_REF:
-+      return (riscv_symbolic_constant_p (op, &symbol_type)
-+	      && !riscv_hi_relocs[symbol_type]);
-+
-+    case HIGH:
-+      op = XEXP (op, 0);
-+      return riscv_symbolic_constant_p (op, &symbol_type);
-+
-+    default:
-+      return true;
-+    }
-+})
-+
-+(define_predicate "consttable_operand"
-+  (match_test "CONSTANT_P (op)"))
-+
-+(define_predicate "symbolic_operand"
-+  (match_code "const,symbol_ref,label_ref")
-+{
-+  enum riscv_symbol_type type;
-+  return riscv_symbolic_constant_p (op, &type);
-+})
-+
-+(define_predicate "absolute_symbolic_operand"
-+  (match_code "const,symbol_ref,label_ref")
-+{
-+  enum riscv_symbol_type type;
-+  return (riscv_symbolic_constant_p (op, &type)
-+	  && type == SYMBOL_ABSOLUTE);
-+})
-+
-+(define_predicate "plt_symbolic_operand"
-+  (match_code "const,symbol_ref,label_ref")
-+{
-+  enum riscv_symbol_type type;
-+  return (riscv_symbolic_constant_p (op, &type)
-+	  && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
-+})
-+
-+(define_predicate "call_insn_operand"
-+  (ior (match_operand 0 "absolute_symbolic_operand")
-+       (match_operand 0 "plt_symbolic_operand")
-+       (match_operand 0 "register_operand")))
-+
-+(define_predicate "symbol_ref_operand"
-+  (match_code "symbol_ref"))
-+
-+(define_predicate "modular_operator"
-+  (match_code "plus,minus,mult,ashift"))
-+
-+(define_predicate "equality_operator"
-+  (match_code "eq,ne"))
-+
-+(define_predicate "order_operator"
-+  (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
-+
-+(define_predicate "fp_order_operator"
-+  (match_code "eq,ne,lt,le,gt,ge"))
-diff -urN empty/gcc/config/riscv/riscv.c gcc-5.3.0/gcc/config/riscv/riscv.c
---- empty/gcc/config/riscv/riscv.c	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.c	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4311 @@
-+/* Subroutines used for code generation for RISC-V.
-+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "rtl.h"
-+#include "regs.h"
-+#include "hard-reg-set.h"
-+#include "insn-config.h"
-+#include "conditions.h"
-+#include "insn-attr.h"
-+#include "recog.h"
-+#include "output.h"
-+#include "hash-set.h"
-+#include "machmode.h"
-+#include "vec.h"
-+#include "double-int.h"
-+#include "input.h"
-+#include "alias.h"
-+#include "symtab.h"
-+#include "wide-int.h"
-+#include "inchash.h"
-+#include "tree.h"
-+#include "fold-const.h"
-+#include "varasm.h"
-+#include "stringpool.h"
-+#include "stor-layout.h"
-+#include "calls.h"
-+#include "function.h"
-+#include "hashtab.h"
-+#include "flags.h"
-+#include "statistics.h"
-+#include "real.h"
-+#include "fixed-value.h"
-+#include "expmed.h"
-+#include "dojump.h"
-+#include "explow.h"
-+#include "emit-rtl.h"
-+#include "stmt.h"
-+#include "expr.h"
-+#include "insn-codes.h"
-+#include "optabs.h"
-+#include "libfuncs.h"
-+#include "reload.h"
-+#include "tm_p.h"
-+#include "ggc.h"
-+#include "gstab.h"
-+#include "hash-table.h"
-+#include "debug.h"
-+#include "target.h"
-+#include "target-def.h"
-+#include "common/common-target.h"
-+#include "langhooks.h"
-+#include "dominance.h"
-+#include "cfg.h"
-+#include "cfgrtl.h"
-+#include "cfganal.h"
-+#include "lcm.h"
-+#include "cfgbuild.h"
-+#include "cfgcleanup.h"
-+#include "predict.h"
-+#include "basic-block.h"
-+#include "sched-int.h"
-+#include "tree-ssa-alias.h"
-+#include "internal-fn.h"
-+#include "gimple-fold.h"
-+#include "tree-eh.h"
-+#include "gimple-expr.h"
-+#include "is-a.h"
-+#include "gimple.h"
-+#include "gimplify.h"
-+#include "bitmap.h"
-+#include "diagnostic.h"
-+#include "target-globals.h"
-+#include "opts.h"
-+#include "tree-pass.h"
-+#include "context.h"
-+#include "hash-map.h"
-+#include "plugin-api.h"
-+#include "ipa-ref.h"
-+#include "cgraph.h"
-+#include "builtins.h"
-+#include "rtl-iter.h"
-+#include <stdint.h>
-+
-+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
-+#define UNSPEC_ADDRESS_P(X)					\
-+  (GET_CODE (X) == UNSPEC					\
-+   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
-+   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
-+
-+/* Extract the symbol or label from UNSPEC wrapper X.  */
-+#define UNSPEC_ADDRESS(X) \
-+  XVECEXP (X, 0, 0)
-+
-+/* Extract the symbol type from UNSPEC wrapper X.  */
-+#define UNSPEC_ADDRESS_TYPE(X) \
-+  ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
-+
-+/* The maximum distance between the top of the stack frame and the
-+   value sp has when we save and restore registers.  This is set by the
-+   range  of load/store offsets and must also preserve stack alignment. */
-+#define RISCV_MAX_FIRST_STACK_STEP (IMM_REACH/2 - 16)
-+
-+/* True if INSN is a riscv.md pattern or asm statement.  */
-+#define USEFUL_INSN_P(INSN)						\
-+  (NONDEBUG_INSN_P (INSN)						\
-+   && GET_CODE (PATTERN (INSN)) != USE					\
-+   && GET_CODE (PATTERN (INSN)) != CLOBBER				\
-+   && GET_CODE (PATTERN (INSN)) != ADDR_VEC				\
-+   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
-+
-+/* True if bit BIT is set in VALUE.  */
-+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
-+
-+/* Classifies an address.
-+
-+   ADDRESS_REG
-+       A natural register + offset address.  The register satisfies
-+       riscv_valid_base_register_p and the offset is a const_arith_operand.
-+
-+   ADDRESS_LO_SUM
-+       A LO_SUM rtx.  The first operand is a valid base register and
-+       the second operand is a symbolic address.
-+
-+   ADDRESS_CONST_INT
-+       A signed 16-bit constant address.
-+
-+   ADDRESS_SYMBOLIC:
-+       A constant symbolic address.  */
-+enum riscv_address_type {
-+  ADDRESS_REG,
-+  ADDRESS_LO_SUM,
-+  ADDRESS_CONST_INT,
-+  ADDRESS_SYMBOLIC
-+};
-+
-+enum riscv_code_model riscv_cmodel = TARGET_DEFAULT_CMODEL;
-+
-+/* Macros to create an enumeration identifier for a function prototype.  */
-+#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
-+#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
-+#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
-+#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
-+
-+/* Classifies the prototype of a built-in function.  */
-+enum riscv_function_type {
-+#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
-+#include "config/riscv/riscv-ftypes.def"
-+#undef DEF_RISCV_FTYPE
-+  RISCV_MAX_FTYPE_MAX
-+};
-+
-+/* Specifies how a built-in function should be converted into rtl.  */
-+enum riscv_builtin_type {
-+  /* The function corresponds directly to an .md pattern.  The return
-+     value is mapped to operand 0 and the arguments are mapped to
-+     operands 1 and above.  */
-+  RISCV_BUILTIN_DIRECT,
-+
-+  /* The function corresponds directly to an .md pattern.  There is no return
-+     value and the arguments are mapped to operands 0 and above.  */
-+  RISCV_BUILTIN_DIRECT_NO_TARGET
-+};
-+
-+/* Information about a function's frame layout.  */
-+struct GTY(())  riscv_frame_info {
-+  /* The size of the frame in bytes.  */
-+  HOST_WIDE_INT total_size;
-+
-+  /* Bit X is set if the function saves or restores GPR X.  */
-+  unsigned int mask;
-+
-+  /* Likewise FPR X.  */
-+  unsigned int fmask;
-+
-+  /* How much the GPR save/restore routines adjust sp (or 0 if unused).  */
-+  unsigned save_libcall_adjustment;
-+
-+  /* Offsets of fixed-point and floating-point save areas from frame bottom */
-+  HOST_WIDE_INT gp_sp_offset;
-+  HOST_WIDE_INT fp_sp_offset;
-+
-+  /* Offset of virtual frame pointer from stack pointer/frame bottom */
-+  HOST_WIDE_INT frame_pointer_offset;
-+
-+  /* Offset of hard frame pointer from stack pointer/frame bottom */
-+  HOST_WIDE_INT hard_frame_pointer_offset;
-+
-+  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
-+  HOST_WIDE_INT arg_pointer_offset;
-+};
-+
-+struct GTY(())  machine_function {
-+  /* The number of extra stack bytes taken up by register varargs.
-+     This area is allocated by the callee at the very top of the frame.  */
-+  int varargs_size;
-+
-+  /* Cached return value of leaf_function_p.  <0 if false, >0 if true.  */
-+  int is_leaf;
-+
-+  /* The current frame information, calculated by riscv_compute_frame_info.  */
-+  struct riscv_frame_info frame;
-+};
-+
-+/* Information about a single argument.  */
-+struct riscv_arg_info {
-+  /* True if the argument is passed in a floating-point register, or
-+     would have been if we hadn't run out of registers.  */
-+  bool fpr_p;
-+
-+  /* The number of words passed in registers, rounded up.  */
-+  unsigned int reg_words;
-+
-+  /* For EABI, the offset of the first register from GP_ARG_FIRST or
-+     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
-+     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
-+     comment for details).
-+
-+     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
-+     on the stack.  */
-+  unsigned int reg_offset;
-+
-+  /* The number of words that must be passed on the stack, rounded up.  */
-+  unsigned int stack_words;
-+
-+  /* The offset from the start of the stack overflow area of the argument's
-+     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
-+  unsigned int stack_offset;
-+};
-+
-+/* Information about an address described by riscv_address_type.
-+
-+   ADDRESS_CONST_INT
-+       No fields are used.
-+
-+   ADDRESS_REG
-+       REG is the base register and OFFSET is the constant offset.
-+
-+   ADDRESS_LO_SUM
-+       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
-+       is the type of symbol it references.
-+
-+   ADDRESS_SYMBOLIC
-+       SYMBOL_TYPE is the type of symbol that the address references.  */
-+struct riscv_address_info {
-+  enum riscv_address_type type;
-+  rtx reg;
-+  rtx offset;
-+  enum riscv_symbol_type symbol_type;
-+};
-+
-+/* One stage in a constant building sequence.  These sequences have
-+   the form:
-+
-+	A = VALUE[0]
-+	A = A CODE[1] VALUE[1]
-+	A = A CODE[2] VALUE[2]
-+	...
-+
-+   where A is an accumulator, each CODE[i] is a binary rtl operation
-+   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
-+struct riscv_integer_op {
-+  enum rtx_code code;
-+  unsigned HOST_WIDE_INT value;
-+};
-+
-+/* The largest number of operations needed to load an integer constant.
-+   The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
-+   but we may attempt and reject even worse sequences.  */
-+#define RISCV_MAX_INTEGER_OPS 32
-+
-+/* Costs of various operations on the different architectures.  */
-+
-+struct riscv_tune_info
-+{
-+  unsigned short fp_add[2];
-+  unsigned short fp_mul[2];
-+  unsigned short fp_div[2];
-+  unsigned short int_mul[2];
-+  unsigned short int_div[2];
-+  unsigned short issue_rate;
-+  unsigned short branch_cost;
-+  unsigned short memory_cost;
-+};
-+
-+/* Information about one CPU we know about.  */
-+struct riscv_cpu_info {
-+  /* This CPU's canonical name.  */
-+  const char *name;
-+
-+  /* The RISC-V ISA and extensions supported by this CPU.  */
-+  const char *isa;
-+
-+  /* Tuning parameters for this CPU.  */
-+  const struct riscv_tune_info *tune_info;
-+};
-+
-+/* Global variables for machine-dependent things.  */
-+
-+/* Which tuning parameters to use.  */
-+static const struct riscv_tune_info *tune_info;
-+
-+/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
-+bool riscv_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
-+
-+/* riscv_lo_relocs[X] is the relocation to use when a symbol of type X
-+   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
-+   if they are matched by a special .md file pattern.  */
-+const char *riscv_lo_relocs[NUM_SYMBOL_TYPES];
-+
-+/* Likewise for HIGHs.  */
-+const char *riscv_hi_relocs[NUM_SYMBOL_TYPES];
-+
-+/* Index R is the smallest register class that contains register R.  */
-+const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
-+  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
-+  GR_REGS,	T_REGS,		T_REGS,		T_REGS,
-+  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
-+  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
-+  GR_REGS,	GR_REGS, 	GR_REGS,	GR_REGS,
-+  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
-+  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
-+  T_REGS,	T_REGS,		T_REGS,		T_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
-+  FRAME_REGS,	FRAME_REGS,
-+};
-+
-+/* Costs to use when optimizing for size.  */
-+static const struct riscv_tune_info rocket_tune_info = {
-+  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_add */
-+  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_mul */
-+  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},	/* fp_div */
-+  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},	/* int_mul */
-+  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},	/* int_div */
-+  1,						/* issue_rate */
-+  3,						/* branch_cost */
-+  5						/* memory_cost */
-+};
-+
-+/* Costs to use when optimizing for size.  */
-+static const struct riscv_tune_info optimize_size_tune_info = {
-+  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_add */
-+  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_mul */
-+  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_div */
-+  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_mul */
-+  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_div */
-+  1,						/* issue_rate */
-+  1,						/* branch_cost */
-+  1						/* memory_cost */
-+};
-+
-+/* A table describing all the processors GCC knows about.  */
-+static const struct riscv_cpu_info riscv_cpu_info_table[] = {
-+  /* Entries for generic ISAs.  */
-+  { "rocket", "IMAFD", &rocket_tune_info },
-+};
-+
-+/* Return the riscv_cpu_info entry for the given name string.  */
-+
-+static const struct riscv_cpu_info *
-+riscv_parse_cpu (const char *cpu_string)
-+{
-+  unsigned int i;
-+
-+  for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
-+    if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
-+      return riscv_cpu_info_table + i;
-+
-+  error ("unknown cpu `%s'", cpu_string);
-+  return riscv_cpu_info_table;
-+}
-+
-+/* Fill CODES with a sequence of rtl operations to load VALUE.
-+   Return the number of operations needed.  */
-+
-+static int
-+riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
-+		       enum machine_mode mode)
-+{
-+  HOST_WIDE_INT low_part = CONST_LOW_PART (value);
-+  int cost = INT_MAX, alt_cost;
-+  struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
-+
-+  if (SMALL_OPERAND (value) || LUI_OPERAND (value))
-+    {
-+      /* Simply ADDI or LUI */
-+      codes[0].code = UNKNOWN;
-+      codes[0].value = value;
-+      return 1;
-+    }
-+
-+  /* End with ADDI */
-+  if (low_part != 0
-+      && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
-+    {
-+      cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
-+      codes[cost-1].code = PLUS;
-+      codes[cost-1].value = low_part;
-+    }
-+
-+  /* End with XORI */
-+  if (cost > 2 && (low_part < 0 || mode == HImode))
-+    {
-+      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
-+      alt_codes[alt_cost-1].code = XOR;
-+      alt_codes[alt_cost-1].value = low_part;
-+      if (alt_cost < cost)
-+	cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
-+    }
-+
-+  /* Eliminate trailing zeros and end with SLLI */
-+  if (cost > 2 && (value & 1) == 0)
-+    {
-+      int shift = 0;
-+      while ((value & 1) == 0)
-+	shift++, value >>= 1;
-+      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
-+      alt_codes[alt_cost-1].code = ASHIFT;
-+      alt_codes[alt_cost-1].value = shift;
-+      if (alt_cost < cost)
-+	cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
-+    }
-+
-+  gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
-+  return cost;
-+}
-+
-+static int
-+riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
-+		     enum machine_mode mode)
-+{
-+  int cost = riscv_build_integer_1 (codes, value, mode);
-+
-+  /* Eliminate leading zeros and end with SRLI */
-+  if (value > 0 && cost > 2)
-+    {
-+      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
-+      int alt_cost, shift = 0;
-+      HOST_WIDE_INT shifted_val;
-+
-+      /* Try filling trailing bits with 1s */
-+      while ((value << shift) >= 0)
-+	shift++;
-+      shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
-+      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
-+      alt_codes[alt_cost-1].code = LSHIFTRT;
-+      alt_codes[alt_cost-1].value = shift;
-+      if (alt_cost < cost)
-+	cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
-+
-+      /* Try filling trailing bits with 0s */
-+      shifted_val = value << shift;
-+      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
-+      alt_codes[alt_cost-1].code = LSHIFTRT;
-+      alt_codes[alt_cost-1].value = shift;
-+      if (alt_cost < cost)
-+	cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
-+    }
-+
-+  return cost;
-+}
-+
-+static int
-+riscv_split_integer_cost (HOST_WIDE_INT val)
-+{
-+  int cost;
-+  int32_t loval = val, hival = (val - (int32_t)val) >> 32;
-+  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
-+
-+  cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
-+  if (loval != hival)
-+    cost += riscv_build_integer (codes, hival, VOIDmode);
-+
-+  return cost;
-+}
-+
-+static int
-+riscv_integer_cost (HOST_WIDE_INT val)
-+{
-+  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
-+  return MIN (riscv_build_integer (codes, val, VOIDmode),
-+	      riscv_split_integer_cost (val));
-+}
-+
-+/* Try to split a 64b integer into 32b parts, then reassemble. */
-+
-+static rtx
-+riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
-+{
-+  int32_t loval = val, hival = (val - (int32_t)val) >> 32;
-+  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
-+
-+  riscv_move_integer (hi, hi, hival);
-+  riscv_move_integer (lo, lo, loval);
-+
-+  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
-+  hi = force_reg (mode, hi);
-+
-+  return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
-+}
-+
-+/* Return true if X is a thread-local symbol.  */
-+
-+static bool
-+riscv_tls_symbol_p (const_rtx x)
-+{
-+  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
-+}
-+
-+static bool
-+riscv_symbol_binds_local_p (const_rtx x)
-+{
-+  return (SYMBOL_REF_DECL (x)
-+	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
-+	  : SYMBOL_REF_LOCAL_P (x));
-+}
-+
-+/* Return the method that should be used to access SYMBOL_REF or
-+   LABEL_REF X in context CONTEXT.  */
-+
-+static enum riscv_symbol_type
-+riscv_classify_symbol (const_rtx x)
-+{
-+  if (riscv_tls_symbol_p (x))
-+    return SYMBOL_TLS;
-+
-+  if (GET_CODE (x) == LABEL_REF)
-+    {
-+      if (LABEL_REF_NONLOCAL_P (x))
-+	return SYMBOL_GOT_DISP;
-+      return SYMBOL_ABSOLUTE;
-+    }
-+
-+  gcc_assert (GET_CODE (x) == SYMBOL_REF);
-+
-+  if (flag_pic && !riscv_symbol_binds_local_p (x))
-+    return SYMBOL_GOT_DISP;
-+
-+  return SYMBOL_ABSOLUTE;
-+}
-+
-+/* Classify the base of symbolic expression X, given that X appears in
-+   context CONTEXT.  */
-+
-+static enum riscv_symbol_type
-+riscv_classify_symbolic_expression (rtx x)
-+{
-+  rtx offset;
-+
-+  split_const (x, &x, &offset);
-+  if (UNSPEC_ADDRESS_P (x))
-+    return UNSPEC_ADDRESS_TYPE (x);
-+
-+  return riscv_classify_symbol (x);
-+}
-+
-+/* Return true if X is a symbolic constant that can be used in context
-+   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */
-+
-+bool
-+riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
-+{
-+  rtx offset;
-+
-+  split_const (x, &x, &offset);
-+  if (UNSPEC_ADDRESS_P (x))
-+    {
-+      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
-+      x = UNSPEC_ADDRESS (x);
-+    }
-+  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
-+    *symbol_type = riscv_classify_symbol (x);
-+  else
-+    return false;
-+
-+  if (offset == const0_rtx)
-+    return true;
-+
-+  /* Check whether a nonzero offset is valid for the underlying
-+     relocations.  */
-+  switch (*symbol_type)
-+    {
-+    case SYMBOL_ABSOLUTE:
-+    case SYMBOL_TLS_LE:
-+      return (int32_t) INTVAL (offset) == INTVAL (offset);
-+
-+    default:
-+      return false;
-+    }
-+  gcc_unreachable ();
-+}
-+
-+/* Returns the number of instructions necessary to reference a symbol. */
-+
-+static int riscv_symbol_insns (enum riscv_symbol_type type)
-+{
-+  switch (type)
-+    {
-+    case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
-+    case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
-+    case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
-+    case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
-+    default: gcc_unreachable();
-+    }
-+}
-+
-+/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
-+
-+static bool
-+riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
-+{
-+  return riscv_const_insns (x) > 0;
-+}
-+
-+/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
-+
-+static bool
-+riscv_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
-+{
-+  enum riscv_symbol_type type;
-+  rtx base, offset;
-+
-+  /* There is no assembler syntax for expressing an address-sized
-+     high part.  */
-+  if (GET_CODE (x) == HIGH)
-+    return true;
-+
-+  split_const (x, &base, &offset);
-+  if (riscv_symbolic_constant_p (base, &type))
-+    {
-+      /* As an optimization, don't spill symbolic constants that are as
-+	 cheap to rematerialize as to access in the constant pool.  */
-+      if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
-+	return true;
-+
-+      /* As an optimization, avoid needlessly generating dynamic relocations.  */
-+      if (flag_pic)
-+	return true;
-+    }
-+
-+  /* TLS symbols must be computed by riscv_legitimize_move.  */
-+  if (tls_referenced_p (x))
-+    return true;
-+
-+  return false;
-+}
-+
-+/* Return true if register REGNO is a valid base register for mode MODE.
-+   STRICT_P is true if REG_OK_STRICT is in effect.  */
-+
-+int
-+riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
-+			       bool strict_p)
-+{
-+  if (!HARD_REGISTER_NUM_P (regno))
-+    {
-+      if (!strict_p)
-+	return true;
-+      regno = reg_renumber[regno];
-+    }
-+
-+  /* These fake registers will be eliminated to either the stack or
-+     hard frame pointer, both of which are usually valid base registers.
-+     Reload deals with the cases where the eliminated form isn't valid.  */
-+  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
-+    return true;
-+
-+  return GP_REG_P (regno);
-+}
-+
-+/* Return true if X is a valid base register for mode MODE.
-+   STRICT_P is true if REG_OK_STRICT is in effect.  */
-+
-+static bool
-+riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
-+{
-+  if (!strict_p && GET_CODE (x) == SUBREG)
-+    x = SUBREG_REG (x);
-+
-+  return (REG_P (x)
-+	  && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
-+}
-+
-+/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
-+   can address a value of mode MODE.  */
-+
-+static bool
-+riscv_valid_offset_p (rtx x, enum machine_mode mode)
-+{
-+  /* Check that X is a signed 12-bit number.  */
-+  if (!const_arith_operand (x, Pmode))
-+    return false;
-+
-+  /* We may need to split multiword moves, so make sure that every word
-+     is accessible.  */
-+  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
-+      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
-+    return false;
-+
-+  return true;
-+}
-+
-+/* Return true if a LO_SUM can address a value of mode MODE when the
-+   LO_SUM symbol has type SYMBOL_TYPE.  */
-+
-+static bool
-+riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
-+{
-+  /* Check that symbols of type SYMBOL_TYPE can be used to access values
-+     of mode MODE.  */
-+  if (riscv_symbol_insns (symbol_type) == 0)
-+    return false;
-+
-+  /* Check that there is a known low-part relocation.  */
-+  if (riscv_lo_relocs[symbol_type] == NULL)
-+    return false;
-+
-+  /* We may need to split multiword moves, so make sure that each word
-+     can be accessed without inducing a carry.  This is mainly needed
-+     for o64, which has historically only guaranteed 64-bit alignment
-+     for 128-bit types.  */
-+  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
-+      && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
-+    return false;
-+
-+  return true;
-+}
-+
-+/* Return true if X is a valid address for machine mode MODE.  If it is,
-+   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
-+   effect.  */
-+
-+static bool
-+riscv_classify_address (struct riscv_address_info *info, rtx x,
-+		       enum machine_mode mode, bool strict_p)
-+{
-+  switch (GET_CODE (x))
-+    {
-+    case REG:
-+    case SUBREG:
-+      info->type = ADDRESS_REG;
-+      info->reg = x;
-+      info->offset = const0_rtx;
-+      return riscv_valid_base_register_p (info->reg, mode, strict_p);
-+
-+    case PLUS:
-+      info->type = ADDRESS_REG;
-+      info->reg = XEXP (x, 0);
-+      info->offset = XEXP (x, 1);
-+      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
-+	      && riscv_valid_offset_p (info->offset, mode));
-+
-+    case LO_SUM:
-+      info->type = ADDRESS_LO_SUM;
-+      info->reg = XEXP (x, 0);
-+      info->offset = XEXP (x, 1);
-+      /* We have to trust the creator of the LO_SUM to do something vaguely
-+	 sane.  Target-independent code that creates a LO_SUM should also
-+	 create and verify the matching HIGH.  Target-independent code that
-+	 adds an offset to a LO_SUM must prove that the offset will not
-+	 induce a carry.  Failure to do either of these things would be
-+	 a bug, and we are not required to check for it here.  The RISCV
-+	 backend itself should only create LO_SUMs for valid symbolic
-+	 constants, with the high part being either a HIGH or a copy
-+	 of _gp. */
-+      info->symbol_type
-+	= riscv_classify_symbolic_expression (info->offset);
-+      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
-+	      && riscv_valid_lo_sum_p (info->symbol_type, mode));
-+
-+    case CONST_INT:
-+      /* Small-integer addresses don't occur very often, but they
-+	 are legitimate if $0 is a valid base register.  */
-+      info->type = ADDRESS_CONST_INT;
-+      return SMALL_OPERAND (INTVAL (x));
-+
-+    default:
-+      return false;
-+    }
-+}
-+
-+/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */
-+
-+static bool
-+riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
-+{
-+  struct riscv_address_info addr;
-+
-+  return riscv_classify_address (&addr, x, mode, strict_p);
-+}
-+
-+/* Return the number of instructions needed to load or store a value
-+   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
-+   Assume that multiword moves may need to be split into word moves
-+   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
-+   enough. */
-+
-+int
-+riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
-+{
-+  struct riscv_address_info addr;
-+  int n = 1;
-+
-+  if (!riscv_classify_address (&addr, x, mode, false))
-+    return 0;
-+
-+  /* BLKmode is used for single unaligned loads and stores and should
-+     not count as a multiword mode. */
-+  if (mode != BLKmode && might_split_p)
-+    n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-+
-+  if (addr.type == ADDRESS_LO_SUM)
-+    n += riscv_symbol_insns (addr.symbol_type) - 1;
-+
-+  return n;
-+}
-+
-+/* Return the number of instructions needed to load constant X.
-+   Return 0 if X isn't a valid constant.  */
-+
-+int
-+riscv_const_insns (rtx x)
-+{
-+  enum riscv_symbol_type symbol_type;
-+  rtx offset;
-+
-+  switch (GET_CODE (x))
-+    {
-+    case HIGH:
-+      if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
-+	  || !riscv_hi_relocs[symbol_type])
-+	return 0;
-+
-+      /* This is simply an LUI. */
-+      return 1;
-+
-+    case CONST_INT:
-+      {
-+	int cost = riscv_integer_cost (INTVAL (x));
-+	/* Force complicated constants to memory. */
-+	return cost < 4 ? cost : 0;
-+      }
-+
-+    case CONST_DOUBLE:
-+    case CONST_VECTOR:
-+      /* Allow zeros for normal mode, where we can use x0.  */
-+      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
-+
-+    case CONST:
-+      /* See if we can refer to X directly.  */
-+      if (riscv_symbolic_constant_p (x, &symbol_type))
-+	return riscv_symbol_insns (symbol_type);
-+
-+      /* Otherwise try splitting the constant into a base and offset.  */
-+      split_const (x, &x, &offset);
-+      if (offset != 0)
-+	{
-+	  int n = riscv_const_insns (x);
-+	  if (n != 0)
-+	    return n + riscv_integer_cost (INTVAL (offset));
-+	}
-+      return 0;
-+
-+    case SYMBOL_REF:
-+    case LABEL_REF:
-+      return riscv_symbol_insns (riscv_classify_symbol (x));
-+
-+    default:
-+      return 0;
-+    }
-+}
-+
-+/* X is a doubleword constant that can be handled by splitting it into
-+   two words and loading each word separately.  Return the number of
-+   instructions required to do this.  */
-+
-+int
-+riscv_split_const_insns (rtx x)
-+{
-+  unsigned int low, high;
-+
-+  low = riscv_const_insns (riscv_subword (x, false));
-+  high = riscv_const_insns (riscv_subword (x, true));
-+  gcc_assert (low > 0 && high > 0);
-+  return low + high;
-+}
-+
-+/* Return the number of instructions needed to implement INSN,
-+   given that it loads from or stores to MEM. */
-+
-+int
-+riscv_load_store_insns (rtx mem, rtx_insn *insn)
-+{
-+  enum machine_mode mode;
-+  bool might_split_p;
-+  rtx set;
-+
-+  gcc_assert (MEM_P (mem));
-+  mode = GET_MODE (mem);
-+
-+  /* Try to prove that INSN does not need to be split.  */
-+  might_split_p = true;
-+  if (GET_MODE_BITSIZE (mode) == 64)
-+    {
-+      set = single_set (insn);
-+      if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
-+	might_split_p = false;
-+    }
-+
-+  return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
-+}
-+
-+/* Emit a move from SRC to DEST.  Assume that the move expanders can
-+   handle all moves if !can_create_pseudo_p ().  The distinction is
-+   important because, unlike emit_move_insn, the move expanders know
-+   how to force Pmode objects into the constant pool even when the
-+   constant pool address is not itself legitimate.  */
-+
-+rtx
-+riscv_emit_move (rtx dest, rtx src)
-+{
-+  return (can_create_pseudo_p ()
-+	  ? emit_move_insn (dest, src)
-+	  : emit_move_insn_1 (dest, src));
-+}
-+
-+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */
-+
-+static void
-+riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
-+{
-+  emit_insn (gen_rtx_SET (VOIDmode, target,
-+			  gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
-+}
-+
-+/* Compute (CODE OP0 OP1) and store the result in a new register
-+   of mode MODE.  Return that new register.  */
-+
-+static rtx
-+riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
-+{
-+  rtx reg;
-+
-+  reg = gen_reg_rtx (mode);
-+  riscv_emit_binary (code, reg, op0, op1);
-+  return reg;
-+}
-+
-+/* Copy VALUE to a register and return that register.  If new pseudos
-+   are allowed, copy it into a new register, otherwise use DEST.  */
-+
-+static rtx
-+riscv_force_temporary (rtx dest, rtx value)
-+{
-+  if (can_create_pseudo_p ())
-+    return force_reg (Pmode, value);
-+  else
-+    {
-+      riscv_emit_move (dest, value);
-+      return dest;
-+    }
-+}
-+
-+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
-+   then add CONST_INT OFFSET to the result.  */
-+
-+static rtx
-+riscv_unspec_address_offset (rtx base, rtx offset,
-+			    enum riscv_symbol_type symbol_type)
-+{
-+  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
-+			 UNSPEC_ADDRESS_FIRST + symbol_type);
-+  if (offset != const0_rtx)
-+    base = gen_rtx_PLUS (Pmode, base, offset);
-+  return gen_rtx_CONST (Pmode, base);
-+}
-+
-+/* Return an UNSPEC address with underlying address ADDRESS and symbol
-+   type SYMBOL_TYPE.  */
-+
-+rtx
-+riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
-+{
-+  rtx base, offset;
-+
-+  split_const (address, &base, &offset);
-+  return riscv_unspec_address_offset (base, offset, symbol_type);
-+}
-+
-+/* If OP is an UNSPEC address, return the address to which it refers,
-+   otherwise return OP itself.  */
-+
-+static rtx
-+riscv_strip_unspec_address (rtx op)
-+{
-+  rtx base, offset;
-+
-+  split_const (op, &base, &offset);
-+  if (UNSPEC_ADDRESS_P (base))
-+    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
-+  return op;
-+}
-+
-+/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
-+   high part to BASE and return the result.  Just return BASE otherwise.
-+   TEMP is as for riscv_force_temporary.
-+
-+   The returned expression can be used as the first operand to a LO_SUM.  */
-+
-+static rtx
-+riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
-+{
-+  addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
-+  return riscv_force_temporary (temp, addr);
-+}
-+
-+/* Load an entry from the GOT. */
-+static rtx riscv_got_load_tls_gd(rtx dest, rtx sym)
-+{
-+  return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym));
-+}
-+
-+static rtx riscv_got_load_tls_ie(rtx dest, rtx sym)
-+{
-+  return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym));
-+}
-+
-+static rtx riscv_tls_add_tp_le(rtx dest, rtx base, rtx sym)
-+{
-+  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
-+  return (Pmode == DImode ? gen_tls_add_tp_ledi(dest, base, tp, sym) : gen_tls_add_tp_lesi(dest, base, tp, sym));
-+}
-+
-+/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
-+   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
-+   constant in that context and can be split into high and low parts.
-+   If so, and if LOW_OUT is nonnull, emit the high part and store the
-+   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.
-+
-+   TEMP is as for riscv_force_temporary and is used to load the high
-+   part into a register.
-+
-+   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
-+   a legitimate SET_SRC for an .md pattern, otherwise the low part
-+   is guaranteed to be a legitimate address for mode MODE.  */
-+
-+bool
-+riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
-+{
-+  enum riscv_symbol_type symbol_type;
-+  rtx high;
-+
-+  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
-+      || !riscv_symbolic_constant_p (addr, &symbol_type)
-+      || riscv_symbol_insns (symbol_type) == 0
-+      || !riscv_hi_relocs[symbol_type])
-+    return false;
-+
-+  if (low_out)
-+    {
-+      switch (symbol_type)
-+	{
-+	case SYMBOL_ABSOLUTE:
-+	  high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
-+      	  high = riscv_force_temporary (temp, high);
-+      	  *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
-+	  break;
-+	
-+	default:
-+	  gcc_unreachable ();
-+	}
-+    }
-+
-+  return true;
-+}
-+
-+/* Return a legitimate address for REG + OFFSET.  TEMP is as for
-+   riscv_force_temporary; it is only needed when OFFSET is not a
-+   SMALL_OPERAND.  */
-+
-+static rtx
-+riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
-+{
-+  if (!SMALL_OPERAND (offset))
-+    {
-+      rtx high;
-+
-+      /* Leave OFFSET as a signed 12-bit offset and put the excess in HIGH.
-+         The addition inside the macro CONST_HIGH_PART may cause an
-+         overflow, so we need to force a sign-extension check.  */
-+      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
-+      offset = CONST_LOW_PART (offset);
-+      high = riscv_force_temporary (temp, high);
-+      reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
-+    }
-+  return plus_constant (Pmode, reg, offset);
-+}
-+
-+/* The __tls_get_addr symbol.  */
-+static GTY(()) rtx riscv_tls_symbol;
-+
-+/* Return an instruction sequence that calls __tls_get_addr.  SYM is
-+   the TLS symbol we are referencing and TYPE is the symbol type to use
-+   (either global dynamic or local dynamic).  RESULT is an RTX for the
-+   return value location.  */
-+
-+static rtx
-+riscv_call_tls_get_addr (rtx sym, rtx result)
-+{
-+  rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
-+
-+  if (!riscv_tls_symbol)
-+    riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
-+
-+  start_sequence ();
-+  
-+  emit_insn (riscv_got_load_tls_gd (a0, sym));
-+  insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
-+  RTL_CONST_CALL_P (insn) = 1;
-+  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
-+  insn = get_insns ();
-+
-+  end_sequence ();
-+
-+  return insn;
-+}
-+
-+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
-+   its address.  The return value will be both a valid address and a valid
-+   SET_SRC (either a REG or a LO_SUM).  */
-+
-+static rtx
-+riscv_legitimize_tls_address (rtx loc)
-+{
-+  rtx dest, insn, tp, tmp1;
-+  enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
-+
-+  /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE.  */
-+  if (!flag_pic)
-+    model = TLS_MODEL_LOCAL_EXEC;
-+
-+  switch (model)
-+    {
-+    case TLS_MODEL_LOCAL_DYNAMIC:
-+      /* Rely on section anchors for the optimization that LDM TLS
-+	 provides.  The anchor's address is loaded with GD TLS. */
-+    case TLS_MODEL_GLOBAL_DYNAMIC:
-+      tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
-+      insn = riscv_call_tls_get_addr (loc, tmp1);
-+      dest = gen_reg_rtx (Pmode);
-+      emit_libcall_block (insn, dest, tmp1, loc);
-+      break;
-+
-+    case TLS_MODEL_INITIAL_EXEC:
-+      /* la.tls.ie; tp-relative add */
-+      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
-+      tmp1 = gen_reg_rtx (Pmode);
-+      emit_insn (riscv_got_load_tls_ie (tmp1, loc));
-+      dest = gen_reg_rtx (Pmode);
-+      emit_insn (gen_add3_insn (dest, tmp1, tp));
-+      break;
-+
-+    case TLS_MODEL_LOCAL_EXEC:
-+      tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
-+      dest = gen_reg_rtx (Pmode);
-+      emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
-+      dest = gen_rtx_LO_SUM (Pmode, dest,
-+			     riscv_unspec_address (loc, SYMBOL_TLS_LE));
-+      break;
-+
-+    default:
-+      gcc_unreachable ();
-+    }
-+  return dest;
-+}
-+

-+/* If X is not a valid address for mode MODE, force it into a register.  */
-+
-+static rtx
-+riscv_force_address (rtx x, enum machine_mode mode)
-+{
-+  if (!riscv_legitimate_address_p (mode, x, false))
-+    x = force_reg (Pmode, x);
-+  return x;
-+}
-+
-+/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
-+   be legitimized in a way that the generic machinery might not expect,
-+   return a new address, otherwise return NULL.  MODE is the mode of
-+   the memory being accessed.  */
-+
-+static rtx
-+riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
-+			 enum machine_mode mode)
-+{
-+  rtx addr;
-+
-+  if (riscv_tls_symbol_p (x))
-+    return riscv_legitimize_tls_address (x);
-+
-+  /* See if the address can split into a high part and a LO_SUM.  */
-+  if (riscv_split_symbol (NULL, x, mode, &addr))
-+    return riscv_force_address (addr, mode);
-+
-+  /* Handle BASE + OFFSET using riscv_add_offset.  */
-+  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
-+      && INTVAL (XEXP (x, 1)) != 0)
-+    {
-+      rtx base = XEXP (x, 0);
-+      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
-+
-+      if (!riscv_valid_base_register_p (base, mode, false))
-+	base = copy_to_mode_reg (Pmode, base);
-+      addr = riscv_add_offset (NULL, base, offset);
-+      return riscv_force_address (addr, mode);
-+    }
-+
-+  return x;
-+}
-+
-+/* Load VALUE into DEST.  TEMP is as for riscv_force_temporary.  */
-+
-+void
-+riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
-+{
-+  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
-+  enum machine_mode mode;
-+  int i, num_ops;
-+  rtx x;
-+
-+  mode = GET_MODE (dest);
-+  num_ops = riscv_build_integer (codes, value, mode);
-+
-+  if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
-+      && num_ops >= riscv_split_integer_cost (value))
-+    x = riscv_split_integer (value, mode);
-+  else
-+    {
-+      /* Apply each binary operation to X. */
-+      x = GEN_INT (codes[0].value);
-+
-+      for (i = 1; i < num_ops; i++)
-+        {
-+          if (!can_create_pseudo_p ())
-+            {
-+              emit_insn (gen_rtx_SET (VOIDmode, temp, x));
-+              x = temp;
-+            }
-+          else
-+            x = force_reg (mode, x);
-+
-+          x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
-+        }
-+    }
-+
-+  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
-+}
-+
-+/* Subroutine of riscv_legitimize_move.  Move constant SRC into register
-+   DEST given that SRC satisfies immediate_operand but doesn't satisfy
-+   move_operand.  */
-+
-+static void
-+riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
-+{
-+  rtx base, offset;
-+
-+  /* Split moves of big integers into smaller pieces.  */
-+  if (splittable_const_int_operand (src, mode))
-+    {
-+      riscv_move_integer (dest, dest, INTVAL (src));
-+      return;
-+    }
-+
-+  /* Split moves of symbolic constants into high/low pairs.  */
-+  if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
-+    {
-+      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
-+      return;
-+    }
-+
-+  /* Generate the appropriate access sequences for TLS symbols.  */
-+  if (riscv_tls_symbol_p (src))
-+    {
-+      riscv_emit_move (dest, riscv_legitimize_tls_address (src));
-+      return;
-+    }
-+
-+  /* If we have (const (plus symbol offset)), and that expression cannot
-+     be forced into memory, load the symbol first and add in the offset.  Also
-+     prefer to do this even if the constant _can_ be forced into memory, as it
-+     usually produces better code.  */
-+  split_const (src, &base, &offset);
-+  if (offset != const0_rtx
-+      && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
-+    {
-+      base = riscv_force_temporary (dest, base);
-+      riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
-+      return;
-+    }
-+
-+  src = force_const_mem (mode, src);
-+
-+  /* When using explicit relocs, constant pool references are sometimes
-+     not legitimate addresses.  */
-+  riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
-+  riscv_emit_move (dest, src);
-+}
-+
-+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
-+   sequence that is valid.  */
-+
-+bool
-+riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
-+{
-+  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
-+    {
-+      riscv_emit_move (dest, force_reg (mode, src));
-+      return true;
-+    }
-+
-+  /* We need to deal with constants that would be legitimate
-+     immediate_operands but aren't legitimate move_operands.  */
-+  if (CONSTANT_P (src) && !move_operand (src, mode))
-+    {
-+      riscv_legitimize_const_move (mode, dest, src);
-+      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
-+      return true;
-+    }
-+  return false;
-+}
-+
-+/* Return true if there is an instruction that implements CODE and accepts
-+   X as an immediate operand. */
-+
-+static int
-+riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
-+{
-+  switch (code)
-+    {
-+    case ASHIFT:
-+    case ASHIFTRT:
-+    case LSHIFTRT:
-+      /* All shift counts are truncated to a valid constant.  */
-+      return true;
-+
-+    case AND:
-+    case IOR:
-+    case XOR:
-+    case PLUS:
-+    case LT:
-+    case LTU:
-+      /* These instructions take 12-bit signed immediates.  */
-+      return SMALL_OPERAND (x);
-+
-+    case LE:
-+      /* We add 1 to the immediate and use SLT.  */
-+      return SMALL_OPERAND (x + 1);
-+
-+    case LEU:
-+      /* Likewise SLTU, but reject the always-true case.  */
-+      return SMALL_OPERAND (x + 1) && x + 1 != 0;
-+
-+    case GE:
-+    case GEU:
-+      /* We can emulate an immediate of 1 by using GT/GTU against x0. */
-+      return x == 1;
-+
-+    default:
-+      /* By default assume that x0 can be used for 0.  */
-+      return x == 0;
-+    }
-+}
-+
-+/* Return the cost of binary operation X, given that the instruction
-+   sequence for a word-sized or smaller operation takes SINGLE_INSNS
-+   instructions and that the sequence of a double-word operation takes
-+   DOUBLE_INSNS instructions.  */
-+
-+static int
-+riscv_binary_cost (rtx x, int single_insns, int double_insns)
-+{
-+  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
-+    return COSTS_N_INSNS (double_insns);
-+  return COSTS_N_INSNS (single_insns);
-+}
-+
-+/* Return the cost of sign-extending OP to mode MODE, not including the
-+   cost of OP itself.  */
-+
-+static int
-+riscv_sign_extend_cost (enum machine_mode mode, rtx op)
-+{
-+  if (MEM_P (op))
-+    /* Extended loads are as cheap as unextended ones.  */
-+    return 0;
-+
-+  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
-+    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
-+    return 0;
-+
-+  /* We need to use a shift left and a shift right.  */
-+  return COSTS_N_INSNS (2);
-+}
-+
-+/* Return the cost of zero-extending OP to mode MODE, not including the
-+   cost of OP itself.  */
-+
-+static int
-+riscv_zero_extend_cost (enum machine_mode mode, rtx op)
-+{
-+  if (MEM_P (op))
-+    /* Extended loads are as cheap as unextended ones.  */
-+    return 0;
-+
-+  if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
-+      ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
-+    /* We need a shift left by 32 bits and a shift right by 32 bits.  */
-+    return COSTS_N_INSNS (2);
-+
-+  /* We can use ANDI.  */
-+  return COSTS_N_INSNS (1);
-+}
-+
-+/* Implement TARGET_RTX_COSTS.  */
-+
-+static bool
-+riscv_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
-+		 int *total, bool speed)
-+{
-+  enum machine_mode mode = GET_MODE (x);
-+  bool float_mode_p = FLOAT_MODE_P (mode);
-+  int cost;
-+
-+  switch (code)
-+    {
-+    case CONST_INT:
-+      if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
-+	{
-+	  *total = 0;
-+	  return true;
-+	}
-+      /* Fall through.  */
-+
-+    case SYMBOL_REF:
-+    case LABEL_REF:
-+    case CONST_DOUBLE:
-+    case CONST:
-+      if (speed)
-+	*total = 1;
-+      else if ((cost = riscv_const_insns (x)) > 0)
-+	*total = COSTS_N_INSNS (cost);
-+      else /* The instruction will be fetched from the constant pool.  */
-+	*total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
-+      return true;
-+
-+    case MEM:
-+      /* If the address is legitimate, return the number of
-+	 instructions it needs.  */
-+      if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
-+	{
-+	  *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
-+	  return true;
-+	}
-+      /* Otherwise use the default handling.  */
-+      return false;
-+
-+    case NOT:
-+      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
-+      return false;
-+
-+    case AND:
-+    case IOR:
-+    case XOR:
-+      /* Double-word operations use two single-word operations.  */
-+      *total = riscv_binary_cost (x, 1, 2);
-+      return false;
-+
-+    case ASHIFT:
-+    case ASHIFTRT:
-+    case LSHIFTRT:
-+      *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
-+      return false;
-+
-+    case ABS:
-+      *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
-+      return false;
-+
-+    case LO_SUM:
-+      *total = set_src_cost (XEXP (x, 0), speed);
-+      return true;
-+
-+    case LT:
-+    case LTU:
-+    case LE:
-+    case LEU:
-+    case GT:
-+    case GTU:
-+    case GE:
-+    case GEU:
-+    case EQ:
-+    case NE:
-+    case UNORDERED:
-+    case LTGT:
-+      /* Branch comparisons have VOIDmode, so use the first operand's
-+	 mode instead.  */
-+      mode = GET_MODE (XEXP (x, 0));
-+      if (float_mode_p)
-+	*total = tune_info->fp_add[mode == DFmode];
-+      else
-+	*total = riscv_binary_cost (x, 1, 3);
-+      return false;
-+
-+    case MINUS:
-+      if (float_mode_p
-+	  && !HONOR_NANS (mode)
-+	  && !HONOR_SIGNED_ZEROS (mode))
-+	{
-+	  /* See if we can use NMADD or NMSUB.  See riscv.md for the
-+	     associated patterns.  */
-+	  rtx op0 = XEXP (x, 0);
-+	  rtx op1 = XEXP (x, 1);
-+	  if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
-+	    {
-+	      *total = (tune_info->fp_mul[mode == DFmode]
-+			+ set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
-+			+ set_src_cost (XEXP (op0, 1), speed)
-+			+ set_src_cost (op1, speed));
-+	      return true;
-+	    }
-+	  if (GET_CODE (op1) == MULT)
-+	    {
-+	      *total = (tune_info->fp_mul[mode == DFmode]
-+			+ set_src_cost (op0, speed)
-+			+ set_src_cost (XEXP (op1, 0), speed)
-+			+ set_src_cost (XEXP (op1, 1), speed));
-+	      return true;
-+	    }
-+	}
-+      /* Fall through.  */
-+
-+    case PLUS:
-+      if (float_mode_p)
-+	*total = tune_info->fp_add[mode == DFmode];
-+      else
-+	*total = riscv_binary_cost (x, 1, 4);
-+      return false;
-+
-+    case NEG:
-+      if (float_mode_p
-+	  && !HONOR_NANS (mode)
-+	  && HONOR_SIGNED_ZEROS (mode))
-+	{
-+	  /* See if we can use NMADD or NMSUB.  See riscv.md for the
-+	     associated patterns.  */
-+	  rtx op = XEXP (x, 0);
-+	  if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
-+	      && GET_CODE (XEXP (op, 0)) == MULT)
-+	    {
-+	      *total = (tune_info->fp_mul[mode == DFmode]
-+			+ set_src_cost (XEXP (XEXP (op, 0), 0), speed)
-+			+ set_src_cost (XEXP (XEXP (op, 0), 1), speed)
-+			+ set_src_cost (XEXP (op, 1), speed));
-+	      return true;
-+	    }
-+	}
-+
-+      if (float_mode_p)
-+	*total = tune_info->fp_add[mode == DFmode];
-+      else
-+	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
-+      return false;
-+
-+    case MULT:
-+      if (float_mode_p)
-+	*total = tune_info->fp_mul[mode == DFmode];
-+      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-+	*total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
-+      else if (!speed)
-+	*total = COSTS_N_INSNS (1);
-+      else
-+	*total = tune_info->int_mul[mode == DImode];
-+      return false;
-+
-+    case DIV:
-+    case SQRT:
-+    case MOD:
-+      if (float_mode_p)
-+	{
-+	  *total = tune_info->fp_div[mode == DFmode];
-+	  return false;
-+	}
-+      /* Fall through.  */
-+
-+    case UDIV:
-+    case UMOD:
-+      if (speed)
-+	*total = tune_info->int_div[mode == DImode];
-+      else
-+	*total = COSTS_N_INSNS (1);
-+      return false;
-+
-+    case SIGN_EXTEND:
-+      *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
-+      return false;
-+
-+    case ZERO_EXTEND:
-+      *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
-+      return false;
-+
-+    case FLOAT:
-+    case UNSIGNED_FLOAT:
-+    case FIX:
-+    case FLOAT_EXTEND:
-+    case FLOAT_TRUNCATE:
-+      *total = tune_info->fp_add[mode == DFmode];
-+      return false;
-+
-+    default:
-+      return false;
-+    }
-+}
-+
-+/* Implement TARGET_ADDRESS_COST.  */
-+
-+static int
-+riscv_address_cost (rtx addr, enum machine_mode mode,
-+		    addr_space_t as ATTRIBUTE_UNUSED,
-+		    bool speed ATTRIBUTE_UNUSED)
-+{
-+  return riscv_address_insns (addr, mode, false);
-+}
-+
-+/* Return one word of double-word value OP.  HIGH_P is true to select the
-+   high part or false to select the low part. */
-+
-+rtx
-+riscv_subword (rtx op, bool high_p)
-+{
-+  unsigned int byte;
-+  enum machine_mode mode;
-+
-+  mode = GET_MODE (op);
-+  if (mode == VOIDmode)
-+    mode = TARGET_64BIT ? TImode : DImode;
-+
-+  byte = high_p ? UNITS_PER_WORD : 0;
-+
-+  if (FP_REG_RTX_P (op))
-+    return gen_rtx_REG (word_mode, REGNO (op) + high_p);
-+
-+  if (MEM_P (op))
-+    return adjust_address (op, word_mode, byte);
-+
-+  return simplify_gen_subreg (word_mode, op, mode, byte);
-+}
-+
-+/* Return true if a 64-bit move from SRC to DEST should be split into two.  */
-+
-+bool
-+riscv_split_64bit_move_p (rtx dest, rtx src)
-+{
-+  /* All 64b moves are legal in 64b mode.  All 64b FPR <-> FPR and
-+     FPR <-> MEM moves are legal in 32b mode, too.  Although
-+     FPR <-> GPR moves are not available in general in 32b mode,
-+     we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
-+  return !(TARGET_64BIT
-+	   || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
-+	   || (FP_REG_RTX_P (dest) && MEM_P (src))
-+	   || (FP_REG_RTX_P (src) && MEM_P (dest))
-+	   || (FP_REG_RTX_P(dest) && src == CONST0_RTX(GET_MODE(src))));
-+}
-+
-+/* Split a doubleword move from SRC to DEST.  On 32-bit targets,
-+   this function handles 64-bit moves for which riscv_split_64bit_move_p
-+   holds.  For 64-bit targets, this function handles 128-bit moves.  */
-+
-+void
-+riscv_split_doubleword_move (rtx dest, rtx src)
-+{
-+  rtx low_dest;
-+
-+   /* The operation can be split into two normal moves.  Decide in
-+      which order to do them.  */
-+   low_dest = riscv_subword (dest, false);
-+   if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
-+     {
-+       riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
-+       riscv_emit_move (low_dest, riscv_subword (src, false));
-+     }
-+   else
-+     {
-+       riscv_emit_move (low_dest, riscv_subword (src, false));
-+       riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
-+     }
-+}
-+

-+/* Return the appropriate instructions to move SRC into DEST.  Assume
-+   that SRC is operand 1 and DEST is operand 0.  */
-+
-+const char *
-+riscv_output_move (rtx dest, rtx src)
-+{
-+  enum rtx_code dest_code, src_code;
-+  enum machine_mode mode;
-+  bool dbl_p;
-+
-+  dest_code = GET_CODE (dest);
-+  src_code = GET_CODE (src);
-+  mode = GET_MODE (dest);
-+  dbl_p = (GET_MODE_SIZE (mode) == 8);
-+
-+  if (dbl_p && riscv_split_64bit_move_p (dest, src))
-+    return "#";
-+
-+  if (dest_code == REG && GP_REG_P (REGNO (dest)))
-+    {
-+      if (src_code == REG && FP_REG_P (REGNO (src)))
-+	return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
-+
-+      if (src_code == MEM)
-+	switch (GET_MODE_SIZE (mode))
-+	  {
-+	  case 1: return "lbu\t%0,%1";
-+	  case 2: return "lhu\t%0,%1";
-+	  case 4: return "lw\t%0,%1";
-+	  case 8: return "ld\t%0,%1";
-+	  }
-+
-+      if (src_code == CONST_INT)
-+	return "li\t%0,%1";
-+
-+      if (src_code == HIGH)
-+	return "lui\t%0,%h1";
-+
-+      if (symbolic_operand (src, VOIDmode))
-+	switch (riscv_classify_symbolic_expression (src))
-+	  {
-+	  case SYMBOL_GOT_DISP: return "la\t%0,%1";
-+	  case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
-+	  default: gcc_unreachable();
-+	  }
-+    }
-+  if ((src_code == REG && GP_REG_P (REGNO (src)))
-+      || (src == CONST0_RTX (mode)))
-+    {
-+      if (dest_code == REG)
-+	{
-+	  if (GP_REG_P (REGNO (dest)))
-+	    return "mv\t%0,%z1";
-+
-+	  if (FP_REG_P (REGNO (dest)))
-+	    {
-+	      if (!dbl_p)
-+		return "fmv.s.x\t%0,%z1";
-+	      if (TARGET_64BIT)
-+		return "fmv.d.x\t%0,%z1";
-+	      /* in RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w */
-+	      gcc_assert (src == CONST0_RTX (mode));
-+	      return "fcvt.d.w\t%0,x0";
-+	    }
-+	}
-+      if (dest_code == MEM)
-+	switch (GET_MODE_SIZE (mode))
-+	  {
-+	  case 1: return "sb\t%z1,%0";
-+	  case 2: return "sh\t%z1,%0";
-+	  case 4: return "sw\t%z1,%0";
-+	  case 8: return "sd\t%z1,%0";
-+	  }
-+    }
-+  if (src_code == REG && FP_REG_P (REGNO (src)))
-+    {
-+      if (dest_code == REG && FP_REG_P (REGNO (dest)))
-+	return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
-+
-+      if (dest_code == MEM)
-+	return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
-+    }
-+  if (dest_code == REG && FP_REG_P (REGNO (dest)))
-+    {
-+      if (src_code == MEM)
-+	return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
-+    }
-+  gcc_unreachable ();
-+}
-+

-+/* Return true if CMP1 is a suitable second operand for integer ordering
-+   test CODE.  See also the *sCC patterns in riscv.md.  */
-+
-+static bool
-+riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
-+{
-+  switch (code)
-+    {
-+    case GT:
-+    case GTU:
-+      return reg_or_0_operand (cmp1, VOIDmode);
-+
-+    case GE:
-+    case GEU:
-+      return cmp1 == const1_rtx;
-+
-+    case LT:
-+    case LTU:
-+      return arith_operand (cmp1, VOIDmode);
-+
-+    case LE:
-+      return sle_operand (cmp1, VOIDmode);
-+
-+    case LEU:
-+      return sleu_operand (cmp1, VOIDmode);
-+
-+    default:
-+      gcc_unreachable ();
-+    }
-+}
-+
-+/* Return true if *CMP1 (of mode MODE) is a valid second operand for
-+   integer ordering test *CODE, or if an equivalent combination can
-+   be formed by adjusting *CODE and *CMP1.  When returning true, update
-+   *CODE and *CMP1 with the chosen code and operand, otherwise leave
-+   them alone.  */
-+
-+static bool
-+riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
-+				  enum machine_mode mode)
-+{
-+  HOST_WIDE_INT plus_one;
-+
-+  if (riscv_int_order_operand_ok_p (*code, *cmp1))
-+    return true;
-+
-+  if (CONST_INT_P (*cmp1))
-+    switch (*code)
-+      {
-+      case LE:
-+	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
-+	if (INTVAL (*cmp1) < plus_one)
-+	  {
-+	    *code = LT;
-+	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
-+	    return true;
-+	  }
-+	break;
-+
-+      case LEU:
-+	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
-+	if (plus_one != 0)
-+	  {
-+	    *code = LTU;
-+	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
-+	    return true;
-+	  }
-+	break;
-+
-+      default:
-+	break;
-+      }
-+  return false;
-+}
-+
-+/* Compare CMP0 and CMP1 using ordering test CODE and store the result
-+   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
-+   is nonnull, it's OK to set TARGET to the inverse of the result and
-+   flip *INVERT_PTR instead.  */
-+
-+static void
-+riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
-+			  rtx target, rtx cmp0, rtx cmp1)
-+{
-+  enum machine_mode mode;
-+
-+  /* First see if there is a RISCV instruction that can do this operation.
-+     If not, try doing the same for the inverse operation.  If that also
-+     fails, force CMP1 into a register and try again.  */
-+  mode = GET_MODE (cmp0);
-+  if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
-+    riscv_emit_binary (code, target, cmp0, cmp1);
-+  else
-+    {
-+      enum rtx_code inv_code = reverse_condition (code);
-+      if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
-+	{
-+	  cmp1 = force_reg (mode, cmp1);
-+	  riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
-+	}
-+      else if (invert_ptr == 0)
-+	{
-+	  rtx inv_target;
-+
-+	  inv_target = riscv_force_binary (GET_MODE (target),
-+					  inv_code, cmp0, cmp1);
-+	  riscv_emit_binary (XOR, target, inv_target, const1_rtx);
-+	}
-+      else
-+	{
-+	  *invert_ptr = !*invert_ptr;
-+	  riscv_emit_binary (inv_code, target, cmp0, cmp1);
-+	}
-+    }
-+}
-+
-+/* Return a register that is zero iff CMP0 and CMP1 are equal.
-+   The register will have the same mode as CMP0.  */
-+
-+static rtx
-+riscv_zero_if_equal (rtx cmp0, rtx cmp1)
-+{
-+  if (cmp1 == const0_rtx)
-+    return cmp0;
-+
-+  return expand_binop (GET_MODE (cmp0), sub_optab,
-+		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
-+}
-+
-+/* Return false if we can easily emit code for the FP comparison specified
-+   by *CODE.  If not, set *CODE to its inverse and return true. */
-+
-+static bool
-+riscv_reversed_fp_cond (enum rtx_code *code)
-+{
-+  switch (*code)
-+    {
-+    case EQ:
-+    case LT:
-+    case LE:
-+    case GT:
-+    case GE:
-+    case LTGT:
-+    case ORDERED:
-+      /* We know how to emit code for these cases... */
-+      return false;
-+
-+    default:
-+      /* ...but we must invert these and rely on the others. */
-+      *code = reverse_condition_maybe_unordered (*code);
-+      return true;
-+    }
-+}
-+
-+/* Convert a comparison into something that can be used in a branch or
-+   conditional move.  On entry, *OP0 and *OP1 are the values being
-+   compared and *CODE is the code used to compare them.
-+
-+   Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
-+
-+static void
-+riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
-+{
-+  rtx cmp_op0 = *op0;
-+  rtx cmp_op1 = *op1;
-+
-+  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
-+    {
-+      if (splittable_const_int_operand (cmp_op1, VOIDmode))
-+	{
-+	  HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
-+	  enum rtx_code new_code;
-+
-+	  switch (*code)
-+	    {
-+	    case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
-+	    case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
-+	    case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
-+	    case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
-+	    case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
-+	    case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
-+	    case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
-+	    case GE: new_rhs = rhs - 1; new_code = GT;
-+	    try_new_rhs:
-+	      /* Convert e.g. OP0 > 4095 into OP0 >= 4096.  */
-+	      if ((rhs < 0) == (new_rhs < 0)
-+		  && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
-+		{
-+		  *op1 = GEN_INT (new_rhs);
-+		  *code = new_code;
-+		}
-+	      break;
-+
-+	    case EQ:
-+	    case NE:
-+	      /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0.  */
-+	      if (SMALL_OPERAND (-rhs))
-+		{
-+		  *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
-+		  riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
-+		  *op1 = const0_rtx;
-+		}
-+	    default:
-+	      break;
-+	    }
-+	}
-+
-+      if (*op1 != const0_rtx)
-+	*op1 = force_reg (GET_MODE (cmp_op0), *op1);
-+    }
-+  else
-+    {
-+      /* For FP comparisons, set an integer register with the result of the
-+	 comparison, then branch on it. */
-+      rtx tmp0, tmp1, final_op;
-+      enum rtx_code fp_code = *code;
-+      *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
-+
-+      switch (fp_code)
-+	{
-+	case ORDERED:
-+	  /* a == a && b == b */
-+	  tmp0 = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
-+	  tmp1 = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
-+	  final_op = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (AND, final_op, tmp0, tmp1);
-+	  break;
-+
-+	case LTGT:
-+	  /* a < b || a > b */
-+	  tmp0 = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
-+	  tmp1 = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
-+	  final_op = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (IOR, final_op, tmp0, tmp1);
-+	  break;
-+
-+	case EQ:
-+	case LE:
-+	case LT:
-+	case GE:
-+	case GT:
-+	  /* We have instructions for these cases. */
-+	  final_op = gen_reg_rtx (SImode);
-+	  riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
-+	  break;
-+
-+	default:
-+	  gcc_unreachable ();
-+	}
-+
-+      /* Compare the binary result against 0. */
-+      *op0 = final_op;
-+      *op1 = const0_rtx;
-+    }
-+}
-+
-+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
-+   and OPERANDS[3].  Store the result in OPERANDS[0].
-+
-+   On 64-bit targets, the mode of the comparison and target will always be
-+   SImode, thus possibly narrower than that of the comparison's operands.  */
-+
-+void
-+riscv_expand_scc (rtx operands[])
-+{
-+  rtx target = operands[0];
-+  enum rtx_code code = GET_CODE (operands[1]);
-+  rtx op0 = operands[2];
-+  rtx op1 = operands[3];
-+
-+  gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
-+
-+  if (code == EQ || code == NE)
-+    {
-+      rtx zie = riscv_zero_if_equal (op0, op1);
-+      riscv_emit_binary (code, target, zie, const0_rtx);
-+    }
-+  else
-+    riscv_emit_int_order_test (code, 0, target, op0, op1);
-+}
-+
-+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
-+   CODE and jump to OPERANDS[3] if the condition holds.  */
-+
-+void
-+riscv_expand_conditional_branch (rtx *operands)
-+{
-+  enum rtx_code code = GET_CODE (operands[0]);
-+  rtx op0 = operands[1];
-+  rtx op1 = operands[2];
-+  rtx condition;
-+
-+  riscv_emit_compare (&code, &op0, &op1);
-+  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
-+  emit_jump_insn (gen_condjump (condition, operands[3]));
-+}
-+
-+/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Every parameter gets at
-+   least PARM_BOUNDARY bits of alignment, but will be given anything up
-+   to STACK_BOUNDARY bits if the type requires it.  */
-+
-+static unsigned int
-+riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
-+{
-+  unsigned int alignment;
-+
-+  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
-+  if (alignment < PARM_BOUNDARY)
-+    alignment = PARM_BOUNDARY;
-+  if (alignment > STACK_BOUNDARY)
-+    alignment = STACK_BOUNDARY;
-+  return alignment;
-+}
-+
-+/* Fill INFO with information about a single argument.  CUM is the
-+   cumulative state for earlier arguments.  MODE is the mode of this
-+   argument and TYPE is its type (if known).  NAMED is true if this
-+   is a named (fixed) argument rather than a variable one.  */
-+
-+static void
-+riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
-+		   enum machine_mode mode, const_tree type, bool named)
-+{
-+  bool doubleword_aligned_p;
-+  unsigned int num_bytes, num_words, max_regs;
-+
-+  /* Work out the size of the argument.  */
-+  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
-+  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-+
-+  /* Scalar, complex and vector floating-point types are passed in
-+     floating-point registers, as long as this is a named rather
-+     than a variable argument.  */
-+  info->fpr_p = (named
-+		 && (type == 0 || FLOAT_TYPE_P (type))
-+		 && (GET_MODE_CLASS (mode) == MODE_FLOAT
-+		     || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
-+		     || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
-+		 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
-+
-+  /* Complex floats should only go into FPRs if there are two FPRs free,
-+     otherwise they should be passed in the same way as a struct
-+     containing two floats.  */
-+  if (info->fpr_p
-+      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
-+      && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
-+    {
-+      if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
-+        info->fpr_p = false;
-+      else
-+        num_words = 2;
-+    }
-+
-+  /* See whether the argument has doubleword alignment.  */
-+  doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
-+			  > BITS_PER_WORD);
-+
-+  /* Set REG_OFFSET to the register count we're interested in.
-+     The EABI allocates the floating-point registers separately,
-+     but the other ABIs allocate them like integer registers.  */
-+  info->reg_offset = cum->num_gprs;
-+
-+  /* Advance to an even register if the argument is doubleword-aligned.  */
-+  if (doubleword_aligned_p)
-+    info->reg_offset += info->reg_offset & 1;
-+
-+  /* Work out the offset of a stack argument.  */
-+  info->stack_offset = cum->stack_words;
-+  if (doubleword_aligned_p)
-+    info->stack_offset += info->stack_offset & 1;
-+
-+  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
-+
-+  /* Partition the argument between registers and stack.  */
-+  info->reg_words = MIN (num_words, max_regs);
-+  info->stack_words = num_words - info->reg_words;
-+}
-+
-+/* INFO describes a register argument that has the normal format for the
-+   argument's mode.  Return the register it uses, assuming that FPRs are
-+   available if HARD_FLOAT_P.  */
-+
-+static unsigned int
-+riscv_arg_regno (const struct riscv_arg_info *info, bool hard_float_p)
-+{
-+  if (!info->fpr_p || !hard_float_p)
-+    return GP_ARG_FIRST + info->reg_offset;
-+  else
-+    return FP_ARG_FIRST + info->reg_offset;
-+}
-+
-+/* Implement TARGET_FUNCTION_ARG.  */
-+
-+static rtx
-+riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
-+		    const_tree type, bool named)
-+{
-+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
-+  struct riscv_arg_info info;
-+
-+  if (mode == VOIDmode)
-+    return NULL;
-+
-+  riscv_get_arg_info (&info, cum, mode, type, named);
-+
-+  /* Return straight away if the whole argument is passed on the stack.  */
-+  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
-+    return NULL;
-+
-+  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
-+     contains a double in its entirety, then that 64-bit chunk is passed
-+     in a floating-point register.  */
-+  if (TARGET_HARD_FLOAT
-+      && named
-+      && type != 0
-+      && TREE_CODE (type) == RECORD_TYPE
-+      && TYPE_SIZE_UNIT (type)
-+      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
-+    {
-+      tree field;
-+
-+      /* First check to see if there is any such field.  */
-+      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
-+	if (TREE_CODE (field) == FIELD_DECL
-+	    && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
-+	    && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
-+	    && tree_fits_shwi_p (bit_position (field))
-+	    && int_bit_position (field) % BITS_PER_WORD == 0)
-+	  break;
-+
-+      if (field != 0)
-+	{
-+	  /* Now handle the special case by returning a PARALLEL
-+	     indicating where each 64-bit chunk goes.  INFO.REG_WORDS
-+	     chunks are passed in registers.  */
-+	  unsigned int i;
-+	  HOST_WIDE_INT bitpos;
-+	  rtx ret;
-+
-+	  /* assign_parms checks the mode of ENTRY_PARM, so we must
-+	     use the actual mode here.  */
-+	  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
-+
-+	  bitpos = 0;
-+	  field = TYPE_FIELDS (type);
-+	  for (i = 0; i < info.reg_words; i++)
-+	    {
-+	      rtx reg;
-+
-+	      for (; field; field = DECL_CHAIN (field))
-+		if (TREE_CODE (field) == FIELD_DECL
-+		    && int_bit_position (field) >= bitpos)
-+		  break;
-+
-+	      if (field
-+		  && int_bit_position (field) == bitpos
-+		  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
-+		  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
-+		reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
-+	      else
-+		reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
-+
-+	      XVECEXP (ret, 0, i)
-+		= gen_rtx_EXPR_LIST (VOIDmode, reg,
-+				     GEN_INT (bitpos / BITS_PER_UNIT));
-+
-+	      bitpos += BITS_PER_WORD;
-+	    }
-+	  return ret;
-+	}
-+    }
-+
-+  /* Handle the n32/n64 conventions for passing complex floating-point
-+     arguments in FPR pairs.  The real part goes in the lower register
-+     and the imaginary part goes in the upper register.  */
-+  if (info.fpr_p
-+      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
-+    {
-+      rtx real, imag;
-+      enum machine_mode inner;
-+      unsigned int regno;
-+
-+      inner = GET_MODE_INNER (mode);
-+      regno = FP_ARG_FIRST + info.reg_offset;
-+      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
-+	{
-+	  /* Real part in registers, imaginary part on stack.  */
-+	  gcc_assert (info.stack_words == info.reg_words);
-+	  return gen_rtx_REG (inner, regno);
-+	}
-+      else
-+	{
-+	  gcc_assert (info.stack_words == 0);
-+	  real = gen_rtx_EXPR_LIST (VOIDmode,
-+				    gen_rtx_REG (inner, regno),
-+				    const0_rtx);
-+	  imag = gen_rtx_EXPR_LIST (VOIDmode,
-+				    gen_rtx_REG (inner,
-+						 regno + info.reg_words / 2),
-+				    GEN_INT (GET_MODE_SIZE (inner)));
-+	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
-+	}
-+    }
-+
-+  return gen_rtx_REG (mode, riscv_arg_regno (&info, TARGET_HARD_FLOAT));
-+}
-+
-+/* Implement TARGET_FUNCTION_ARG_ADVANCE.  */
-+
-+static void
-+riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
-+			    const_tree type, bool named)
-+{
-+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
-+  struct riscv_arg_info info;
-+
-+  riscv_get_arg_info (&info, cum, mode, type, named);
-+
-+  /* Advance the register count.  This has the effect of setting
-+     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
-+     argument required us to skip the final GPR and pass the whole
-+     argument on the stack.  */
-+  cum->num_gprs = info.reg_offset + info.reg_words;
-+
-+  /* Advance the stack word count.  */
-+  if (info.stack_words > 0)
-+    cum->stack_words = info.stack_offset + info.stack_words;
-+}
-+
-+/* Implement TARGET_ARG_PARTIAL_BYTES.  */
-+
-+static int
-+riscv_arg_partial_bytes (cumulative_args_t cum,
-+			 enum machine_mode mode, tree type, bool named)
-+{
-+  struct riscv_arg_info info;
-+
-+  riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
-+  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
-+}
-+
-+/* See whether VALTYPE is a record whose fields should be returned in
-+   floating-point registers.  If so, return the number of fields and
-+   list them in FIELDS (which should have two elements).  Return 0
-+   otherwise.
-+
-+   For n32 & n64, a structure with one or two fields is returned in
-+   floating-point registers as long as every field has a floating-point
-+   type.  */
-+
-+static int
-+riscv_fpr_return_fields (const_tree valtype, tree *fields)
-+{
-+  tree field;
-+  int i;
-+
-+  if (TREE_CODE (valtype) != RECORD_TYPE)
-+    return 0;
-+
-+  i = 0;
-+  for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
-+    {
-+      if (TREE_CODE (field) != FIELD_DECL)
-+	continue;
-+
-+      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
-+	return 0;
-+
-+      if (i == 2)
-+	return 0;
-+
-+      fields[i++] = field;
-+    }
-+  return i;
-+}
-+
-+/* Return true if the function return value MODE will get returned in a
-+   floating-point register.  */
-+
-+static bool
-+riscv_return_mode_in_fpr_p (enum machine_mode mode)
-+{
-+  return ((GET_MODE_CLASS (mode) == MODE_FLOAT
-+	   || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
-+	   || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
-+	  && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
-+}
-+
-+/* Return the representation of an FPR return register when the
-+   value being returned in FP_RETURN has mode VALUE_MODE and the
-+   return type itself has mode TYPE_MODE.  On NewABI targets,
-+   the two modes may be different for structures like:
-+
-+       struct __attribute__((packed)) foo { float f; }
-+
-+   where we return the SFmode value of "f" in FP_RETURN, but where
-+   the structure itself has mode BLKmode.  */
-+
-+static rtx
-+riscv_return_fpr_single (enum machine_mode type_mode,
-+			enum machine_mode value_mode)
-+{
-+  rtx x;
-+
-+  x = gen_rtx_REG (value_mode, FP_RETURN);
-+  if (type_mode != value_mode)
-+    {
-+      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
-+      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
-+    }
-+  return x;
-+}
-+
-+/* Return a composite value in a pair of floating-point registers.
-+   MODE1 and OFFSET1 are the mode and byte offset for the first value,
-+   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
-+   complete value.
-+
-+   For n32 & n64, $f0 always holds the first value and $f2 the second.
-+   Otherwise the values are packed together as closely as possible.  */
-+
-+static rtx
-+riscv_return_fpr_pair (enum machine_mode mode,
-+		      enum machine_mode mode1, HOST_WIDE_INT offset1,
-+		      enum machine_mode mode2, HOST_WIDE_INT offset2)
-+{
-+  return gen_rtx_PARALLEL
-+    (mode,
-+     gen_rtvec (2,
-+		gen_rtx_EXPR_LIST (VOIDmode,
-+				   gen_rtx_REG (mode1, FP_RETURN),
-+				   GEN_INT (offset1)),
-+		gen_rtx_EXPR_LIST (VOIDmode,
-+				   gen_rtx_REG (mode2, FP_RETURN + 1),
-+				   GEN_INT (offset2))));
-+
-+}
-+
-+/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
-+   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
-+   VALTYPE is null and MODE is the mode of the return value.  */
-+
-+rtx
-+riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
-+{
-+  if (valtype)
-+    {
-+      tree fields[2];
-+      int unsigned_p;
-+
-+      mode = TYPE_MODE (valtype);
-+      unsigned_p = TYPE_UNSIGNED (valtype);
-+
-+      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
-+	 return values, promote the mode here too.  */
-+      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
-+
-+      /* Handle structures whose fields are returned in $f0/$f2.  */
-+      switch (riscv_fpr_return_fields (valtype, fields))
-+	{
-+	case 1:
-+	  return riscv_return_fpr_single (mode,
-+					 TYPE_MODE (TREE_TYPE (fields[0])));
-+
-+	case 2:
-+	  return riscv_return_fpr_pair (mode,
-+				       TYPE_MODE (TREE_TYPE (fields[0])),
-+				       int_byte_position (fields[0]),
-+				       TYPE_MODE (TREE_TYPE (fields[1])),
-+				       int_byte_position (fields[1]));
-+	}
-+
-+      /* Only use FPRs for scalar, complex or vector types.  */
-+      if (!FLOAT_TYPE_P (valtype))
-+	return gen_rtx_REG (mode, GP_RETURN);
-+    }
-+
-+  /* Handle long doubles for n32 & n64.  */
-+  if (mode == TFmode)
-+    return riscv_return_fpr_pair (mode,
-+    			     DImode, 0,
-+    			     DImode, GET_MODE_SIZE (mode) / 2);
-+
-+  if (riscv_return_mode_in_fpr_p (mode))
-+    {
-+      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
-+        return riscv_return_fpr_pair (mode,
-+    				 GET_MODE_INNER (mode), 0,
-+    				 GET_MODE_INNER (mode),
-+    				 GET_MODE_SIZE (mode) / 2);
-+      else
-+        return gen_rtx_REG (mode, FP_RETURN);
-+    }
-+
-+  return gen_rtx_REG (mode, GP_RETURN);
-+}
-+
-+/* Implement TARGET_RETURN_IN_MEMORY.  Scalars and small structures
-+   that fit in two registers are returned in a0/a1. */
-+
-+static bool
-+riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
-+{
-+  return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
-+}
-+
-+/* Implement TARGET_PASS_BY_REFERENCE. */
-+
-+static bool
-+riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
-+			 enum machine_mode mode, const_tree type,
-+			 bool named ATTRIBUTE_UNUSED)
-+{
-+  if (type && riscv_return_in_memory (type, NULL_TREE))
-+    return true;
-+  return targetm.calls.must_pass_in_stack (mode, type);
-+}
-+
-+/* Implement TARGET_SETUP_INCOMING_VARARGS.  */
-+
-+static void
-+riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
-+			     tree type, int *pretend_size ATTRIBUTE_UNUSED,
-+			     int no_rtl)
-+{
-+  CUMULATIVE_ARGS local_cum;
-+  int gp_saved;
-+
-+  /* The caller has advanced CUM up to, but not beyond, the last named
-+     argument.  Advance a local copy of CUM past the last "real" named
-+     argument, to find out how many registers are left over.  */
-+  local_cum = *get_cumulative_args (cum);
-+  riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
-+
-+  /* Find out how many registers we need to save.  */
-+  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
-+
-+  if (!no_rtl && gp_saved > 0)
-+    {
-+      rtx ptr, mem;
-+
-+      ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
-+			   REG_PARM_STACK_SPACE (cfun->decl)
-+			   - gp_saved * UNITS_PER_WORD);
-+      mem = gen_frame_mem (BLKmode, ptr);
-+      set_mem_alias_set (mem, get_varargs_alias_set ());
-+
-+      move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
-+			   mem, gp_saved);
-+    }
-+  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
-+    cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
-+}
-+
-+/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */
-+
-+static void
-+riscv_va_start (tree valist, rtx nextarg)
-+{
-+  nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
-+  std_expand_builtin_va_start (valist, nextarg);
-+}
-+
-+/* Expand a call; SIBCALL_P says whether it is a sibling call.  RESULT is
-+   where the result will go (null for "call"s and "sibcall"s), ADDR is the
-+   address of the function and ARGS_SIZE is the size of the arguments.
-+   Return the call itself.  */
-+
-+rtx
-+riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
-+{
-+  rtx pattern;
-+
-+  if (!call_insn_operand (addr, VOIDmode))
-+    {
-+      rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
-+      riscv_emit_move (reg, addr);
-+      addr = reg;
-+    }
-+
-+  if (result == 0)
-+    {
-+      rtx (*fn) (rtx, rtx);
-+
-+      if (sibcall_p)
-+	fn = gen_sibcall_internal;
-+      else
-+	fn = gen_call_internal;
-+
-+      pattern = fn (addr, args_size);
-+    }
-+  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
-+    {
-+      /* Handle return values created by riscv_return_fpr_pair.  */
-+      rtx (*fn) (rtx, rtx, rtx, rtx);
-+      rtx reg1, reg2;
-+
-+      if (sibcall_p)
-+	fn = gen_sibcall_value_multiple_internal;
-+      else
-+	fn = gen_call_value_multiple_internal;
-+
-+      reg1 = XEXP (XVECEXP (result, 0, 0), 0);
-+      reg2 = XEXP (XVECEXP (result, 0, 1), 0);
-+      pattern = fn (reg1, addr, args_size, reg2);
-+    }
-+  else
-+    {
-+      rtx (*fn) (rtx, rtx, rtx);
-+
-+      if (sibcall_p)
-+	fn = gen_sibcall_value_internal;
-+      else
-+	fn = gen_call_value_internal;
-+
-+      /* Handle return values created by riscv_return_fpr_single.  */
-+      if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
-+	result = XEXP (XVECEXP (result, 0, 0), 0);
-+      pattern = fn (result, addr, args_size);
-+    }
-+
-+  return emit_call_insn (pattern);
-+}
-+
-+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
-+   Assume that the areas do not overlap.  */
-+
-+static void
-+riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
-+{
-+  HOST_WIDE_INT offset, delta;
-+  unsigned HOST_WIDE_INT bits;
-+  int i;
-+  enum machine_mode mode;
-+  rtx *regs;
-+
-+  bits = MAX (BITS_PER_UNIT,
-+	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
-+
-+  mode = mode_for_size (bits, MODE_INT, 0);
-+  delta = bits / BITS_PER_UNIT;
-+
-+  /* Allocate a buffer for the temporary registers.  */
-+  regs = XALLOCAVEC (rtx, length / delta);
-+
-+  /* Load as many BITS-sized chunks as possible into temporary registers.  */
-+  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
-+    {
-+      regs[i] = gen_reg_rtx (mode);
-+      riscv_emit_move (regs[i], adjust_address (src, mode, offset));
-+    }
-+
-+  /* Copy the chunks to the destination.  */
-+  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
-+      riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
-+
-+  /* Mop up any left-over bytes.  */
-+  if (offset < length)
-+    {
-+      src = adjust_address (src, BLKmode, offset);
-+      dest = adjust_address (dest, BLKmode, offset);
-+      move_by_pieces (dest, src, length - offset,
-+		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
-+    }
-+}
-+
-+/* Helper function for doing a loop-based block operation on memory
-+   reference MEM.  Each iteration of the loop will operate on LENGTH
-+   bytes of MEM.
-+
-+   Create a new base register for use within the loop and point it to
-+   the start of MEM.  Create a new memory reference that uses this
-+   register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */
-+
-+static void
-+riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
-+		       rtx *loop_reg, rtx *loop_mem)
-+{
-+  *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
-+
-+  /* Although the new mem does not refer to a known location,
-+     it does keep up to LENGTH bytes of alignment.  */
-+  *loop_mem = change_address (mem, BLKmode, *loop_reg);
-+  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
-+}
-+
-+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
-+   bytes at a time.  LENGTH must be at least BYTES_PER_ITER.  Assume that
-+   the memory regions do not overlap.  */
-+
-+static void
-+riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
-+		      HOST_WIDE_INT bytes_per_iter)
-+{
-+  rtx label, src_reg, dest_reg, final_src, test;
-+  HOST_WIDE_INT leftover;
-+
-+  leftover = length % bytes_per_iter;
-+  length -= leftover;
-+
-+  /* Create registers and memory references for use within the loop.  */
-+  riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
-+  riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
-+
-+  /* Calculate the value that SRC_REG should have after the last iteration
-+     of the loop.  */
-+  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
-+				   0, 0, OPTAB_WIDEN);
-+
-+  /* Emit the start of the loop.  */
-+  label = gen_label_rtx ();
-+  emit_label (label);
-+
-+  /* Emit the loop body.  */
-+  riscv_block_move_straight (dest, src, bytes_per_iter);
-+
-+  /* Move on to the next block.  */
-+  riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
-+  riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
-+
-+  /* Emit the loop condition.  */
-+  test = gen_rtx_NE (VOIDmode, src_reg, final_src);
-+  if (Pmode == DImode)
-+    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
-+  else
-+    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
-+
-+  /* Mop up any left-over bytes.  */
-+  if (leftover)
-+    riscv_block_move_straight (dest, src, leftover);
-+}
-+
-+/* Expand a movmemsi instruction, which copies LENGTH bytes from
-+   memory reference SRC to memory reference DEST.  */
-+
-+bool
-+riscv_expand_block_move (rtx dest, rtx src, rtx length)
-+{
-+  if (CONST_INT_P (length))
-+    {
-+      HOST_WIDE_INT factor, align;
-+
-+      align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
-+      factor = BITS_PER_WORD / align;
-+
-+      if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
-+	{
-+	  riscv_block_move_straight (dest, src, INTVAL (length));
-+	  return true;
-+	}
-+      else if (optimize && align >= BITS_PER_WORD)
-+	{
-+	  riscv_block_move_loop (dest, src, INTVAL (length),
-+				RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
-+	  return true;
-+	}
-+    }
-+  return false;
-+}
-+
-+/* (Re-)Initialize riscv_lo_relocs and riscv_hi_relocs.  */
-+
-+static void
-+riscv_init_relocs (void)
-+{
-+  memset (riscv_hi_relocs, '\0', sizeof (riscv_hi_relocs));
-+  memset (riscv_lo_relocs, '\0', sizeof (riscv_lo_relocs));
-+
-+  if (!flag_pic && riscv_cmodel == CM_MEDLOW)
-+    {
-+      riscv_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
-+      riscv_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
-+    }
-+
-+  if (!flag_pic || flag_pie)
-+    {
-+      riscv_hi_relocs[SYMBOL_TLS_LE] = "%tprel_hi(";
-+      riscv_lo_relocs[SYMBOL_TLS_LE] = "%tprel_lo(";
-+    }
-+}
-+
-+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
-+   in context CONTEXT.  RELOCS is the array of relocations to use.  */
-+
-+static void
-+riscv_print_operand_reloc (FILE *file, rtx op, const char **relocs)
-+{
-+  enum riscv_symbol_type symbol_type;
-+  const char *p;
-+
-+  symbol_type = riscv_classify_symbolic_expression (op);
-+  gcc_assert (relocs[symbol_type]);
-+
-+  fputs (relocs[symbol_type], file);
-+  output_addr_const (file, riscv_strip_unspec_address (op));
-+  for (p = relocs[symbol_type]; *p != 0; p++)
-+    if (*p == '(')
-+      fputc (')', file);
-+}
-+
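-+/* Return the assembler suffix (".aq", ".rl", ".sc" or the empty string)
-+   used to annotate an atomic instruction for memory model MODEL.  */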
-+static const char *
-+riscv_memory_model_suffix (enum memmodel model)
-+{
-+  switch (model)
-+    {
-+      case MEMMODEL_ACQ_REL:
-+      case MEMMODEL_SEQ_CST:
-+      case MEMMODEL_SYNC_SEQ_CST:
-+	return ".sc";
-+      case MEMMODEL_ACQUIRE:
-+      case MEMMODEL_CONSUME:
-+      case MEMMODEL_SYNC_ACQUIRE:
-+	return ".aq";
-+      case MEMMODEL_RELEASE:
-+      case MEMMODEL_SYNC_RELEASE:
-+	return ".rl";
-+      case MEMMODEL_RELAXED:
-+	return "";
-+      default:
-+        fprintf (stderr, "riscv_memory_model_suffix(%d)\n", (int) model);
-+        gcc_unreachable ();
-+    }
-+}
-+
-+/* Implement TARGET_PRINT_OPERAND.  The RISCV-specific operand codes are:
-+
-+   'h'	Print the high-part relocation associated with OP, after stripping
-+	  any outermost HIGH.
-+   'R'	Print the low-part relocation associated with OP.
-+   'C'	Print the integer branch condition for comparison OP.
-+   'A'	Print the atomic operation suffix for memory model OP.
-+   'y'	Print the address register of memory reference OP.
-+   'z'	Print $0 if OP is zero, otherwise print OP normally.  */
-+
-+static void
-+riscv_print_operand (FILE *file, rtx op, int letter)
-+{
-+  enum rtx_code code;
-+
-+  gcc_assert (op);
-+  code = GET_CODE (op);
-+
-+  switch (letter)
-+    {
-+    case 'h':
-+      if (code == HIGH)
-+	op = XEXP (op, 0);
-+      riscv_print_operand_reloc (file, op, riscv_hi_relocs);
-+      break;
-+
-+    case 'R':
-+      riscv_print_operand_reloc (file, op, riscv_lo_relocs);
-+      break;
-+
-+    case 'C':
-+      /* The RTL names match the instruction names. */
-+      fputs (GET_RTX_NAME (code), file);
-+      break;
-+
-+    case 'A':
-+      fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
-+      break;
-+
-+    default:
-+      switch (code)
-+	{
-+	case REG:
-+	  if (letter && letter != 'z')
-+	    output_operand_lossage ("invalid use of '%%%c'", letter);
-+	  fprintf (file, "%s", reg_names[REGNO (op)]);
-+	  break;
-+
-+	case MEM:
-+	  if (letter == 'y')
-+	    fprintf (file, "%s", reg_names[REGNO(XEXP(op, 0))]);
-+	  else if (letter && letter != 'z')
-+	    output_operand_lossage ("invalid use of '%%%c'", letter);
-+	  else
-+	    output_address (XEXP (op, 0));
-+	  break;
-+
-+	default:
-+	  if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
-+	    fputs (reg_names[GP_REG_FIRST], file);
-+	  else if (letter && letter != 'z')
-+	    output_operand_lossage ("invalid use of '%%%c'", letter);
-+	  else
-+	    output_addr_const (file, riscv_strip_unspec_address (op));
-+	  break;
-+	}
-+    }
-+}
-+
-+/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */
-+
-+static void
-+riscv_print_operand_address (FILE *file, rtx x)
-+{
-+  struct riscv_address_info addr;
-+
-+  if (riscv_classify_address (&addr, x, word_mode, true))
-+    switch (addr.type)
-+      {
-+      case ADDRESS_REG:
-+	riscv_print_operand (file, addr.offset, 0);
-+	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
-+	return;
-+
-+      case ADDRESS_LO_SUM:
-+	riscv_print_operand_reloc (file, addr.offset, riscv_lo_relocs);
-+	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
-+	return;
-+
-+      case ADDRESS_CONST_INT:
-+	output_addr_const (file, x);
-+	fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
-+	return;
-+
-+      case ADDRESS_SYMBOLIC:
-+	output_addr_const (file, riscv_strip_unspec_address (x));
-+	return;
-+      }
-+  gcc_unreachable ();
-+}
-+
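-+/* Return true if an object of SIZE bytes qualifies for the small data
-+   section, i.e. SIZE is between 1 and the -G limit (g_switch_value).  */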
-+static bool
-+riscv_size_ok_for_small_data_p (int size)
-+{
-+  return g_switch_value && IN_RANGE (size, 1, g_switch_value);
-+}
-+
-+/* Return true if EXP should be placed in the small data section. */
-+
-+static bool
-+riscv_in_small_data_p (const_tree x)
-+{
-+  if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
-+    return false;
-+
-+  if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
-+    {
-+      const char *sec = DECL_SECTION_NAME (x);
-+      return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
-+    }
-+
-+  return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
-+}
-+
-+/* Return a section for X, handling small data. */
-+
-+static section *
-+riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
-+			      unsigned HOST_WIDE_INT align)
-+{
-+  section *s = default_elf_select_rtx_section (mode, x, align);
-+
-+  if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
-+    {
-+      if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
-+	{
-+	  /* Rename .rodata.cst* to .srodata.cst*. */
-+	  char *name = (char *) alloca (strlen (s->named.name) + 2);
-+	  sprintf (name, ".s%s", s->named.name + 1);
-+	  return get_section (name, s->named.common.flags, NULL);
-+	}
-+
-+      if (s == data_section)
-+	return sdata_section;
-+    }
-+
-+  return s;
-+}
-+
-+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
-+
-+static void ATTRIBUTE_UNUSED
-+riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
-+{
-+  switch (size)
-+    {
-+    case 4:
-+      fputs ("\t.dtprelword\t", file);
-+      break;
-+
-+    case 8:
-+      fputs ("\t.dtpreldword\t", file);
-+      break;
-+
-+    default:
-+      gcc_unreachable ();
-+    }
-+  output_addr_const (file, x);
-+  fputs ("+0x800", file);
-+}
-+
-+/* Make the last instruction frame-related and note that it performs
-+   the operation described by FRAME_PATTERN.  */
-+
-+static void
-+riscv_set_frame_expr (rtx frame_pattern)
-+{
-+  rtx insn;
-+
-+  insn = get_last_insn ();
-+  RTX_FRAME_RELATED_P (insn) = 1;
-+  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-+				      frame_pattern,
-+				      REG_NOTES (insn));
-+}
-+
-+/* Return a frame-related rtx that stores REG at MEM.
-+   REG must be a single register.  */
-+
-+static rtx
-+riscv_frame_set (rtx mem, rtx reg)
-+{
-+  rtx set;
-+
-+  set = gen_rtx_SET (VOIDmode, mem, reg);
-+  RTX_FRAME_RELATED_P (set) = 1;
-+
-+  return set;
-+}
-+
-+/* Return true if the current function must save register REGNO.  */
-+
-+static bool
-+riscv_save_reg_p (unsigned int regno)
-+{
-+  bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
-+  bool might_clobber = crtl->saves_all_registers
-+		       || df_regs_ever_live_p (regno)
-+		       || (regno == HARD_FRAME_POINTER_REGNUM
-+			   && frame_pointer_needed);
-+
-+  return (call_saved && might_clobber)
-+	 || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
-+}
-+
-+/* Determine whether to call GPR save/restore routines.  */
-+static bool
-+riscv_use_save_libcall (const struct riscv_frame_info *frame)
-+{
-+  if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
-+    return false;
-+
-+  return frame->save_libcall_adjustment != 0;
-+}
-+
-+/* Determine which GPR save/restore routine to call.  */
-+
-+static unsigned
-+riscv_save_libcall_count (unsigned mask)
-+{
-+  for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
-+    if (BITSET_P (mask, n))
-+      return CALLEE_SAVED_REG_NUMBER (n) + 1;
-+  abort ();
-+}
-+
-+/* Populate the current function's riscv_frame_info structure.
-+
-+   RISC-V stack frames grow downward.  High addresses are at the top.
-+
-+	+-------------------------------+
-+	|                               |
-+	|  incoming stack arguments     |
-+	|                               |
-+	+-------------------------------+ <-- incoming stack pointer
-+	|                               |
-+	|  callee-allocated save area   |
-+	|  for arguments that are       |
-+	|  split between registers and  |
-+	|  the stack                    |
-+	|                               |
-+	+-------------------------------+ <-- arg_pointer_rtx
-+	|                               |
-+	|  callee-allocated save area   |
-+	|  for register varargs         |
-+	|                               |
-+	+-------------------------------+ <-- hard_frame_pointer_rtx;
-+	|                               |     stack_pointer_rtx + gp_sp_offset
-+	|  GPR save area                |       + UNITS_PER_WORD
-+	|                               |
-+	+-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
-+	|                               |       + UNITS_PER_HWFPVALUE
-+	|  FPR save area                |
-+	|                               |
-+	+-------------------------------+ <-- frame_pointer_rtx (virtual)
-+	|                               |
-+	|  local variables              |
-+	|                               |
-+      P +-------------------------------+
-+	|                               |
-+	|  outgoing stack arguments     |
-+	|                               |
-+	+-------------------------------+ <-- stack_pointer_rtx
-+
-+   Dynamic stack allocations such as alloca insert data at point P.
-+   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
-+   hard_frame_pointer_rtx unchanged.  */
-+
-+static void
-+riscv_compute_frame_info (void)
-+{
-+  struct riscv_frame_info *frame;
-+  HOST_WIDE_INT offset;
-+  unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
-+
-+  frame = &cfun->machine->frame;
-+  memset (frame, 0, sizeof (*frame));
-+
-+  /* Find out which GPRs we need to save.  */
-+  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
-+    if (riscv_save_reg_p (regno))
-+      frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
-+
-+  /* If this function calls eh_return, we must also save and restore the
-+     EH data registers.  */
-+  if (crtl->calls_eh_return)
-+    for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
-+      frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
-+
-+  /* Find out which FPRs we need to save.  This loop must iterate over
-+     the same space as its companion in riscv_for_each_saved_reg.  */
-+  if (TARGET_HARD_FLOAT)
-+    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+      if (riscv_save_reg_p (regno))
-+        frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
-+
-+  /* At the bottom of the frame are any outgoing stack arguments. */
-+  offset = crtl->outgoing_args_size;
-+  /* Next are local stack variables. */
-+  offset += RISCV_STACK_ALIGN (get_frame_size ());
-+  /* The virtual frame pointer points above the local variables. */
-+  frame->frame_pointer_offset = offset;
-+  /* Next are the callee-saved FPRs. */
-+  if (frame->fmask)
-+    {
-+      offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FPREG);
-+      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
-+    }
-+  /* Next are the callee-saved GPRs. */
-+  if (frame->mask)
-+    {
-+      unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
-+      unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
-+
-+      /* Only use save/restore routines if they don't alter the stack size.  */
-+      if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
-+	frame->save_libcall_adjustment = x_save_size;
-+
-+      offset += x_save_size;
-+      frame->gp_sp_offset = offset - UNITS_PER_WORD;
-+    }
-+  /* The hard frame pointer points above the callee-saved GPRs. */
-+  frame->hard_frame_pointer_offset = offset;
-+  /* Above the hard frame pointer is the callee-allocated varargs save area. */
-+  offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
-+  frame->arg_pointer_offset = offset;
-+  /* Next is the callee-allocated area for pretend stack arguments.  */
-+  offset += crtl->args.pretend_args_size;
-+  frame->total_size = offset;
-+  /* Next come the incoming stack pointer and any incoming arguments. */
-+
-+  /* Only use save/restore routines when the GPRs are atop the frame.  */
-+  if (frame->hard_frame_pointer_offset != frame->total_size)
-+    frame->save_libcall_adjustment = 0;
-+}
-+
-+/* Make sure that we're not trying to eliminate to the wrong hard frame
-+   pointer.  */
-+
-+static bool
-+riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
-+{
-+  return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
-+}
-+
-+/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
-+   or argument pointer.  TO is either the stack pointer or hard frame
-+   pointer.  */
-+
-+HOST_WIDE_INT
-+riscv_initial_elimination_offset (int from, int to)
-+{
-+  HOST_WIDE_INT src, dest;
-+
-+  riscv_compute_frame_info ();
-+
-+  if (to == HARD_FRAME_POINTER_REGNUM)
-+    dest = cfun->machine->frame.hard_frame_pointer_offset;
-+  else if (to == STACK_POINTER_REGNUM)
-+    dest = 0; /* this is the base of all offsets */
-+  else
-+    gcc_unreachable ();
-+
-+  if (from == FRAME_POINTER_REGNUM)
-+    src = cfun->machine->frame.frame_pointer_offset;
-+  else if (from == ARG_POINTER_REGNUM)
-+    src = cfun->machine->frame.arg_pointer_offset;
-+  else
-+    gcc_unreachable ();
-+
-+  return src - dest;
-+}
-+
-+/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
-+   previous frame.  */
-+
-+rtx
-+riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
-+{
-+  if (count != 0)
-+    return const0_rtx;
-+
-+  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
-+}
-+
-+/* Emit code to change the current function's return address to
-+   ADDRESS.  SCRATCH is available as a scratch register, if needed.
-+   ADDRESS and SCRATCH are both word-mode GPRs.  */
-+
-+void
-+riscv_set_return_address (rtx address, rtx scratch)
-+{
-+  rtx slot_address;
-+
-+  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
-+  slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
-+				  cfun->machine->frame.gp_sp_offset);
-+  riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
-+}
-+
-+/* A function to save or store a register.  The first argument is the
-+   register and the second is the stack slot.  */
-+typedef void (*riscv_save_restore_fn) (rtx, rtx);
-+
-+/* Use FN to save or restore register REGNO.  MODE is the register's
-+   mode and OFFSET is the offset of its save slot from the current
-+   stack pointer.  */
-+
-+static void
-+riscv_save_restore_reg (enum machine_mode mode, int regno,
-+		       HOST_WIDE_INT offset, riscv_save_restore_fn fn)
-+{
-+  rtx mem;
-+
-+  mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
-+  fn (gen_rtx_REG (mode, regno), mem);
-+}
-+
-+/* Call FN for each register that is saved by the current function.
-+   SP_OFFSET is the offset of the current stack pointer from the start
-+   of the frame.  */
-+
-+static void
-+riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
-+{
-+  HOST_WIDE_INT offset;
-+  int regno;
-+
-+  /* Save the link register and s-registers. */
-+  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
-+  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
-+    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
-+      {
-+        riscv_save_restore_reg (word_mode, regno, offset, fn);
-+        offset -= UNITS_PER_WORD;
-+      }
-+
-+  /* This loop must iterate over the same space as its companion in
-+     riscv_compute_frame_info.  */
-+  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
-+  for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
-+      {
-+	riscv_save_restore_reg (DFmode, regno, offset, fn);
-+	offset -= GET_MODE_SIZE (DFmode);
-+      }
-+}
-+
-+/* Save register REG to MEM.  Make the instruction frame-related.  */
-+
-+static void
-+riscv_save_reg (rtx reg, rtx mem)
-+{
-+  riscv_emit_move (mem, reg);
-+  riscv_set_frame_expr (riscv_frame_set (mem, reg));
-+}
-+
-+/* Restore register REG from MEM.  */
-+
-+static void
-+riscv_restore_reg (rtx reg, rtx mem)
-+{
-+  riscv_emit_move (reg, mem);
-+}
-+
-+/* Return the code to invoke the GPR save routine.  */
-+
-+const char *
-+riscv_output_gpr_save (unsigned mask)
-+{
-+  static char buf[GP_REG_NUM * 32];
-+  size_t len = 0;
-+  unsigned n = riscv_save_libcall_count (mask), i;
-+  unsigned frame_size = RISCV_STACK_ALIGN ((n + 1) * UNITS_PER_WORD);
-+
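-+  /* Use t0 as the link register so that ra remains intact for the save
-+     routine to store along with the other callee-saved registers.  */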
-+  len += sprintf (buf + len, "call\tt0,__riscv_save_%u", n);
-+
-+#ifdef DWARF2_UNWIND_INFO
-+  /* Describe the effect of the call to __riscv_save_X.  */
-+  if (dwarf2out_do_cfi_asm ())
-+    {
-+      len += sprintf (buf + len, "\n\t.cfi_def_cfa_offset %u", frame_size);
-+
-+      for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
-+	if (BITSET_P (cfun->machine->frame.mask, i))
-+	  len += sprintf (buf + len, "\n\t.cfi_offset %u,%d", i,
-+			  (CALLEE_SAVED_REG_NUMBER (i) + 2) * -UNITS_PER_WORD);
-+    }
-+#endif
-+
-+  return buf;
-+}
-+
-+/* Expand the "prologue" pattern.  */
-+
-+void
-+riscv_expand_prologue (void)
-+{
-+  struct riscv_frame_info *frame = &cfun->machine->frame;
-+  HOST_WIDE_INT size = frame->total_size;
-+  unsigned mask = frame->mask;
-+  rtx insn;
-+
-+  if (flag_stack_usage_info)
-+    current_function_static_stack_size = size;
-+
-+  /* When optimizing for size, call a subroutine to save the registers.  */
-+  if (riscv_use_save_libcall (frame))
-+    {
-+      frame->mask = 0; /* Temporarily fib that we need not save GPRs.  */
-+      size -= frame->save_libcall_adjustment;
-+      emit_insn (gen_gpr_save (GEN_INT (mask)));
-+    }
-+
-+  /* Save the registers.  Allocate up to RISCV_MAX_FIRST_STACK_STEP
-+     bytes beforehand; this is enough to cover the register save area
-+     without going out of range.  */
-+  if ((frame->mask | frame->fmask) != 0)
-+    {
-+      HOST_WIDE_INT step1;
-+
-+      step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
-+      insn = gen_add3_insn (stack_pointer_rtx,
-+			    stack_pointer_rtx,
-+			    GEN_INT (-step1));
-+      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
-+      size -= step1;
-+      riscv_for_each_saved_reg (size, riscv_save_reg);
-+    }
-+
-+  frame->mask = mask; /* Undo the above fib.  */
-+
-+  /* Set up the frame pointer, if we're using one.  */
-+  if (frame_pointer_needed)
-+    {
-+      insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
-+                            GEN_INT (frame->hard_frame_pointer_offset - size));
-+      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
-+    }
-+
-+  /* Allocate the rest of the frame.  */
-+  if (size > 0)
-+    {
-+      if (SMALL_OPERAND (-size))
-+	{
-+	  insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
-+				GEN_INT (-size));
-+	  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
-+	}
-+      else
-+	{
-+	  riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
-+	  emit_insn (gen_add3_insn (stack_pointer_rtx,
-+				    stack_pointer_rtx,
-+				    RISCV_PROLOGUE_TEMP (Pmode)));
-+
-+	  /* Describe the effect of the previous instructions.  */
-+	  insn = plus_constant (Pmode, stack_pointer_rtx, -size);
-+	  insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx, insn);
-+	  riscv_set_frame_expr (insn);
-+	}
-+    }
-+}
-+
-+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
-+   says which.  */
-+
-+void
-+riscv_expand_epilogue (bool sibcall_p)
-+{
-+  /* Split the frame into two.  STEP1 is the amount of stack we should
-+     deallocate before restoring the registers.  STEP2 is the amount we
-+     should deallocate afterwards.
-+
-+     Start off by assuming that no registers need to be restored.  */
-+  struct riscv_frame_info *frame = &cfun->machine->frame;
-+  unsigned mask = frame->mask;
-+  HOST_WIDE_INT step1 = frame->total_size;
-+  HOST_WIDE_INT step2 = 0;
-+  bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
-+  rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
-+
-+  if (!sibcall_p && riscv_can_use_return_insn ())
-+    {
-+      emit_jump_insn (gen_return ());
-+      return;
-+    }
-+
-+  /* Move past any dynamic stack allocations.  */
-+  if (cfun->calls_alloca)
-+    {
-+      rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
-+      if (!SMALL_OPERAND (INTVAL (adjust)))
-+	{
-+	  riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
-+	  adjust = RISCV_PROLOGUE_TEMP (Pmode);
-+	}
-+
-+      emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
-+				adjust));
-+    }
-+
-+  /* If we need to restore registers, deallocate as much stack as
-+     possible in the second step without going out of range.  */
-+  if ((frame->mask | frame->fmask) != 0)
-+    {
-+      step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
-+      step1 -= step2;
-+    }
-+
-+  /* Deallocate the first STEP1 bytes by adding STEP1 to the stack pointer.  */
-+  if (step1 > 0)
-+    {
-+      /* Get an rtx for STEP1 that we can add to BASE.  */
-+      rtx adjust = GEN_INT (step1);
-+      if (!SMALL_OPERAND (step1))
-+	{
-+	  riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
-+	  adjust = RISCV_PROLOGUE_TEMP (Pmode);
-+	}
-+
-+      emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
-+    }
-+
-+  if (use_restore_libcall)
-+    frame->mask = 0; /* Temporarily fib that we need not save GPRs.  */
-+
-+  /* Restore the registers.  */
-+  riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
-+
-+  if (use_restore_libcall)
-+    {
-+      frame->mask = mask; /* Undo the above fib.  */
-+      gcc_assert (step2 >= frame->save_libcall_adjustment);
-+      step2 -= frame->save_libcall_adjustment;
-+    }
-+
-+  /* Deallocate the final bit of the frame.  */
-+  if (step2 > 0)
-+    emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
-+			      GEN_INT (step2)));
-+
-+  if (use_restore_libcall)
-+    {
-+      emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
-+      emit_jump_insn (gen_gpr_restore_return (ra));
-+      return;
-+    }
-+
-+  /* Add in the __builtin_eh_return stack adjustment. */
-+  if (crtl->calls_eh_return)
-+    emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
-+			      EH_RETURN_STACKADJ_RTX));
-+
-+  if (!sibcall_p)
-+    emit_jump_insn (gen_simple_return_internal (ra));
-+}
-+
-+/* Return nonzero if this function is known to have a null epilogue.
-+   This allows the optimizer to omit jumps to jumps if no stack
-+   was created.  */
-+
-+bool
-+riscv_can_use_return_insn (void)
-+{
-+  return reload_completed && cfun->machine->frame.total_size == 0;
-+}
-+
-+/* Implement TARGET_REGISTER_MOVE_COST.  */
-+
-+static int
-+riscv_register_move_cost (enum machine_mode mode,
-+			  reg_class_t from, reg_class_t to)
-+{
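-+  /* Register-to-register moves are cheap; moves that would need a
-+     secondary memory reload are much more expensive.  */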
-+  return SECONDARY_MEMORY_NEEDED (from, to, mode) ? 8 : 2;
-+}
-+
-+/* Return true if register REGNO can store a value of mode MODE.
-+   The result of this function is cached in riscv_hard_regno_mode_ok.  */
-+
-+static bool
-+riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
-+{
-+  unsigned int size = GET_MODE_SIZE (mode);
-+  enum mode_class mclass = GET_MODE_CLASS (mode);
-+
-+  /* This is hella bogus but ira_build segfaults on RV32 without it. */
-+  if (VECTOR_MODE_P (mode))
-+    return true;
-+
-+  if (GP_REG_P (regno))
-+    {
-+      if (size <= UNITS_PER_WORD)
-+	return true;
-+
-+      /* Double-word values must be even-register-aligned.  */
-+      if (size <= 2 * UNITS_PER_WORD)
-+	return regno % 2 == 0;
-+    }
-+
-+  if (FP_REG_P (regno))
-+    {
-+      if (mclass == MODE_FLOAT
-+	  || mclass == MODE_COMPLEX_FLOAT
-+	  || mclass == MODE_VECTOR_FLOAT)
-+	return size <= UNITS_PER_FPVALUE;
-+    }
-+
-+  return false;
-+}
-+
-+/* Implement HARD_REGNO_NREGS.  */
-+
-+unsigned int
-+riscv_hard_regno_nregs (int regno, enum machine_mode mode)
-+{
-+  if (FP_REG_P (regno))
-+    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
-+
-+  /* All other registers are word-sized.  */
-+  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-+}
-+
-+/* Implement CLASS_MAX_NREGS.  */
-+
-+static unsigned char
-+riscv_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
-+{
-+  if (reg_class_subset_p (FP_REGS, rclass))
-+    return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
-+
-+  if (reg_class_subset_p (GR_REGS, rclass))
-+    return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
-+
-+  return 0;
-+}
-+
-+/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
-+
-+static reg_class_t
-+riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
-+{
-+  return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
-+         reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
-+	 rclass;
-+}
-+
-+/* Implement TARGET_MEMORY_MOVE_COST.  */
-+
-+static int
-+riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
-+{
-+  return (tune_info->memory_cost
-+	  + memory_move_secondary_cost (mode, rclass, in));
-+}
-+
-+/* Implement TARGET_MODE_REP_EXTENDED.  */
-+
-+static int
-+riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
-+{
-+  /* On 64-bit targets, SImode register values are sign-extended to DImode.  */
-+  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
-+    return SIGN_EXTEND;
-+
-+  return UNKNOWN;
-+}
-+
-+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */
-+
-+static bool
-+riscv_scalar_mode_supported_p (enum machine_mode mode)
-+{
-+  if (ALL_FIXED_POINT_MODE_P (mode)
-+      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
-+    return true;
-+
-+  return default_scalar_mode_supported_p (mode);
-+}
-+
-+/* Return the number of instructions that can be issued per cycle.  */
-+
-+static int
-+riscv_issue_rate (void)
-+{
-+  return tune_info->issue_rate;
-+}
-+
-+/* This structure describes a single built-in function.  */
-+struct riscv_builtin_description {
-+  /* The code of the main .md file instruction.  See riscv_builtin_type
-+     for more information.  */
-+  enum insn_code icode;
-+
-+  /* The name of the built-in function.  */
-+  const char *name;
-+
-+  /* Specifies how the function should be expanded.  */
-+  enum riscv_builtin_type builtin_type;
-+
-+  /* The function's prototype.  */
-+  enum riscv_function_type function_type;
-+
-+  /* Whether the function is available.  */
-+  unsigned int (*avail) (void);
-+};
-+
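-+/* Availability predicate for built-ins that are available on every
-+   RISC-V target.  */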
-+static unsigned int
-+riscv_builtin_avail_riscv (void)
-+{
-+  return 1;
-+}
-+
-+/* Construct a riscv_builtin_description from the given arguments.
-+
-+   INSN is the name of the associated instruction pattern, without the
-+   leading CODE_FOR_riscv_.
-+
-+   CODE is the floating-point condition code associated with the
-+   function.  It can be 'f' if the field is not applicable.
-+
-+   NAME is the name of the function itself, without the leading
-+   "__builtin_riscv_".
-+
-+   BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
-+
-+   AVAIL is the name of the availability predicate, without the leading
-+   riscv_builtin_avail_.  */
-+#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL)	\
-+  { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME,				\
-+    BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
-+
-+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
-+   mapped to instruction CODE_FOR_<INSN>.  FUNCTION_TYPE and AVAIL
-+   are as for RISCV_BUILTIN.  */
-+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\
-+  RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
-+
-+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
-+   function mapped to instruction CODE_FOR_<INSN>.  FUNCTION_TYPE
-+   and AVAIL are as for RISCV_BUILTIN.  */
-+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\
-+  RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET,		\
-+		FUNCTION_TYPE, AVAIL)
-+
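-+/* The table of RISC-V built-in functions; for now it only contains
-+   __builtin_riscv_nop.  */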
-+static const struct riscv_builtin_description riscv_builtins[] = {
-+  DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
-+};
-+
-+/* Index I is the function declaration for riscv_builtins[I], or null if the
-+   function isn't defined on this target.  */
-+static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
-+
-+
-+/* Source-level argument types.  */
-+#define RISCV_ATYPE_VOID void_type_node
-+#define RISCV_ATYPE_INT integer_type_node
-+#define RISCV_ATYPE_POINTER ptr_type_node
-+#define RISCV_ATYPE_CPOINTER const_ptr_type_node
-+
-+/* Standard mode-based argument types.  */
-+#define RISCV_ATYPE_UQI unsigned_intQI_type_node
-+#define RISCV_ATYPE_SI intSI_type_node
-+#define RISCV_ATYPE_USI unsigned_intSI_type_node
-+#define RISCV_ATYPE_DI intDI_type_node
-+#define RISCV_ATYPE_UDI unsigned_intDI_type_node
-+#define RISCV_ATYPE_SF float_type_node
-+#define RISCV_ATYPE_DF double_type_node
-+
-+/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
-+   their associated RISCV_ATYPEs.  */
-+#define RISCV_FTYPE_ATYPES1(A, B) \
-+  RISCV_ATYPE_##A, RISCV_ATYPE_##B
-+
-+#define RISCV_FTYPE_ATYPES2(A, B, C) \
-+  RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
-+
-+#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
-+  RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
-+
-+#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
-+  RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
-+  RISCV_ATYPE_##E
-+
-+/* Return the function type associated with function prototype TYPE.  */
-+
-+static tree
-+riscv_build_function_type (enum riscv_function_type type)
-+{
-+  static tree types[(int) RISCV_MAX_FTYPE_MAX];
-+
-+  if (types[(int) type] == NULL_TREE)
-+    switch (type)
-+      {
-+#define DEF_RISCV_FTYPE(NUM, ARGS)					\
-+  case RISCV_FTYPE_NAME##NUM ARGS:					\
-+    types[(int) type]							\
-+      = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS,		\
-+				  NULL_TREE);				\
-+    break;
-+#include "config/riscv/riscv-ftypes.def"
-+#undef DEF_RISCV_FTYPE
-+      default:
-+	gcc_unreachable ();
-+      }
-+
-+  return types[(int) type];
-+}
-+
-+/* Implement TARGET_INIT_BUILTINS.  */
-+
-+static void
-+riscv_init_builtins (void)
-+{
-+  const struct riscv_builtin_description *d;
-+  unsigned int i;
-+
-+  /* Iterate through all of the bdesc arrays, initializing all of the
-+     builtin functions.  */
-+  for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
-+    {
-+      d = &riscv_builtins[i];
-+      if (d->avail ())
-+	riscv_builtin_decls[i]
-+	  = add_builtin_function (d->name,
-+				  riscv_build_function_type (d->function_type),
-+				  i, BUILT_IN_MD, NULL, NULL);
-+    }
-+}
-+
-+/* Implement TARGET_BUILTIN_DECL.  */
-+
-+static tree
-+riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
-+{
-+  if (code >= ARRAY_SIZE (riscv_builtins))
-+    return error_mark_node;
-+  return riscv_builtin_decls[code];
-+}
-+
-+/* Take argument ARGNO from EXP's argument list and convert it into a
-+   form suitable for input operand OPNO of instruction ICODE.  Return the
-+   value.  */
-+
-+static rtx
-+riscv_prepare_builtin_arg (enum insn_code icode,
-+			  unsigned int opno, tree exp, unsigned int argno)
-+{
-+  tree arg;
-+  rtx value;
-+  enum machine_mode mode;
-+
-+  arg = CALL_EXPR_ARG (exp, argno);
-+  value = expand_normal (arg);
-+  mode = insn_data[icode].operand[opno].mode;
-+  if (!insn_data[icode].operand[opno].predicate (value, mode))
-+    {
-+      /* We need to get the mode from ARG for two reasons:
-+
-+	   - to cope with address operands, where MODE is the mode of the
-+	     memory, rather than of VALUE itself.
-+
-+	   - to cope with special predicates like pmode_register_operand,
-+	     where MODE is VOIDmode.  */
-+      value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
-+
-+      /* Check the predicate again.  */
-+      if (!insn_data[icode].operand[opno].predicate (value, mode))
-+	{
-+	  error ("invalid argument to built-in function");
-+	  return const0_rtx;
-+	}
-+    }
-+
-+  return value;
-+}
-+
-+/* Return an rtx suitable for output operand OP of instruction ICODE.
-+   If TARGET is non-null, try to use it where possible.  */
-+
-+static rtx
-+riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
-+{
-+  enum machine_mode mode;
-+
-+  mode = insn_data[icode].operand[op].mode;
-+  if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
-+    target = gen_reg_rtx (mode);
-+
-+  return target;
-+}
-+
-+/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
-+   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
-+   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
-+   suggests a good place to put the result.  */
-+
-+static rtx
-+riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
-+			    bool has_target_p)
-+{
-+  rtx ops[MAX_RECOG_OPERANDS];
-+  int opno, argno;
-+
-+  /* Map any target to operand 0.  */
-+  opno = 0;
-+  if (has_target_p)
-+    {
-+      target = riscv_prepare_builtin_target (icode, opno, target);
-+      ops[opno] = target;
-+      opno++;
-+    }
-+
-+  /* Map the arguments to the other operands.  The n_operands value
-+     for an expander includes match_dups and match_scratches as well as
-+     match_operands, so n_operands is only an upper bound on the number
-+     of arguments to the expander function.  */
-+  gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
-+  for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
-+    ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
-+
-+  switch (opno)
-+    {
-+    case 2:
-+      emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
-+      break;
-+
-+    case 3:
-+      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
-+      break;
-+
-+    case 4:
-+      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
-+      break;
-+
-+    default:
-+      gcc_unreachable ();
-+    }
-+  return target;
-+}
-+
-+/* Implement TARGET_EXPAND_BUILTIN.  */
-+
-+static rtx
-+riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
-+		     enum machine_mode mode ATTRIBUTE_UNUSED,
-+		     int ignore ATTRIBUTE_UNUSED)
-+{
-+  tree fndecl;
-+  unsigned int fcode, avail;
-+  const struct riscv_builtin_description *d;
-+
-+  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
-+  fcode = DECL_FUNCTION_CODE (fndecl);
-+  gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
-+  d = &riscv_builtins[fcode];
-+  avail = d->avail ();
-+  gcc_assert (avail != 0);
-+  switch (d->builtin_type)
-+    {
-+    case RISCV_BUILTIN_DIRECT:
-+      return riscv_expand_builtin_direct (d->icode, target, exp, true);
-+
-+    case RISCV_BUILTIN_DIRECT_NO_TARGET:
-+      return riscv_expand_builtin_direct (d->icode, target, exp, false);
-+    }
-+  gcc_unreachable ();
-+}
-+
-+/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
-+   in order to avoid duplicating too much logic from elsewhere.  */
-+
-+static void
-+riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
-+		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
-+		      tree function)
-+{
-+  rtx this_rtx, temp1, temp2, fnaddr;
-+  rtx_insn *insn;
-+  bool use_sibcall_p;
-+
-+  /* Pretend to be a post-reload pass while generating rtl.  */
-+  reload_completed = 1;
-+
-+  /* Mark the end of the (empty) prologue.  */
-+  emit_note (NOTE_INSN_PROLOGUE_END);
-+
-+  /* Determine if we can use a sibcall to call FUNCTION directly.  */
-+  fnaddr = XEXP (DECL_RTL (function), 0);
-+  use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
-+
-+  /* We need two temporary registers in some cases.  */
-+  temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
-+  temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
-+
-+  /* Find out which register contains the "this" pointer.  */
-+  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
-+    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
-+  else
-+    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
-+
-+  /* Add DELTA to THIS_RTX.  */
-+  if (delta != 0)
-+    {
-+      rtx offset = GEN_INT (delta);
-+      if (!SMALL_OPERAND (delta))
-+	{
-+	  riscv_emit_move (temp1, offset);
-+	  offset = temp1;
-+	}
-+      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
-+    }
-+
-+  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
-+  if (vcall_offset != 0)
-+    {
-+      rtx addr;
-+
-+      /* Set TEMP1 to *THIS_RTX.  */
-+      riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
-+
-+      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
-+      addr = riscv_add_offset (temp2, temp1, vcall_offset);
-+
-+      /* Load the offset and add it to THIS_RTX.  */
-+      riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
-+      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
-+    }
-+
-+  /* Jump to the target function.  Use a sibcall if direct jumps are
-+     allowed, otherwise load the address into a register first.  */
-+  if (use_sibcall_p)
-+    {
-+      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
-+      SIBLING_CALL_P (insn) = 1;
-+    }
-+  else
-+    {
-+      riscv_emit_move (temp1, fnaddr);
-+      emit_jump_insn (gen_indirect_jump (temp1));
-+    }
-+
-+  /* Run just enough of rest_of_compilation.  This sequence was
-+     "borrowed" from alpha.c.  */
-+  insn = get_insns ();
-+  split_all_insns_noflow ();
-+  shorten_branches (insn);
-+  final_start_function (insn, file, 1);
-+  final (insn, file, 1);
-+  final_end_function ();
-+
-+  /* Clean up the vars set above.  Note that final_end_function resets
-+     the global pointer for us.  */
-+  reload_completed = 0;
-+}
-+
-+/* Allocate a chunk of memory for per-function machine-dependent data.  */
-+
-+static struct machine_function *
-+riscv_init_machine_status (void)
-+{
-+  return ggc_cleared_alloc<machine_function> ();
-+}
-+
-+/* Implement TARGET_OPTION_OVERRIDE.  */
-+
-+static void
-+riscv_option_override (void)
-+{
-+  int regno, mode;
-+  const struct riscv_cpu_info *cpu;
-+
-+#ifdef SUBTARGET_OVERRIDE_OPTIONS
-+  SUBTARGET_OVERRIDE_OPTIONS;
-+#endif
-+
-+  flag_pcc_struct_return = 0;
-+
-+  if (flag_pic)
-+    g_switch_value = 0;
-+
-+  /* Prefer a call to memcpy over inline code when optimizing for size,
-+     though see MOVE_RATIO in riscv.h.  */
-+  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
-+    target_flags |= MASK_MEMCPY;
-+
-+  /* Handle -mtune.  */
-+  cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
-+			 RISCV_TUNE_STRING_DEFAULT);
-+  tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
-+
-+  /* If the user hasn't specified a branch cost, use the processor's
-+     default.  */
-+  if (riscv_branch_cost == 0)
-+    riscv_branch_cost = tune_info->branch_cost;
-+
-+  /* Set up riscv_hard_regno_mode_ok.  */
-+  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
-+    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
-+      riscv_hard_regno_mode_ok[mode][regno]
-+	= riscv_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
-+
-+  /* Function to allocate machine-dependent function status.  */
-+  init_machine_status = &riscv_init_machine_status;
-+
-+  if (riscv_cmodel_string)
-+    {
-+      if (strcmp (riscv_cmodel_string, "medlow") == 0)
-+	riscv_cmodel = CM_MEDLOW;
-+      else if (strcmp (riscv_cmodel_string, "medany") == 0)
-+	riscv_cmodel = CM_MEDANY;
-+      else
-+	error ("unsupported code model: %s", riscv_cmodel_string);
-+    }
-+
-+  if (flag_pic)
-+    riscv_cmodel = CM_PIC;
-+
-+  riscv_init_relocs ();
-+}
-+
-+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */
-+
-+static void
-+riscv_conditional_register_usage (void)
-+{
-+  int regno;
-+
-+  if (!TARGET_HARD_FLOAT)
-+    {
-+      for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+	fixed_regs[regno] = call_used_regs[regno] = 1;
-+    }
-+}
-+
-+/* Return a register priority for hard reg REGNO.  */
-+static int
-+riscv_register_priority (int regno)
-+{
-+  /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection.  */
-+  if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
-+		     || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
-+    return 1;
-+
-+  return 0;
-+}
-+
-+/* Implement TARGET_TRAMPOLINE_INIT.  */
-+
-+static void
-+riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
-+{
-+  rtx addr, end_addr, mem;
-+  uint32_t trampoline[4];
-+  unsigned int i;
-+  HOST_WIDE_INT static_chain_offset, target_function_offset;
-+
-+  /* Work out the offsets of the pointers from the start of the
-+     trampoline code.  */
-+  gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
-+  static_chain_offset = TRAMPOLINE_CODE_SIZE;
-+  target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
-+
-+  /* Get pointers to the beginning and end of the code block.  */
-+  addr = force_reg (Pmode, XEXP (m_tramp, 0));
-+  end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
-+
-+  /* auipc   t0, 0
-+     l[wd]   t1, target_function_offset(t0)
-+     l[wd]   t0, static_chain_offset(t0)
-+     jr      t1
-+  */
-+  trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
-+  trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
-+		  | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
-+		  | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
-+		  | (target_function_offset << SHIFT_IMM);
-+  trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
-+		  | (STATIC_CHAIN_REGNUM << SHIFT_RD)
-+		  | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
-+		  | (static_chain_offset << SHIFT_IMM);
-+  trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
-+
-+  /* Copy the trampoline code.  */
-+  for (i = 0; i < ARRAY_SIZE (trampoline); i++)
-+    {
-+      mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
-+      riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
-+    }
-+
-+  /* Set up the static chain pointer field.  */
-+  mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
-+  riscv_emit_move (mem, chain_value);
-+
-+  /* Set up the target function field.  */
-+  mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
-+  riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
-+
-+  /* Flush the code part of the trampoline.  */
-+  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
-+  emit_insn (gen_clear_cache (addr, end_addr));
-+}
-+
-+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */
-+
-+static bool
-+riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
-+			       tree exp ATTRIBUTE_UNUSED)
-+{
-+  if (TARGET_SAVE_RESTORE)
-+    {
-+      /* When optimizing for size, don't use sibcalls in non-leaf routines.  */
-+      if (cfun->machine->is_leaf == 0)
-+	cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
-+
-+      return cfun->machine->is_leaf > 0;
-+    }
-+
-+  return true;
-+}
-+
-+/* Initialize the GCC target structure.  */
-+#undef TARGET_ASM_ALIGNED_HI_OP
-+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
-+#undef TARGET_ASM_ALIGNED_SI_OP
-+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
-+#undef TARGET_ASM_ALIGNED_DI_OP
-+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
-+
-+#undef TARGET_OPTION_OVERRIDE
-+#define TARGET_OPTION_OVERRIDE riscv_option_override
-+
-+#undef TARGET_LEGITIMIZE_ADDRESS
-+#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
-+
-+#undef TARGET_SCHED_ISSUE_RATE
-+#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
-+
-+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
-+#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
-+
-+#undef TARGET_REGISTER_MOVE_COST
-+#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
-+#undef TARGET_MEMORY_MOVE_COST
-+#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
-+#undef TARGET_RTX_COSTS
-+#define TARGET_RTX_COSTS riscv_rtx_costs
-+#undef TARGET_ADDRESS_COST
-+#define TARGET_ADDRESS_COST riscv_address_cost
-+
-+#undef  TARGET_PREFERRED_RELOAD_CLASS
-+#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
-+
-+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
-+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
-+
-+#undef TARGET_EXPAND_BUILTIN_VA_START
-+#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
-+
-+#undef  TARGET_PROMOTE_FUNCTION_MODE
-+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
-+
-+#undef TARGET_RETURN_IN_MEMORY
-+#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
-+
-+#undef TARGET_ASM_OUTPUT_MI_THUNK
-+#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
-+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
-+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
-+
-+#undef TARGET_PRINT_OPERAND
-+#define TARGET_PRINT_OPERAND riscv_print_operand
-+#undef TARGET_PRINT_OPERAND_ADDRESS
-+#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
-+
-+#undef TARGET_SETUP_INCOMING_VARARGS
-+#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
-+#undef TARGET_STRICT_ARGUMENT_NAMING
-+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
-+#undef TARGET_MUST_PASS_IN_STACK
-+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
-+#undef TARGET_PASS_BY_REFERENCE
-+#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
-+#undef TARGET_ARG_PARTIAL_BYTES
-+#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
-+#undef TARGET_FUNCTION_ARG
-+#define TARGET_FUNCTION_ARG riscv_function_arg
-+#undef TARGET_FUNCTION_ARG_ADVANCE
-+#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
-+#undef TARGET_FUNCTION_ARG_BOUNDARY
-+#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
-+
-+#undef TARGET_MODE_REP_EXTENDED
-+#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
-+
-+#undef TARGET_SCALAR_MODE_SUPPORTED_P
-+#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
-+
-+#undef TARGET_INIT_BUILTINS
-+#define TARGET_INIT_BUILTINS riscv_init_builtins
-+#undef TARGET_BUILTIN_DECL
-+#define TARGET_BUILTIN_DECL riscv_builtin_decl
-+#undef TARGET_EXPAND_BUILTIN
-+#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
-+
-+#undef TARGET_HAVE_TLS
-+#define TARGET_HAVE_TLS HAVE_AS_TLS
-+
-+#undef TARGET_CANNOT_FORCE_CONST_MEM
-+#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
-+
-+#undef TARGET_LEGITIMATE_CONSTANT_P
-+#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
-+
-+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
-+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
-+
-+#ifdef HAVE_AS_DTPRELWORD
-+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
-+#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
-+#endif
-+
-+#undef TARGET_LEGITIMATE_ADDRESS_P
-+#define TARGET_LEGITIMATE_ADDRESS_P	riscv_legitimate_address_p
-+
-+#undef TARGET_CAN_ELIMINATE
-+#define TARGET_CAN_ELIMINATE riscv_can_eliminate
-+
-+#undef TARGET_CONDITIONAL_REGISTER_USAGE
-+#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
-+
-+#undef TARGET_CLASS_MAX_NREGS
-+#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
-+
-+#undef TARGET_TRAMPOLINE_INIT
-+#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
-+
-+#undef TARGET_IN_SMALL_DATA_P
-+#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
-+
-+#undef TARGET_ASM_SELECT_RTX_SECTION
-+#define TARGET_ASM_SELECT_RTX_SECTION  riscv_elf_select_rtx_section
-+
-+#undef TARGET_MIN_ANCHOR_OFFSET
-+#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
-+
-+#undef TARGET_MAX_ANCHOR_OFFSET
-+#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
-+
-+#undef TARGET_LRA_P
-+#define TARGET_LRA_P hook_bool_void_true
-+
-+#undef TARGET_REGISTER_PRIORITY
-+#define TARGET_REGISTER_PRIORITY riscv_register_priority
-+
-+struct gcc_target targetm = TARGET_INITIALIZER;
-+
-+#include "gt-riscv.h"
-diff -urN empty/gcc/config/riscv/riscv-ftypes.def gcc-5.3.0/gcc/config/riscv/riscv-ftypes.def
---- empty/gcc/config/riscv/riscv-ftypes.def	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv-ftypes.def	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,39 @@
-+/* Definitions of prototypes for RISC-V built-in functions.
-+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
-+   RISC-V built-in functions, where:
-+
-+      NARGS is the number of arguments.
-+      LIST contains the return-type code followed by the codes for each
-+        argument type.
-+
-+   Argument- and return-type codes are either modes or one of the following:
-+
-+      VOID for void_type_node
-+      INT for integer_type_node
-+      POINTER for ptr_type_node
-+
-+   (we don't use PTR because that's an ANSI-compatibility macro).
-+
-+   Please keep this list lexicographically sorted by the LIST argument.  */
-+
-+DEF_RISCV_FTYPE (1, (VOID, VOID))
-diff -urN empty/gcc/config/riscv/riscv.h gcc-5.3.0/gcc/config/riscv/riscv.h
---- empty/gcc/config/riscv/riscv.h	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.h	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,1079 @@
-+/* Definition of RISC-V target for GNU compiler.
-+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
-+   directly accessible, while the command-line options select
-+   TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
-+   in use.  */
-+#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI
-+#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI
-+
-+/* Target CPU builtins.  */
-+#define TARGET_CPU_CPP_BUILTINS()					\
-+  do									\
-+    {									\
-+      builtin_assert ("machine=riscv");					\
-+									\
-+      builtin_assert ("cpu=riscv");					\
-+      builtin_define ("__riscv__");					\
-+      builtin_define ("__riscv");					\
-+      builtin_define ("_riscv");					\
-+      builtin_define ("__riscv");					\
-+									\
-+      if (TARGET_64BIT)							\
-+	{								\
-+	  builtin_define ("__riscv64");					\
-+	  builtin_define ("_RISCV_SIM=_ABI64");				\
-+	}								\
-+      else								\
-+	{								\
-+	  builtin_define ("__riscv32");					\
-+	  builtin_define ("_RISCV_SIM=_ABI32");				\
-+	}								\
-+									\
-+      builtin_define ("_ABI32=1");					\
-+      builtin_define ("_ABI64=3");					\
-+									\
-+									\
-+      builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE);	\
-+      builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE);	\
-+      builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE);	\
-+									\
-+      if (TARGET_RVC)							\
-+	builtin_define ("__riscv_compressed");				\
-+									\
-+      if (TARGET_ATOMIC)						\
-+	builtin_define ("__riscv_atomic");				\
-+									\
-+      if (TARGET_MULDIV)						\
-+	builtin_define ("__riscv_muldiv");				\
-+									\
-+      if (TARGET_HARD_FLOAT_ABI)					\
-+	{								\
-+	  builtin_define ("__riscv_hard_float");			\
-+	  if (TARGET_FDIV)						\
-+	    {								\
-+	      builtin_define ("__riscv_fdiv");				\
-+	      builtin_define ("__riscv_fsqrt");				\
-+	    }								\
-+	}								\
-+      else								\
-+	builtin_define ("__riscv_soft_float");				\
-+									\
-+      /* The base RISC-V ISA is always little-endian. */		\
-+      builtin_define_std ("RISCVEL");					\
-+									\
-+      if (riscv_cmodel == CM_MEDANY)					\
-+	builtin_define ("_RISCV_CMODEL_MEDANY");			\
-+    }									\
-+  while (0)
-+
-+/* Default target_flags if no switches are specified  */
-+
-+#ifndef TARGET_DEFAULT
-+#define TARGET_DEFAULT 0
-+#endif
-+
-+#ifndef RISCV_ARCH_STRING_DEFAULT
-+#define RISCV_ARCH_STRING_DEFAULT "IMAFD"
-+#endif
-+
-+#ifndef RISCV_TUNE_STRING_DEFAULT
-+#define RISCV_TUNE_STRING_DEFAULT "rocket"
-+#endif
-+
-+#ifndef TARGET_64BIT_DEFAULT
-+#define TARGET_64BIT_DEFAULT 1
-+#endif
-+
-+#if TARGET_64BIT_DEFAULT
-+# define MULTILIB_ARCH_DEFAULT "m64"
-+# define OPT_ARCH64 "!m32"
-+# define OPT_ARCH32 "m32"
-+#else
-+# define MULTILIB_ARCH_DEFAULT "m32"
-+# define OPT_ARCH64 "m64"
-+# define OPT_ARCH32 "!m64"
-+#endif
-+
-+#ifndef MULTILIB_DEFAULTS
-+#define MULTILIB_DEFAULTS \
-+    { MULTILIB_ARCH_DEFAULT }
-+#endif
-+
-+
-+/* Support for a compile-time default CPU, et cetera.  The rules are:
-+   --with-arch is ignored if -march is specified.
-+   --with-tune is ignored if -mtune is specified.
-+   --with-float is ignored if -mhard-float or -msoft-float are specified. */
-+#define OPTION_DEFAULT_SPECS \
-+  {"arch", "%{!march=*:-march=%(VALUE)}"},			   \
-+  {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
-+  {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
-+  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
-+  {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
-+
-+#define DRIVER_SELF_SPECS ""
-+
-+#ifdef IN_LIBGCC2
-+#undef TARGET_64BIT
-+/* Make this compile time constant for libgcc2 */
-+#ifdef __riscv64
-+#define TARGET_64BIT		1
-+#else
-+#define TARGET_64BIT		0
-+#endif
-+#endif /* IN_LIBGCC2 */
-+
-+/* Tell collect what flags to pass to nm.  */
-+#ifndef NM_FLAGS
-+#define NM_FLAGS "-Bn"
-+#endif
-+
-+#undef ASM_SPEC
-+#define ASM_SPEC "\
-+%(subtarget_asm_debugging_spec) \
-+%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
-+%{mrvc} %{mno-rvc} \
-+%{msoft-float} %{mhard-float} \
-+%{fPIC|fpic|fPIE|fpie:-fpic} \
-+%{march=*} \
-+%(subtarget_asm_spec)"
-+
-+/* Extra switches sometimes passed to the linker.  */
-+
-+#ifndef LINK_SPEC
-+#define LINK_SPEC "\
-+%{!T:-dT riscv.ld} \
-+%{m64:-melf64lriscv} \
-+%{m32:-melf32lriscv} \
-+%{shared}"
-+#endif  /* LINK_SPEC defined */
-+
-+/* This macro defines names of additional specifications to put in the specs
-+   that can be used in various specifications like CC1_SPEC.  Its definition
-+   is an initializer with a subgrouping for each command option.
-+
-+   Each subgrouping contains a string constant, that defines the
-+   specification name, and a string constant that is used by the GCC driver
-+   program.
-+
-+   Do not define this macro if it does not need to do anything.  */
-+
-+#define EXTRA_SPECS							\
-+  { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT },		\
-+  SUBTARGET_EXTRA_SPECS
-+
-+#ifndef SUBTARGET_EXTRA_SPECS
-+#define SUBTARGET_EXTRA_SPECS
-+#endif
-+
-+#define TARGET_DEFAULT_CMODEL CM_MEDLOW
-+
-+/* By default, turn on GDB extensions.  */
-+#define DEFAULT_GDB_EXTENSIONS 1
-+
-+#define LOCAL_LABEL_PREFIX	"."
-+#define USER_LABEL_PREFIX	""
-+
-+#define DWARF2_DEBUGGING_INFO 1
-+#define DWARF2_ASM_LINE_DEBUG_INFO 0
-+
-+/* The mapping from gcc register number to DWARF 2 CFA column number.  */
-+#define DWARF_FRAME_REGNUM(REGNO) \
-+  (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
-+
-+/* The DWARF 2 CFA column which tracks the return address.  */
-+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
-+
-+/* Don't emit .cfi_sections, as it does not work */
-+#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
-+#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
-+
-+/* Before the prologue, RA lives in x1.  */
-+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
-+
-+/* Describe how we implement __builtin_eh_return.  */
-+#define EH_RETURN_DATA_REGNO(N) \
-+  ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
-+
-+#define EH_RETURN_STACKADJ_RTX  gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
-+
-+/* Target machine storage layout */
-+
-+#define BITS_BIG_ENDIAN 0
-+#define BYTES_BIG_ENDIAN 0
-+#define WORDS_BIG_ENDIAN 0
-+
-+#define MAX_BITS_PER_WORD 64
-+
-+/* Width of a word, in units (bytes).  */
-+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
-+#ifndef IN_LIBGCC2
-+#define MIN_UNITS_PER_WORD 4
-+#endif
-+
-+/* We currently require both or neither of the `F' and `D' extensions. */
-+#define UNITS_PER_FPREG 8
-+
-+/* The largest size of value that can be held in floating-point
-+   registers and moved with a single instruction.  */
-+#define UNITS_PER_HWFPVALUE \
-+  (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG)
-+
-+/* The largest size of value that can be held in floating-point
-+   registers.  */
-+#define UNITS_PER_FPVALUE			\
-+  (TARGET_SOFT_FLOAT_ABI ? 0			\
-+   : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
-+
-+/* The number of bytes in a double.  */
-+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
-+
-+/* Set the sizes of the core types.  */
-+#define SHORT_TYPE_SIZE 16
-+#define INT_TYPE_SIZE 32
-+#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
-+#define LONG_LONG_TYPE_SIZE 64
-+
-+#define FLOAT_TYPE_SIZE 32
-+#define DOUBLE_TYPE_SIZE 64
-+/* XXX The ABI says long doubles are IEEE-754-2008 float128s. */
-+#define LONG_DOUBLE_TYPE_SIZE 64
-+
-+#ifdef IN_LIBGCC2
-+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
-+#endif
-+
-+/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
-+#define PARM_BOUNDARY BITS_PER_WORD
-+
-+/* Allocation boundary (in *bits*) for the code of a function.  */
-+#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
-+
-+/* There is no point aligning anything to a rounder boundary than this.  */
-+#define BIGGEST_ALIGNMENT 128
-+
-+/* All accesses must be aligned.  */
-+#define STRICT_ALIGNMENT 1
-+
-+/* Define this if you wish to imitate the way many other C compilers
-+   handle alignment of bitfields and the structures that contain
-+   them.
-+
-+   The behavior is that the type written for a bit-field (`int',
-+   `short', or other integer type) imposes an alignment for the
-+   entire structure, as if the structure really did contain an
-+   ordinary field of that type.  In addition, the bit-field is placed
-+   within the structure so that it would fit within such a field,
-+   not crossing a boundary for it.
-+
-+   Thus, on most machines, a bit-field whose type is written as `int'
-+   would not cross a four-byte boundary, and would force four-byte
-+   alignment for the whole structure.  (The alignment used may not
-+   be four bytes; it is controlled by the other alignment
-+   parameters.)
-+
-+   If the macro is defined, its definition should be a C expression;
-+   a nonzero value for the expression enables this behavior.  */
-+
-+#define PCC_BITFIELD_TYPE_MATTERS 1
-+
-+/* If defined, a C expression to compute the alignment given to a
-+   constant that is being placed in memory.  CONSTANT is the constant
-+   and ALIGN is the alignment that the object would ordinarily have.
-+   The value of this macro is used instead of that alignment to align
-+   the object.
-+
-+   If this macro is not defined, then ALIGN is used.
-+
-+   The typical use of this macro is to increase alignment for string
-+   constants to be word aligned so that `strcpy' calls that copy
-+   constants can be done inline.  */
-+
-+#define CONSTANT_ALIGNMENT(EXP, ALIGN)					\
-+  ((TREE_CODE (EXP) == STRING_CST  || TREE_CODE (EXP) == CONSTRUCTOR)	\
-+   && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
-+
-+/* If defined, a C expression to compute the alignment for a static
-+   variable.  TYPE is the data type, and ALIGN is the alignment that
-+   the object would ordinarily have.  The value of this macro is used
-+   instead of that alignment to align the object.
-+
-+   If this macro is not defined, then ALIGN is used.
-+
-+   One use of this macro is to increase alignment of medium-size
-+   data to make it all fit in fewer cache lines.  Another is to
-+   cause character arrays to be word-aligned so that `strcpy' calls
-+   that copy constants to character arrays can be done inline.  */
-+
-+#undef DATA_ALIGNMENT
-+#define DATA_ALIGNMENT(TYPE, ALIGN)					\
-+  ((((ALIGN) < BITS_PER_WORD)						\
-+    && (TREE_CODE (TYPE) == ARRAY_TYPE					\
-+	|| TREE_CODE (TYPE) == UNION_TYPE				\
-+	|| TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
-+
-+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
-+   character arrays to be word-aligned so that `strcpy' calls that copy
-+   constants to character arrays can be done inline, and 'strcmp' can be
-+   optimised to use word loads. */
-+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
-+  DATA_ALIGNMENT (TYPE, ALIGN)
-+
-+/* Define if operations between registers always perform the operation
-+   on the full register even if a narrower mode is specified.  */
-+#define WORD_REGISTER_OPERATIONS
-+
-+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
-+   moves.  All other references are zero extended.  */
-+#define LOAD_EXTEND_OP(MODE) \
-+  (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
-+   ? SIGN_EXTEND : ZERO_EXTEND)
-+
-+/* Define this macro if it is advisable to hold scalars in registers
-+   in a wider mode than that declared by the program.  In such cases,
-+   the value is constrained to be within the bounds of the declared
-+   type, but kept valid in the wider mode.  The signedness of the
-+   extension may differ from that of the type.  */
-+
-+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)	\
-+  if (GET_MODE_CLASS (MODE) == MODE_INT		\
-+      && GET_MODE_SIZE (MODE) < 4)		\
-+    {						\
-+      (MODE) = Pmode;				\
-+    }
-+
-+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
-+   Extensions of pointers to word_mode must be signed.  */
-+#define POINTERS_EXTEND_UNSIGNED false
-+
-+/* When floating-point registers are wider than integer ones, moves between
-+   them must go through memory.  */
-+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE)	\
-+  (GET_MODE_SIZE (MODE) > UNITS_PER_WORD		\
-+   && ((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS))
-+
-+/* Define if loading short immediate values into registers sign extends.  */
-+#define SHORT_IMMEDIATES_SIGN_EXTEND
-+
-+/* Standard register usage.  */
-+
-+/* Number of hardware registers.  We have:
-+
-+   - 32 integer registers
-+   - 32 floating point registers
-+   - 32 vector integer registers
-+   - 32 vector floating point registers
-+   - 2 fake registers:
-+	- ARG_POINTER_REGNUM
-+	- FRAME_POINTER_REGNUM */
-+
-+#define FIRST_PSEUDO_REGISTER 66
-+
-+/* x0, sp, gp, and tp are fixed. */
-+
-+#define FIXED_REGISTERS							\
-+{ /* General registers.  */                                             \
-+  1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
-+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
-+  /* Floating-point registers.  */                                      \
-+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
-+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
-+  /* Others.  */                                                        \
-+  1, 1 \
-+}
-+
-+
-+/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
-+   The call RTLs themselves clobber ra.  */
-+
-+#define CALL_USED_REGISTERS						\
-+{ /* General registers.  */                                             \
-+  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
-+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
-+  /* Floating-point registers.  */                                      \
-+  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
-+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
-+  /* Others.  */                                                        \
-+  1, 1 \
-+}
-+
-+#define CALL_REALLY_USED_REGISTERS                                      \
-+{ /* General registers.  */                                             \
-+  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
-+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
-+  /* Floating-point registers.  */                                      \
-+  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
-+  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
-+  /* Others.  */                                                        \
-+  1, 1 \
-+}
-+
-+/* Internal macros to classify an ISA register's type. */
-+
-+#define GP_REG_FIRST 0
-+#define GP_REG_LAST  31
-+#define GP_REG_NUM   (GP_REG_LAST - GP_REG_FIRST + 1)
-+
-+#define FP_REG_FIRST 32
-+#define FP_REG_LAST  63
-+#define FP_REG_NUM   (FP_REG_LAST - FP_REG_FIRST + 1)
-+
-+/* The DWARF 2 CFA column which tracks the return address from a
-+   signal handler context.  This means that to maintain backwards
-+   compatibility, no hard register can be assigned this column if it
-+   would need to be handled by the DWARF unwinder.  */
-+#define DWARF_ALT_FRAME_RETURN_COLUMN 64
-+
-+#define GP_REG_P(REGNO)	\
-+  ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
-+#define FP_REG_P(REGNO)  \
-+  ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
-+
-+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
-+
-+/* Return coprocessor number from register number.  */
-+
-+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) 				\
-+  (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2'			\
-+   : COP3_REG_P (REGNO) ? '3' : '?')
-+
-+
-+#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
-+
-+#define HARD_REGNO_MODE_OK(REGNO, MODE)					\
-+  riscv_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
-+
-+#define MODES_TIEABLE_P(MODE1, MODE2)					\
-+  ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT		\
-+			  && GET_MODE_CLASS (MODE2) == MODE_INT))
-+
-+/* Use s0 as the frame pointer if it is so requested. */
-+#define HARD_FRAME_POINTER_REGNUM 8
-+#define STACK_POINTER_REGNUM 2
-+#define THREAD_POINTER_REGNUM 4
-+
-+/* These two registers don't really exist: they get eliminated to either
-+   the stack or hard frame pointer.  */
-+#define ARG_POINTER_REGNUM 64
-+#define FRAME_POINTER_REGNUM 65
-+
-+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
-+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
-+
-+/* Register in which static-chain is passed to a function.  */
-+#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
-+
-+/* Registers used as temporaries in prologue/epilogue code.
-+
-+   The prologue registers mustn't conflict with any
-+   incoming arguments, the static chain pointer, or the frame pointer.
-+   The epilogue temporary mustn't conflict with the return registers,
-+   the frame pointer, the EH stack adjustment, or the EH data registers. */
-+
-+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
-+#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
-+
-+#define FUNCTION_PROFILER(STREAM, LABELNO)	\
-+{						\
-+    sorry ("profiler support for RISC-V");	\
-+}
-+
-+/* Define this macro if it is as good or better to call a constant
-+   function address than to call an address kept in a register.  */
-+#define NO_FUNCTION_CSE 1
-+
-+/* Define the classes of registers for register constraints in the
-+   machine description.  Also define ranges of constants.
-+
-+   One of the classes must always be named ALL_REGS and include all hard regs.
-+   If there is more than one class, another class must be named NO_REGS
-+   and contain no registers.
-+
-+   The name GENERAL_REGS must be the name of a class (or an alias for
-+   another name such as ALL_REGS).  This is the class of registers
-+   that is allowed by "g" or "r" in a register constraint.
-+   Also, registers outside this class are allocated only when
-+   instructions express preferences for them.
-+
-+   The classes must be numbered in nondecreasing order; that is,
-+   a larger-numbered class must never be contained completely
-+   in a smaller-numbered class.
-+
-+   For any two classes, it is very desirable that there be another
-+   class that represents their union.  */
-+
-+enum reg_class
-+{
-+  NO_REGS,			/* no registers in set */
-+  T_REGS,			/* registers used by indirect sibcalls */
-+  JALR_REGS,			/* registers used by indirect calls */
-+  GR_REGS,			/* integer registers */
-+  FP_REGS,			/* floating point registers */
-+  FRAME_REGS,			/* $arg and $frame */
-+  ALL_REGS,			/* all registers */
-+  LIM_REG_CLASSES		/* max value + 1 */
-+};
-+
-+#define N_REG_CLASSES (int) LIM_REG_CLASSES
-+
-+#define GENERAL_REGS GR_REGS
-+
-+/* An initializer containing the names of the register classes as C
-+   string constants.  These names are used in writing some of the
-+   debugging dumps.  */
-+
-+#define REG_CLASS_NAMES							\
-+{									\
-+  "NO_REGS",								\
-+  "T_REGS",								\
-+  "JALR_REGS",								\
-+  "GR_REGS",								\
-+  "FP_REGS",								\
-+  "FRAME_REGS",								\
-+  "ALL_REGS"								\
-+}
-+
-+/* An initializer containing the contents of the register classes,
-+   as integers which are bit masks.  The Nth integer specifies the
-+   contents of class N.  The way the integer MASK is interpreted is
-+   that register R is in the class if `MASK & (1 << R)' is 1.
-+
-+   When the machine has more than 32 registers, an integer does not
-+   suffice.  Then the integers are replaced by sub-initializers,
-+   braced groupings containing several integers.  Each
-+   sub-initializer must be suitable as an initializer for the type
-+   `HARD_REG_SET' which is defined in `hard-reg-set.h'.  */
-+
-+#define REG_CLASS_CONTENTS									\
-+{												\
-+  { 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\
-+  { 0xf0000040, 0x00000000, 0x00000000 },	/* T_REGS */		\
-+  { 0xffffff40, 0x00000000, 0x00000000 },	/* JALR_REGS */		\
-+  { 0xffffffff, 0x00000000, 0x00000000 },	/* GR_REGS */		\
-+  { 0x00000000, 0xffffffff, 0x00000000 },	/* FP_REGS */		\
-+  { 0x00000000, 0x00000000, 0x00000003 },	/* FRAME_REGS */	\
-+  { 0xffffffff, 0xffffffff, 0x00000003 }	/* ALL_REGS */		\
-+}
-+
-+/* A C expression whose value is a register class containing hard
-+   register REGNO.  In general there is more than one such class;
-+   choose a class which is "minimal", meaning that no smaller class
-+   also contains the register.  */
-+
-+#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
-+
-+/* A macro whose definition is the name of the class to which a
-+   valid base register must belong.  A base register is one used in
-+   an address which is the register value plus a displacement.  */
-+
-+#define BASE_REG_CLASS GR_REGS
-+
-+/* A macro whose definition is the name of the class to which a
-+   valid index register must belong.  An index register is one used
-+   in an address where its value is either multiplied by a scale
-+   factor or added to another register (as well as added to a
-+   displacement).  */
-+
-+#define INDEX_REG_CLASS NO_REGS
-+
-+/* We generally want to put call-clobbered registers ahead of
-+   call-saved ones.  (IRA expects this.)  */
-+
-+#define REG_ALLOC_ORDER							\
-+{ \
-+  /* Call-clobbered GPRs.  */						\
-+  15, 14, 13, 12, 11, 10, 16, 17, 6, 28, 29, 30, 31, 5, 7, 1,		\
-+  /* Call-saved GPRs.  */						\
-+  8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,	       			\
-+  /* GPRs that can never be exposed to the register allocator.  */	\
-+  0, 2, 3, 4,								\
-+  /* Call-clobbered FPRs.  */						\
-+  47, 46, 45, 44, 43, 42, 32, 33, 34, 35, 36, 37, 38, 39, 48, 49,	\
-+  60, 61, 62, 63,							\
-+  /* Call-saved FPRs.  */						\
-+  40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,			\
-+  /* None of the remaining classes have defined call-saved		\
-+     registers.  */							\
-+  64, 65								\
-+}
-+
-+/* True if VALUE is a signed 12-bit number.  */
-+
-+#define SMALL_OPERAND(VALUE) \
-+  ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
-+
-+/* True if VALUE can be loaded into a register using LUI.  */
-+
-+#define LUI_OPERAND(VALUE)						\
-+  (((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH)	\
-+   || ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
-+
-+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
-+  reg_classes_intersect_p (FP_REGS, CLASS)
-+
-+/* Stack layout; function entry, exit and calling.  */
-+
-+#define STACK_GROWS_DOWNWARD
-+
-+#define FRAME_GROWS_DOWNWARD 1
-+
-+#define STARTING_FRAME_OFFSET 0
-+
-+#define RETURN_ADDR_RTX riscv_return_addr
-+
-+#define ELIMINABLE_REGS							\
-+{{ ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM},				\
-+ { ARG_POINTER_REGNUM,   HARD_FRAME_POINTER_REGNUM},			\
-+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM},				\
-+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}				\
-+
-+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
-+  (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
-+
-+/* Allocate stack space for arguments at the beginning of each function.  */
-+#define ACCUMULATE_OUTGOING_ARGS 1
-+
-+/* The argument pointer always points to the first argument.  */
-+#define FIRST_PARM_OFFSET(FNDECL) 0
-+
-+#define REG_PARM_STACK_SPACE(FNDECL) 0
-+
-+/* Define this if it is the responsibility of the caller to
-+   allocate the area reserved for arguments passed in registers.
-+   If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
-+   of this macro is to determine whether the space is included in
-+   `crtl->outgoing_args_size'.  */
-+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
-+
-+#define STACK_BOUNDARY 128
-+

-+/* Symbolic macros for the registers used to return integer and floating
-+   point values.  */
-+
-+#define GP_RETURN GP_ARG_FIRST
-+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : FP_ARG_FIRST)
-+
-+#define MAX_ARGS_IN_REGISTERS 8
-+
-+/* Symbolic macros for the first/last argument registers.  */
-+
-+#define GP_ARG_FIRST (GP_REG_FIRST + 10)
-+#define GP_ARG_LAST  (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
-+#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
-+#define FP_ARG_FIRST (FP_REG_FIRST + 10)
-+#define FP_ARG_LAST  (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
-+
-+#define CALLEE_SAVED_REG_NUMBER(REGNO)			\
-+  ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 :		\
-+   (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
-+
-+#define LIBCALL_VALUE(MODE) \
-+  riscv_function_value (NULL_TREE, NULL_TREE, MODE)
-+
-+#define FUNCTION_VALUE(VALTYPE, FUNC) \
-+  riscv_function_value (VALTYPE, FUNC, VOIDmode)
-+
-+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
-+
-+/* 1 if N is a possible register number for function argument passing.
-+   We have no FP argument registers when soft-float.  When FP registers
-+   are 32 bits, we can't directly reference the odd numbered ones.  */
-+
-+/* Accept arguments in a0-a7 and/or fa0-fa7. */
-+#define FUNCTION_ARG_REGNO_P(N)					\
-+  (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST)			\
-+   || IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))
-+
-+/* The ABI views the arguments as a structure, of which the first 8
-+   words go in registers and the rest go on the stack.  If I < 8, the Ith
-+   word might go in the Ith integer argument register or the Ith
-+   floating-point argument register. */
-+
-+typedef struct {
-+  /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
-+  unsigned int num_gprs;
-+
-+  /* Number of words passed on the stack.  */
-+  unsigned int stack_words;
-+} CUMULATIVE_ARGS;
-+
-+/* Initialize a variable CUM of type CUMULATIVE_ARGS
-+   for a call to a function whose data type is FNTYPE.
-+   For a library call, FNTYPE is 0.  */
-+
-+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
-+  memset (&(CUM), 0, sizeof (CUM))
-+
-+#define EPILOGUE_USES(REGNO)	((REGNO) == RETURN_ADDR_REGNUM)
-+
-+/* ABI requires 16-byte alignment, even on RV32. */
-+#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
-+
-+#define NO_PROFILE_COUNTERS 1
-+
-+/* Define this macro if the code for function profiling should come
-+   before the function prologue.  Normally, the profiling code comes
-+   after.  */
-+
-+/* #define PROFILE_BEFORE_PROLOGUE */
-+
-+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
-+   the stack pointer does not matter.  The value is tested only in
-+   functions that have frame pointers.
-+   No definition is equivalent to always zero.  */
-+
-+#define EXIT_IGNORE_STACK 1
-+
-+
-+/* Trampolines are a block of code followed by two pointers.  */
-+
-+#define TRAMPOLINE_CODE_SIZE 16
-+#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
-+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
-+
-+/* Addressing modes, and classification of registers for them.  */
-+
-+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
-+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
-+  riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
-+
-+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
-+   and check its validity for a certain class.
-+   We have two alternate definitions for each of them.
-+   The usual definition accepts all pseudo regs; the other rejects them all.
-+   The symbol REG_OK_STRICT causes the latter definition to be used.
-+
-+   Most source files want to accept pseudo regs in the hope that
-+   they will get allocated to the class that the insn wants them to be in.
-+   Some source files that are used after register allocation
-+   need to be strict.  */
-+
-+#ifndef REG_OK_STRICT
-+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
-+  riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
-+#else
-+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
-+  riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
-+#endif
-+
-+#define REG_OK_FOR_INDEX_P(X) 0
-+
-+

-+/* Maximum number of registers that can appear in a valid memory address.  */
-+
-+#define MAX_REGS_PER_ADDRESS 1
-+
-+#define CONSTANT_ADDRESS_P(X) \
-+  (CONSTANT_P (X) && memory_address_p (SImode, X))
-+
-+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
-+   'the start of the function that this code is output in'.  */
-+
-+#define ASM_OUTPUT_LABELREF(FILE,NAME)  \
-+  if (strcmp (NAME, "..CURRENT_FUNCTION") == 0)				\
-+    asm_fprintf ((FILE), "%U%s",					\
-+		 XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));	\
-+  else									\
-+    asm_fprintf ((FILE), "%U%s", (NAME))
-+
-+/* This flag marks functions that cannot be lazily bound.  */
-+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
-+#define SYMBOL_REF_BIND_NOW_P(RTX) \
-+  ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
-+
-+#define JUMP_TABLES_IN_TEXT_SECTION 0
-+#define CASE_VECTOR_MODE SImode
-+#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
-+
-+/* Define this as 1 if `char' should by default be signed; else as 0.  */
-+#define DEFAULT_SIGNED_CHAR 0
-+
-+/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
-+#define MOVE_MAX UNITS_PER_WORD
-+#define MAX_MOVE_MAX 8
-+
-+#define SLOW_BYTE_ACCESS 0
-+
-+#define SHIFT_COUNT_TRUNCATED 1
-+
-+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
-+   is done just by pretending it is already truncated.  */
-+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
-+  (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
-+
-+/* Specify the machine mode that pointers have.
-+   After generation of rtl, the compiler makes no further distinction
-+   between pointers and any other objects of this machine mode.  */
-+
-+#ifndef Pmode
-+#define Pmode (TARGET_64BIT ? DImode : SImode)
-+#endif
-+
-+/* Give call MEMs SImode since it is the "most permissive" mode
-+   for both 32-bit and 64-bit targets.  */
-+
-+#define FUNCTION_MODE SImode
-+
-+/* A C expression for the cost of a branch instruction.  A value of 2
-+   seems to minimize code size.  */
-+
-+#define BRANCH_COST(speed_p, predictable_p) \
-+  ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
-+
-+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
-+
-+/* Control the assembler format that we output.  */
-+
-+/* Output to assembler file text saying following lines
-+   may contain character constants, extra white space, comments, etc.  */
-+
-+#ifndef ASM_APP_ON
-+#define ASM_APP_ON " #APP\n"
-+#endif
-+
-+/* Output to assembler file text saying following lines
-+   no longer contain unusual constructs.  */
-+
-+#ifndef ASM_APP_OFF
-+#define ASM_APP_OFF " #NO_APP\n"
-+#endif
-+
-+#define REGISTER_NAMES						\
-+{ "zero","ra",  "sp",  "gp",  "tp",  "t0",  "t1",  "t2",	\
-+  "s0",  "s1",  "a0",  "a1",  "a2",  "a3",  "a4",  "a5",	\
-+  "a6",  "a7",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",	\
-+  "s8",  "s9",  "s10", "s11", "t3",  "t4",  "t5",  "t6",	\
-+  "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",	\
-+  "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",	\
-+  "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",	\
-+  "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11",	\
-+  "arg", "frame", }
-+
-+#define ADDITIONAL_REGISTER_NAMES					\
-+{									\
-+  { "x0",	 0 + GP_REG_FIRST },					\
-+  { "x1",	 1 + GP_REG_FIRST },					\
-+  { "x2",	 2 + GP_REG_FIRST },					\
-+  { "x3",	 3 + GP_REG_FIRST },					\
-+  { "x4",	 4 + GP_REG_FIRST },					\
-+  { "x5",	 5 + GP_REG_FIRST },					\
-+  { "x6",	 6 + GP_REG_FIRST },					\
-+  { "x7",	 7 + GP_REG_FIRST },					\
-+  { "x8",	 8 + GP_REG_FIRST },					\
-+  { "x9",	 9 + GP_REG_FIRST },					\
-+  { "x10",	10 + GP_REG_FIRST },					\
-+  { "x11",	11 + GP_REG_FIRST },					\
-+  { "x12",	12 + GP_REG_FIRST },					\
-+  { "x13",	13 + GP_REG_FIRST },					\
-+  { "x14",	14 + GP_REG_FIRST },					\
-+  { "x15",	15 + GP_REG_FIRST },					\
-+  { "x16",	16 + GP_REG_FIRST },					\
-+  { "x17",	17 + GP_REG_FIRST },					\
-+  { "x18",	18 + GP_REG_FIRST },					\
-+  { "x19",	19 + GP_REG_FIRST },					\
-+  { "x20",	20 + GP_REG_FIRST },					\
-+  { "x21",	21 + GP_REG_FIRST },					\
-+  { "x22",	22 + GP_REG_FIRST },					\
-+  { "x23",	23 + GP_REG_FIRST },					\
-+  { "x24",	24 + GP_REG_FIRST },					\
-+  { "x25",	25 + GP_REG_FIRST },					\
-+  { "x26",	26 + GP_REG_FIRST },					\
-+  { "x27",	27 + GP_REG_FIRST },					\
-+  { "x28",	28 + GP_REG_FIRST },					\
-+  { "x29",	29 + GP_REG_FIRST },					\
-+  { "x30",	30 + GP_REG_FIRST },					\
-+  { "x31",	31 + GP_REG_FIRST },					\
-+  { "f0",	 0 + FP_REG_FIRST },					\
-+  { "f1",	 1 + FP_REG_FIRST },					\
-+  { "f2",	 2 + FP_REG_FIRST },					\
-+  { "f3",	 3 + FP_REG_FIRST },					\
-+  { "f4",	 4 + FP_REG_FIRST },					\
-+  { "f5",	 5 + FP_REG_FIRST },					\
-+  { "f6",	 6 + FP_REG_FIRST },					\
-+  { "f7",	 7 + FP_REG_FIRST },					\
-+  { "f8",	 8 + FP_REG_FIRST },					\
-+  { "f9",	 9 + FP_REG_FIRST },					\
-+  { "f10",	10 + FP_REG_FIRST },					\
-+  { "f11",	11 + FP_REG_FIRST },					\
-+  { "f12",	12 + FP_REG_FIRST },					\
-+  { "f13",	13 + FP_REG_FIRST },					\
-+  { "f14",	14 + FP_REG_FIRST },					\
-+  { "f15",	15 + FP_REG_FIRST },					\
-+  { "f16",	16 + FP_REG_FIRST },					\
-+  { "f17",	17 + FP_REG_FIRST },					\
-+  { "f18",	18 + FP_REG_FIRST },					\
-+  { "f19",	19 + FP_REG_FIRST },					\
-+  { "f20",	20 + FP_REG_FIRST },					\
-+  { "f21",	21 + FP_REG_FIRST },					\
-+  { "f22",	22 + FP_REG_FIRST },					\
-+  { "f23",	23 + FP_REG_FIRST },					\
-+  { "f24",	24 + FP_REG_FIRST },					\
-+  { "f25",	25 + FP_REG_FIRST },					\
-+  { "f26",	26 + FP_REG_FIRST },					\
-+  { "f27",	27 + FP_REG_FIRST },					\
-+  { "f28",	28 + FP_REG_FIRST },					\
-+  { "f29",	29 + FP_REG_FIRST },					\
-+  { "f30",	30 + FP_REG_FIRST },					\
-+  { "f31",	31 + FP_REG_FIRST },					\
-+}
-+
-+/* Globalizing directive for a label.  */
-+#define GLOBAL_ASM_OP "\t.globl\t"
-+
-+/* This is how to store into the string LABEL
-+   the symbol_ref name of an internal numbered label where
-+   PREFIX is the class of label and NUM is the number within the class.
-+   This is suitable for output with `assemble_name'.  */
-+
-+#undef ASM_GENERATE_INTERNAL_LABEL
-+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM)			\
-+  sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
-+
-+/* This is how to output an element of a case-vector that is absolute.  */
-+
-+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE)				\
-+  fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
-+
-+/* This is how to output an element of a PIC case-vector. */
-+
-+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)		\
-+  fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n",				\
-+	   LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
-+
-+/* This is how to output an assembler line
-+   that says to advance the location counter
-+   to a multiple of 2**LOG bytes.  */
-+
-+#define ASM_OUTPUT_ALIGN(STREAM,LOG)					\
-+  fprintf (STREAM, "\t.align\t%d\n", (LOG))
-+
-+/* Define the strings to put out for each section in the object file.  */
-+#define TEXT_SECTION_ASM_OP	"\t.text"	/* instructions */
-+#define DATA_SECTION_ASM_OP	"\t.data"	/* large data */
-+#define READONLY_DATA_SECTION_ASM_OP	"\t.section\t.rodata"
-+#define BSS_SECTION_ASM_OP	"\t.bss"
-+#define SBSS_SECTION_ASM_OP	"\t.section\t.sbss,\"aw\",@nobits"
-+#define SDATA_SECTION_ASM_OP	"\t.section\t.sdata,\"aw\",@progbits"
-+
-+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO)				\
-+do									\
-+  {									\
-+    fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n",		\
-+	     reg_names[STACK_POINTER_REGNUM],				\
-+	     reg_names[STACK_POINTER_REGNUM],				\
-+	     TARGET_64BIT ? "sd" : "sw",				\
-+	     reg_names[REGNO],						\
-+	     reg_names[STACK_POINTER_REGNUM]);				\
-+  }									\
-+while (0)
-+
-+#define ASM_OUTPUT_REG_POP(STREAM,REGNO)				\
-+do									\
-+  {									\
-+    fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n",		\
-+	     TARGET_64BIT ? "ld" : "lw",				\
-+	     reg_names[REGNO],						\
-+	     reg_names[STACK_POINTER_REGNUM],				\
-+	     reg_names[STACK_POINTER_REGNUM],				\
-+	     reg_names[STACK_POINTER_REGNUM]);				\
-+  }									\
-+while (0)
-+
-+#define ASM_COMMENT_START "#"
-+
-+#undef SIZE_TYPE
-+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
-+
-+#undef PTRDIFF_TYPE
-+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
-+
-+/* The maximum number of bytes that can be copied by one iteration of
-+   a movmemsi loop; see riscv_block_move_loop.  */
-+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
-+
-+/* The maximum number of bytes that can be copied by a straight-line
-+   implementation of movmemsi; see riscv_block_move_straight.  We want
-+   to make sure that any loop-based implementation will iterate at
-+   least twice.  */
-+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
-+
-+/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
-+
-+#define RISCV_CALL_RATIO 6
-+
-+/* Any loop-based implementation of movmemsi will have at least
-+   RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
-+   moves, so allow individual copies of fewer elements.
-+
-+   When movmemsi is not available, use a value approximating
-+   the length of a memcpy call sequence, so that move_by_pieces
-+   will generate inline code if it is shorter than a function call.
-+   Since move_by_pieces_ninsns counts memory-to-memory moves, but
-+   we'll have to generate a load/store pair for each, halve the
-+   value of RISCV_CALL_RATIO to take that into account.  */
-+
-+#define MOVE_RATIO(speed)				\
-+  (HAVE_movmemsi					\
-+   ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX		\
-+   : RISCV_CALL_RATIO / 2)
-+
-+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
-+   of the length of a memset call, but use the default otherwise.  */
-+
-+#define CLEAR_RATIO(speed)\
-+  ((speed) ? 15 : RISCV_CALL_RATIO)
-+
-+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
-+   optimizing for size adjust the ratio to account for the overhead of
-+   loading the constant and replicating it across the word.  */
-+
-+#define SET_RATIO(speed) \
-+  ((speed) ? 15 : RISCV_CALL_RATIO - 2)
-+
-+#ifndef HAVE_AS_TLS
-+#define HAVE_AS_TLS 0
-+#endif
-+
-+#ifndef USED_FOR_TARGET
-+
-+extern const enum reg_class riscv_regno_to_class[];
-+extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
-+extern const char* riscv_hi_relocs[];
-+#endif
-+
-+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
-+  (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
-+
-+/* ISA constants needed for code generation.  */
-+#define OPCODE_LW    0x2003
-+#define OPCODE_LD    0x3003
-+#define OPCODE_AUIPC 0x17
-+#define OPCODE_JALR  0x67
-+#define SHIFT_RD  7
-+#define SHIFT_RS1 15
-+#define SHIFT_IMM 20
-+#define IMM_BITS 12
-+
-+#define IMM_REACH (1LL << IMM_BITS)
-+#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
-+#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
-diff -urN empty/gcc/config/riscv/riscv.md gcc-5.3.0/gcc/config/riscv/riscv.md
---- empty/gcc/config/riscv/riscv.md	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.md	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,2421 @@
-+;; Machine description for RISC-V for GNU compiler.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+
-+;; This file is part of GCC.
-+
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+;; GNU General Public License for more details.
-+
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3.  If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+(define_c_enum "unspec" [
-+  ;; Floating-point moves.
-+  UNSPEC_LOAD_LOW
-+  UNSPEC_LOAD_HIGH
-+  UNSPEC_STORE_WORD
-+
-+  ;; GP manipulation.
-+  UNSPEC_EH_RETURN
-+
-+  ;; Symbolic accesses.
-+  UNSPEC_ADDRESS_FIRST
-+  UNSPEC_LOAD_GOT
-+  UNSPEC_TLS
-+  UNSPEC_TLS_LE
-+  UNSPEC_TLS_IE
-+  UNSPEC_TLS_GD
-+
-+  ;; Register save and restore.
-+  UNSPEC_GPR_SAVE
-+  UNSPEC_GPR_RESTORE
-+
-+  ;; Blockage and synchronisation.
-+  UNSPEC_BLOCKAGE
-+  UNSPEC_FENCE
-+  UNSPEC_FENCE_I
-+])
-+
-+(define_constants
-+  [(RETURN_ADDR_REGNUM		1)
-+   (T0_REGNUM			5)
-+   (T1_REGNUM			6)
-+])
-+
-+(include "predicates.md")
-+(include "constraints.md")
-+
-+;; ....................
-+;;
-+;;	Attributes
-+;;
-+;; ....................
-+
-+(define_attr "got" "unset,xgot_high,load"
-+  (const_string "unset"))
-+
-+;; Classification of moves, extensions and truncations.  Most values
-+;; are as for "type" (see below) but there are also the following
-+;; move-specific values:
-+;;
-+;; andi		a single ANDI instruction
-+;; shift_shift	a shift left followed by a shift right
-+;;
-+;; This attribute is used to determine the instruction's length and
-+;; scheduling type.  For doubleword moves, the attribute always describes
-+;; the split instructions; in some cases, it is more appropriate for the
-+;; scheduling type to be "multi" instead.
-+(define_attr "move_type"
-+  "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
-+   const,logical,arith,andi,shift_shift"
-+  (const_string "unknown"))
-+
-+;; Main data type used by the insn
-+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
-+  (const_string "unknown"))
-+
-+;; True if the main data type is twice the size of a word.
-+(define_attr "dword_mode" "no,yes"
-+  (cond [(and (eq_attr "mode" "DI,DF")
-+	      (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
-+	 (const_string "yes")
-+
-+	 (and (eq_attr "mode" "TI,TF")
-+	      (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
-+	 (const_string "yes")]
-+	(const_string "no")))
-+
-+;; Classification of each insn.
-+;; branch	conditional branch
-+;; jump		unconditional jump
-+;; call		unconditional call
-+;; load		load instruction(s)
-+;; fpload	floating point load
-+;; store	store instruction(s)
-+;; fpstore	floating point store
-+;; mtc		transfer to coprocessor
-+;; mfc		transfer from coprocessor
-+;; const	load constant
-+;; arith	integer arithmetic instructions
-+;; logical      integer logical instructions
-+;; shift	integer shift instructions
-+;; slt		set less than instructions
-+;; imul		integer multiply 
-+;; idiv		integer divide
-+;; move		integer register move (addi rd, rs1, 0)
-+;; fmove	floating point register move
-+;; fadd		floating point add/subtract
-+;; fmul		floating point multiply
-+;; fmadd	floating point multiply-add
-+;; fdiv		floating point divide
-+;; fcmp		floating point compare
-+;; fcvt		floating point convert
-+;; fsqrt	floating point square root
-+;; multi	multiword sequence (or user asm statements)
-+;; nop		no operation
-+;; ghost	an instruction that produces no real code
-+(define_attr "type"
-+  "unknown,branch,jump,call,load,fpload,store,fpstore,
-+   mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
-+   fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
-+  (cond [(eq_attr "got" "load") (const_string "load")
-+
-+	 ;; If a doubleword move uses these expensive instructions,
-+	 ;; it is usually better to schedule them in the same way
-+	 ;; as the singleword form, rather than as "multi".
-+	 (eq_attr "move_type" "load") (const_string "load")
-+	 (eq_attr "move_type" "fpload") (const_string "fpload")
-+	 (eq_attr "move_type" "store") (const_string "store")
-+	 (eq_attr "move_type" "fpstore") (const_string "fpstore")
-+	 (eq_attr "move_type" "mtc") (const_string "mtc")
-+	 (eq_attr "move_type" "mfc") (const_string "mfc")
-+
-+	 ;; These types of move are always single insns.
-+	 (eq_attr "move_type" "fmove") (const_string "fmove")
-+	 (eq_attr "move_type" "arith") (const_string "arith")
-+	 (eq_attr "move_type" "logical") (const_string "logical")
-+	 (eq_attr "move_type" "andi") (const_string "logical")
-+
-+	 ;; These types of move are always split.
-+	 (eq_attr "move_type" "shift_shift")
-+	   (const_string "multi")
-+
-+	 ;; These types of move are split for doubleword modes only.
-+	 (and (eq_attr "move_type" "move,const")
-+	      (eq_attr "dword_mode" "yes"))
-+	   (const_string "multi")
-+	 (eq_attr "move_type" "move") (const_string "move")
-+	 (eq_attr "move_type" "const") (const_string "const")]
-+	(const_string "unknown")))
-+
-+;; Mode for conversion types (fcvt)
-+;; I2S          integer to float single (SI/DI to SF)
-+;; I2D          integer to float double (SI/DI to DF)
-+;; S2I          float to integer (SF to SI/DI)
-+;; D2I          float to integer (DF to SI/DI)
-+;; D2S          double to float single
-+;; S2D          float single to double
-+
-+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" 
-+  (const_string "unknown"))
-+
-+;; Length of instruction in bytes.
-+(define_attr "length" ""
-+   (cond [
-+	  ;; Direct branch instructions have a range of [-0x1000,0xffc],
-+	  ;; relative to the address of the delay slot.  If a branch is
-+	  ;; outside this range, convert a branch like:
-+	  ;;
-+	  ;;	bne	r1,r2,target
-+	  ;;
-+	  ;; to:
-+	  ;;
-+	  ;;	beq	r1,r2,1f
-+	  ;;  j target
-+	  ;; 1:
-+	  ;;
-+	  (eq_attr "type" "branch")
-+	  (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
-+				  (le (minus (pc) (match_dup 0)) (const_int 4092)))
-+	  (const_int 4)
-+	  (const_int 8))
-+
-+	  ;; Conservatively assume calls take two instructions, as in:
-+	  ;;   auipc t0, %pcrel_hi(target)
-+	  ;;   jalr  ra, t0, %lo(target)
-+	  ;; The linker will relax these into JAL when appropriate.
-+	  (eq_attr "type" "call") (const_int 8)
-+
-+	  ;; "Ghost" instructions occupy no space.
-+	  (eq_attr "type" "ghost") (const_int 0)
-+
-+	  (eq_attr "got" "load") (const_int 8)
-+
-+	  (eq_attr "type" "fcmp") (const_int 8)
-+
-+	  ;; SHIFT_SHIFTs are decomposed into two separate instructions.
-+	  (eq_attr "move_type" "shift_shift")
-+		(const_int 8)
-+
-+	  ;; Check for doubleword moves that are decomposed into two
-+	  ;; instructions.
-+	  (and (eq_attr "move_type" "mtc,mfc,move")
-+	       (eq_attr "dword_mode" "yes"))
-+	  (const_int 8)
-+
-+	  ;; Doubleword CONST{,N} moves are split into two word
-+	  ;; CONST{,N} moves.
-+	  (and (eq_attr "move_type" "const")
-+	       (eq_attr "dword_mode" "yes"))
-+	  (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
-+
-+	  ;; Otherwise, constants, loads and stores are handled by external
-+	  ;; routines.
-+	  (eq_attr "move_type" "load,fpload")
-+	  (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
-+	  (eq_attr "move_type" "store,fpstore")
-+	  (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
-+	  ] (const_int 4)))
-+
-+;; Describe a user's asm statement.
-+(define_asm_attributes
-+  [(set_attr "type" "multi")])
-+
-+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
-+;; from the same template.
-+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
-+(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
-+
-+;; A copy of GPR that can be used when a pattern has two independent
-+;; modes.
-+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
-+
-+;; This mode iterator allows :P to be used for patterns that operate on
-+;; pointer-sized quantities.  Exactly one of the two alternatives will match.
-+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
-+
-+;; 32-bit integer moves for which we provide move patterns.
-+(define_mode_iterator IMOVE32 [SI])
-+
-+;; 64-bit modes for which we provide move patterns.
-+(define_mode_iterator MOVE64 [DI DF])
-+
-+;; 128-bit modes for which we provide move patterns on 64-bit targets.
-+(define_mode_iterator MOVE128 [TI TF])
-+
-+;; This mode iterator allows the QI and HI extension patterns to be
-+;; defined from the same template.
-+(define_mode_iterator SHORT [QI HI])
-+
-+;; Likewise the 64-bit truncate-and-shift patterns.
-+(define_mode_iterator SUBDI [QI HI SI])
-+(define_mode_iterator HISI [HI SI])
-+(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
-+
-+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
-+;; floating-point mode is allowed.
-+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
-+			    (DF "TARGET_HARD_FLOAT")])
-+(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
-+			     (SF "TARGET_HARD_FLOAT")
-+			     (DF "TARGET_HARD_FLOAT")])
-+
-+;; Like ANYF, but only applies to scalar modes.
-+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
-+			       (DF "TARGET_HARD_FLOAT")])
-+
-+;; A floating-point mode for which moves involving FPRs may need to be split.
-+(define_mode_iterator SPLITF
-+  [(DF "!TARGET_64BIT")
-+   (DI "!TARGET_64BIT")
-+   (TF "TARGET_64BIT")])
-+
-+;; This attribute gives the length suffix for a sign- or zero-extension
-+;; instruction.
-+(define_mode_attr size [(QI "b") (HI "h")])
-+
-+;; Mode attributes for loads.
-+(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
-+
-+;; Instruction names for stores.
-+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
-+
-+;; This attribute gives the best constraint to use for registers of
-+;; a given mode.
-+(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
-+
-+;; This attribute gives the format suffix for floating-point operations.
-+(define_mode_attr fmt [(SF "s") (DF "d")])
-+
-+;; This attribute gives the format suffix for atomic memory operations.
-+(define_mode_attr amo [(SI "w") (DI "d")])
-+
-+;; This attribute gives the upper-case mode name for one unit of a
-+;; floating-point mode.
-+(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
-+
-+;; This attribute gives the integer mode that has half the size of
-+;; the controlling mode.
-+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
-+
-+;; This code iterator allows signed and unsigned widening multiplications
-+;; to use the same template.
-+(define_code_iterator any_extend [sign_extend zero_extend])
-+
-+;; This code iterator allows the two right shift instructions to be
-+;; generated from the same template.
-+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
-+
-+;; This code iterator allows the three shift instructions to be generated
-+;; from the same template.
-+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
-+
-+;; This code iterator allows unsigned and signed division to be generated
-+;; from the same template.
-+(define_code_iterator any_div [div udiv])
-+
-+;; This code iterator allows unsigned and signed modulus to be generated
-+;; from the same template.
-+(define_code_iterator any_mod [mod umod])
-+
-+;; These code iterators allow the signed and unsigned scc operations to use
-+;; the same template.
-+(define_code_iterator any_gt [gt gtu])
-+(define_code_iterator any_ge [ge geu])
-+(define_code_iterator any_lt [lt ltu])
-+(define_code_iterator any_le [le leu])
-+
-+;; <u> expands to an empty string when doing a signed operation and
-+;; "u" when doing an unsigned operation.
-+(define_code_attr u [(sign_extend "") (zero_extend "u")
-+		     (div "") (udiv "u")
-+		     (mod "") (umod "u")
-+		     (gt "") (gtu "u")
-+		     (ge "") (geu "u")
-+		     (lt "") (ltu "u")
-+		     (le "") (leu "u")])
-+
-+;; <su> is like <u>, but the signed form expands to "s" rather than "".
-+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
-+
-+;; <optab> expands to the name of the optab for a particular code.
-+(define_code_attr optab [(ashift "ashl")
-+			 (ashiftrt "ashr")
-+			 (lshiftrt "lshr")
-+			 (ior "ior")
-+			 (xor "xor")
-+			 (and "and")
-+			 (plus "add")
-+			 (minus "sub")])
-+
-+;; <insn> expands to the name of the insn that implements a particular code.
-+(define_code_attr insn [(ashift "sll")
-+			(ashiftrt "sra")
-+			(lshiftrt "srl")
-+			(ior "or")
-+			(xor "xor")
-+			(and "and")
-+			(plus "add")
-+			(minus "sub")])
-+
-+;; Ghost instructions produce no real code and introduce no hazards.
-+;; They exist purely to express an effect on dataflow.
-+(define_insn_reservation "ghost" 0
-+  (eq_attr "type" "ghost")
-+  "nothing")
-+
-+;;
-+;;  ....................
-+;;
-+;;	ADDITION
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "add<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+		   (match_operand:ANYF 2 "register_operand" "f")))]
-+  ""
-+  "fadd.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "add<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand")
-+	(plus:GPR (match_operand:GPR 1 "register_operand")
-+		  (match_operand:GPR 2 "arith_operand")))]
-+  "")
-+
-+(define_insn "*addsi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r,r")
-+	(plus:SI (match_operand:GPR 1 "register_operand" "r,r")
-+		  (match_operand:GPR2 2 "arith_operand" "r,Q")))]
-+  ""
-+  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*adddi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r,r")
-+	(plus:DI (match_operand:DI 1 "register_operand" "r,r")
-+		  (match_operand:DI 2 "arith_operand" "r,Q")))]
-+  "TARGET_64BIT"
-+  "add\t%0,%1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "*addsi3_extended"
-+  [(set (match_operand:DI 0 "register_operand" "=r,r")
-+	(sign_extend:DI
-+	     (plus:SI (match_operand:SI 1 "register_operand" "r,r")
-+		      (match_operand:SI 2 "arith_operand" "r,Q"))))]
-+  "TARGET_64BIT"
-+  "addw\t%0,%1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*adddisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r,r")
-+	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
-+		      (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
-+  "TARGET_64BIT"
-+  "addw\t%0,%1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*adddisisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r,r")
-+	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
-+		      (match_operand:SI 2 "arith_operand" "r,Q")))]
-+  "TARGET_64BIT"
-+  "addw\t%0,%1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*adddi3_truncsi"
-+  [(set (match_operand:SI 0 "register_operand" "=r,r")
-+          (truncate:SI
-+	     (plus:DI (match_operand:DI 1 "register_operand" "r,r")
-+		      (match_operand:DI 2 "arith_operand" "r,Q"))))]
-+  "TARGET_64BIT"
-+  "addw\t%0,%1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	SUBTRACTION
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "sub<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+		    (match_operand:ANYF 2 "register_operand" "f")))]
-+  ""
-+  "fsub.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "sub<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand")
-+	(minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
-+		   (match_operand:GPR 2 "register_operand")))]
-+  "")
-+
-+(define_insn "*subdi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
-+		   (match_operand:DI 2 "register_operand" "r")))]
-+  "TARGET_64BIT"
-+  "sub\t%0,%z1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "*subsi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
-+		   (match_operand:GPR2 2 "register_operand" "r")))]
-+  ""
-+  { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*subsi3_extended"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(sign_extend:DI
-+	    (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
-+		      (match_operand:SI 2 "register_operand" "r"))))]
-+  "TARGET_64BIT"
-+  "subw\t%0,%z1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "*subdisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	     (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
-+		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
-+  "TARGET_64BIT"
-+  "subw\t%0,%z1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*subdisisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	     (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
-+		      (match_operand:SI 2 "register_operand" "r")))]
-+  "TARGET_64BIT"
-+  "subw\t%0,%z1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*subsidisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	     (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
-+		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
-+  "TARGET_64BIT"
-+  "subw\t%0,%z1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*subdi3_truncsi"
-+  [(set (match_operand:SI 0 "register_operand" "=r,r")
-+          (truncate:SI
-+	     (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
-+		      (match_operand:DI 2 "arith_operand" "r,Q"))))]
-+  "TARGET_64BIT"
-+  "subw\t%0,%z1,%2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "SI")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	MULTIPLICATION
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "mul<mode>3"
-+  [(set (match_operand:SCALARF 0 "register_operand" "=f")
-+	(mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
-+		      (match_operand:SCALARF 2 "register_operand" "f")))]
-+  ""
-+  "fmul.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fmul")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "mul<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand")
-+	(mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
-+		   (match_operand:GPR 2 "register_operand")))]
-+  "TARGET_MULDIV")
-+
-+(define_insn "*mulsi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(mult:SI (match_operand:GPR 1 "register_operand" "r")
-+		  (match_operand:GPR2 2 "register_operand" "r")))]
-+  "TARGET_MULDIV"
-+  { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*muldisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	     (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
-+		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "mulw\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*muldi3_truncsi"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+          (truncate:SI
-+	     (mult:DI (match_operand:DI 1 "register_operand" "r")
-+		      (match_operand:DI 2 "register_operand" "r"))))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "mulw\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*muldi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(mult:DI (match_operand:DI 1 "register_operand" "r")
-+		  (match_operand:DI 2 "register_operand" "r")))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "mul\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "DI")])
-+
-+;;
-+;;  ........................
-+;;
-+;;	MULTIPLICATION HIGH-PART
-+;;
-+;;  ........................
-+;;
-+
-+
-+;; Using a clobber here is ghetto, but I'm not smart enough to do better. '
-+(define_insn_and_split "<u>mulditi3"
-+  [(set (match_operand:TI 0 "register_operand" "=r")
-+	(mult:TI (any_extend:TI
-+		   (match_operand:DI 1 "register_operand" "r"))
-+		 (any_extend:TI
-+		   (match_operand:DI 2 "register_operand" "r"))))
-+  (clobber (match_scratch:DI 3 "=r"))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "#"
-+  "reload_completed"
-+  [
-+   (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
-+   (set (match_dup 4) (truncate:DI
-+			(lshiftrt:TI
-+			  (mult:TI (any_extend:TI (match_dup 1))
-+				   (any_extend:TI (match_dup 2)))
-+			  (const_int 64))))
-+   (set (match_dup 5) (match_dup 3))
-+  ]
-+{
-+  operands[4] = riscv_subword (operands[0], true);
-+  operands[5] = riscv_subword (operands[0], false);
-+}
-+  )
-+
-+(define_insn "<u>muldi3_highpart"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(truncate:DI
-+	  (lshiftrt:TI
-+	    (mult:TI (any_extend:TI
-+		       (match_operand:DI 1 "register_operand" "r"))
-+		     (any_extend:TI
-+		       (match_operand:DI 2 "register_operand" "r")))
-+	    (const_int 64))))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "mulh<u>\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "DI")])
-+
-+
-+(define_insn_and_split "usmulditi3"
-+  [(set (match_operand:TI 0 "register_operand" "=r")
-+	(mult:TI (zero_extend:TI
-+		   (match_operand:DI 1 "register_operand" "r"))
-+		 (sign_extend:TI
-+		   (match_operand:DI 2 "register_operand" "r"))))
-+  (clobber (match_scratch:DI 3 "=r"))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "#"
-+  "reload_completed"
-+  [
-+   (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
-+   (set (match_dup 4) (truncate:DI
-+			(lshiftrt:TI
-+			  (mult:TI (zero_extend:TI (match_dup 1))
-+				   (sign_extend:TI (match_dup 2)))
-+			  (const_int 64))))
-+   (set (match_dup 5) (match_dup 3))
-+  ]
-+{
-+  operands[4] = riscv_subword (operands[0], true);
-+  operands[5] = riscv_subword (operands[0], false);
-+}
-+  )
-+
-+(define_insn "usmuldi3_highpart"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(truncate:DI
-+	  (lshiftrt:TI
-+	    (mult:TI (zero_extend:TI
-+		       (match_operand:DI 1 "register_operand" "r"))
-+		     (sign_extend:TI
-+		       (match_operand:DI 2 "register_operand" "r")))
-+	    (const_int 64))))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "mulhsu\t%0,%2,%1"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "DI")])
-+
-+(define_expand "<u>mulsidi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(mult:DI (any_extend:DI
-+		   (match_operand:SI 1 "register_operand" "r"))
-+		 (any_extend:DI
-+		   (match_operand:SI 2 "register_operand" "r"))))
-+  (clobber (match_scratch:SI 3 "=r"))]
-+  "TARGET_MULDIV && !TARGET_64BIT"
-+{
-+  rtx temp = gen_reg_rtx (SImode);
-+  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
-+  emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
-+				     operands[1], operands[2]));
-+  emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
-+  DONE;
-+}
-+  )
-+
-+(define_insn "<u>mulsi3_highpart"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(truncate:SI
-+	  (lshiftrt:DI
-+	    (mult:DI (any_extend:DI
-+		       (match_operand:SI 1 "register_operand" "r"))
-+		     (any_extend:DI
-+		       (match_operand:SI 2 "register_operand" "r")))
-+	    (const_int 32))))]
-+  "TARGET_MULDIV && !TARGET_64BIT"
-+  "mulh<u>\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "SI")])
-+
-+
-+(define_expand "usmulsidi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(mult:DI (zero_extend:DI
-+		   (match_operand:SI 1 "register_operand" "r"))
-+		 (sign_extend:DI
-+		   (match_operand:SI 2 "register_operand" "r"))))
-+  (clobber (match_scratch:SI 3 "=r"))]
-+  "TARGET_MULDIV && !TARGET_64BIT"
-+{
-+  rtx temp = gen_reg_rtx (SImode);
-+  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
-+  emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
-+				     operands[1], operands[2]));
-+  emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
-+  DONE;
-+}
-+  )
-+
-+(define_insn "usmulsi3_highpart"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(truncate:SI
-+	  (lshiftrt:DI
-+	    (mult:DI (zero_extend:DI
-+		       (match_operand:SI 1 "register_operand" "r"))
-+		     (sign_extend:DI
-+		       (match_operand:SI 2 "register_operand" "r")))
-+	    (const_int 32))))]
-+  "TARGET_MULDIV && !TARGET_64BIT"
-+  "mulhsu\t%0,%2,%1"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "SI")])
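As an illustrative aside (an editorial sketch, not part of the patch itself): on RV64 with the M extension, the <u>mulditi3 and <u>muldi3_highpart patterns above are what GCC is expected to use for a full 64x64->128-bit product written in C, splitting it into one "mul" for the low half and one "mulh"/"mulhu" for the high half.

    /* Hypothetical example, not taken from the patch: a full 64x64->128-bit
       unsigned product.  On an rv64 target with the M extension this is
       expected to go through umulditi3 above, i.e. roughly one "mul" for the
       low 64 bits and one "mulhu" for the upper 64 bits of the result.  */
    unsigned __int128
    umul128 (unsigned long a, unsigned long b)
    {
      return (unsigned __int128) a * b;
    }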
-+
-+;;
-+;;  ....................
-+;;
-+;;	DIVISION and REMAINDER
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "<u>divsi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(any_div:SI (match_operand:SI 1 "register_operand" "r")
-+		  (match_operand:SI 2 "register_operand" "r")))]
-+  "TARGET_MULDIV"
-+  { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
-+  [(set_attr "type" "idiv")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "<u>divdi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(any_div:DI (match_operand:DI 1 "register_operand" "r")
-+		  (match_operand:DI 2 "register_operand" "r")))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "div<u>\t%0,%1,%2"
-+  [(set_attr "type" "idiv")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "<u>modsi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(any_mod:SI (match_operand:SI 1 "register_operand" "r")
-+		  (match_operand:SI 2 "register_operand" "r")))]
-+  "TARGET_MULDIV"
-+  { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
-+  [(set_attr "type" "idiv")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "<u>moddi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(any_mod:DI (match_operand:DI 1 "register_operand" "r")
-+		  (match_operand:DI 2 "register_operand" "r")))]
-+  "TARGET_MULDIV && TARGET_64BIT"
-+  "rem<u>\t%0,%1,%2"
-+  [(set_attr "type" "idiv")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "div<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(div:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+		  (match_operand:ANYF 2 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && TARGET_FDIV"
-+  "fdiv.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fdiv")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	SQUARE ROOT
-+;;
-+;;  ....................
-+
-+(define_insn "sqrt<mode>2"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && TARGET_FDIV"
-+{
-+    return "fsqrt.<fmt>\t%0,%1";
-+}
-+  [(set_attr "type" "fsqrt")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;; Floating point multiply accumulate instructions.
-+
-+(define_insn "fma<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+    (fma:ANYF
-+      (match_operand:ANYF 1 "register_operand" "f")
-+      (match_operand:ANYF 2 "register_operand" "f")
-+      (match_operand:ANYF 3 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fmadd.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "fms<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+    (fma:ANYF
-+      (match_operand:ANYF 1 "register_operand" "f")
-+      (match_operand:ANYF 2 "register_operand" "f")
-+      (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
-+  "TARGET_HARD_FLOAT"
-+  "fmsub.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "nfma<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+    (neg:ANYF
-+      (fma:ANYF
-+        (match_operand:ANYF 1 "register_operand" "f")
-+        (match_operand:ANYF 2 "register_operand" "f")
-+        (match_operand:ANYF 3 "register_operand" "f"))))]
-+  "TARGET_HARD_FLOAT"
-+  "fnmadd.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "nfms<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+    (neg:ANYF
-+      (fma:ANYF
-+        (match_operand:ANYF 1 "register_operand" "f")
-+        (match_operand:ANYF 2 "register_operand" "f")
-+        (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
-+  "TARGET_HARD_FLOAT"
-+  "fnmsub.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;; modulo signed zeros, -(a*b+c) == -c-a*b
-+(define_insn "*nfma<mode>4_fastmath"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+    (minus:ANYF
-+      (match_operand:ANYF 3 "register_operand" "f")
-+      (mult:ANYF
-+        (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
-+        (match_operand:ANYF 2 "register_operand" "f"))))]
-+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
-+  "fnmadd.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;; modulo signed zeros, -(a*b-c) == c-a*b
-+(define_insn "*nfms<mode>4_fastmath"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+    (minus:ANYF
-+      (match_operand:ANYF 3 "register_operand" "f")
-+      (mult:ANYF
-+        (match_operand:ANYF 1 "register_operand" "f")
-+        (match_operand:ANYF 2 "register_operand" "f"))))]
-+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
-+  "fnmsub.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	ABSOLUTE VALUE
-+;;
-+;;  ....................
-+
-+(define_insn "abs<mode>2"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fabs.<fmt>\t%0,%1"
-+  [(set_attr "type" "fmove")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+
-+;;
-+;;  ....................
-+;;
-+;;	MIN/MAX
-+;;
-+;;  ....................
-+
-+(define_insn "smin<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+		   (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+			    (match_operand:ANYF 2 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fmin.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fmove")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "smax<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+		   (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+			    (match_operand:ANYF 2 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fmax.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fmove")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+
-+;;
-+;;  ....................
-+;;
-+;;	NEGATION and ONE'S COMPLEMENT '
-+;;
-+;;  ....................
-+
-+(define_insn "neg<mode>2"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+	(neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fneg.<fmt>\t%0,%1"
-+  [(set_attr "type" "fmove")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "one_cmpl<mode>2"
-+  [(set (match_operand:GPR 0 "register_operand" "=r")
-+	(not:GPR (match_operand:GPR 1 "register_operand" "r")))]
-+  ""
-+  "not\t%0,%1"
-+  [(set_attr "type" "logical")
-+   (set_attr "mode" "<MODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	LOGICAL
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "and<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+	(and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
-+		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+  ""
-+  "and\t%0,%1,%2"
-+  [(set_attr "type" "logical")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "ior<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+	(ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
-+		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+  ""
-+  "or\t%0,%1,%2"
-+  [(set_attr "type" "logical")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "xor<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+	(xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
-+		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+  ""
-+  "xor\t%0,%1,%2"
-+  [(set_attr "type" "logical")
-+   (set_attr "mode" "<MODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	TRUNCATION
-+;;
-+;;  ....................
-+
-+(define_insn "truncdfsf2"
-+  [(set (match_operand:SF 0 "register_operand" "=f")
-+	(float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.s.d\t%0,%1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "cnv_mode"	"D2S")   
-+   (set_attr "mode"	"SF")])
-+
-+;; Integer truncation patterns.  Truncating to HImode/QImode is a no-op.
-+;; Truncating from DImode to SImode is not, because we always keep SImode
-+;; values sign-extended in a register so we can safely use DImode branches
-+;; and comparisons on SImode values.
-+
-+(define_insn "truncdisi2"
-+  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
-+        (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
-+  "TARGET_64BIT"
-+  "@
-+    sext.w\t%0,%1
-+    sw\t%1,%0"
-+  [(set_attr "move_type" "arith,store")
-+   (set_attr "mode" "SI")])
-+
-+;; Combiner patterns to optimize shift/truncate combinations.
-+
-+(define_insn "*ashr_trunc<mode>"
-+  [(set (match_operand:SUBDI 0 "register_operand" "=r")
-+        (truncate:SUBDI
-+	  (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
-+		       (match_operand:DI 2 "const_arith_operand" ""))))]
-+  "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
-+  "sra\t%0,%1,%2"
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "*lshr32_trunc<mode>"
-+  [(set (match_operand:SUBDI 0 "register_operand" "=r")
-+        (truncate:SUBDI
-+	  (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
-+		       (const_int 32))))]
-+  "TARGET_64BIT"
-+  "sra\t%0,%1,32"
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "<MODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	ZERO EXTENSION
-+;;
-+;;  ....................
-+
-+;; Extension insns.
-+
-+(define_insn_and_split "zero_extendsidi2"
-+  [(set (match_operand:DI 0 "register_operand" "=r,r")
-+        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
-+  "TARGET_64BIT"
-+  "@
-+   #
-+   lwu\t%0,%1"
-+  "&& reload_completed && REG_P (operands[1])"
-+  [(set (match_dup 0)
-+        (ashift:DI (match_dup 1) (const_int 32)))
-+   (set (match_dup 0)
-+        (lshiftrt:DI (match_dup 0) (const_int 32)))]
-+  { operands[1] = gen_lowpart (DImode, operands[1]); }
-+  [(set_attr "move_type" "shift_shift,load")
-+   (set_attr "mode" "DI")])
-+
-+;; Combine is not allowed to convert this insn into a zero_extendsidi2
-+;; because of TRULY_NOOP_TRUNCATION.
-+
-+(define_insn_and_split "*clear_upper32"
-+  [(set (match_operand:DI 0 "register_operand" "=r,r")
-+        (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
-+		(const_int 4294967295)))]
-+  "TARGET_64BIT"
-+{
-+  if (which_alternative == 0)
-+    return "#";
-+
-+  operands[1] = gen_lowpart (SImode, operands[1]);
-+  return "lwu\t%0,%1";
-+}
-+  "&& reload_completed && REG_P (operands[1])"
-+  [(set (match_dup 0)
-+        (ashift:DI (match_dup 1) (const_int 32)))
-+   (set (match_dup 0)
-+        (lshiftrt:DI (match_dup 0) (const_int 32)))]
-+  ""
-+  [(set_attr "move_type" "shift_shift,load")
-+   (set_attr "mode" "DI")])
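For illustration (an editorial sketch, not part of the patch itself): base RV64I has no single register-to-register zero-extension of a 32-bit value, which is why the register alternative of zero_extendsidi2 above is split after reload into the two-shift sequence, while the memory alternative can use a plain "lwu".

    /* Hypothetical example, not taken from the patch: zero-extending a
       32-bit value on rv64.  For a register source this is expected to
       become "slli" by 32 followed by "srli" by 32 (the shift_shift split
       above); a memory source can instead be loaded with a single "lwu".  */
    unsigned long
    zext32 (unsigned int x)
    {
      return x;
    }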
-+
-+(define_insn_and_split "zero_extendhi<GPR:mode>2"
-+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+        (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
-+  ""
-+  "@
-+   #
-+   lhu\t%0,%1"
-+  "&& reload_completed && REG_P (operands[1])"
-+  [(set (match_dup 0)
-+        (ashift:GPR (match_dup 1) (match_dup 2)))
-+   (set (match_dup 0)
-+        (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
-+  {
-+    operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
-+    operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
-+  }
-+  [(set_attr "move_type" "shift_shift,load")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "zero_extendqi<SUPERQI:mode>2"
-+  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
-+        (zero_extend:SUPERQI
-+	     (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
-+  ""
-+  "@
-+   and\t%0,%1,0xff
-+   lbu\t%0,%1"
-+  [(set_attr "move_type" "andi,load")
-+   (set_attr "mode" "<SUPERQI:MODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	SIGN EXTENSION
-+;;
-+;;  ....................
-+
-+;; Extension insns.
-+;; Those for integer source operand are ordered widest source type first.
-+
-+;; When TARGET_64BIT, all SImode integer registers should already be in
-+;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2).  We can
-+;; therefore get rid of register->register instructions if we constrain
-+;; the source to be in the same register as the destination.
-+;;
-+;; The register alternative has type "arith" so that the pre-reload
-+;; scheduler will treat it as a move.  This reflects what happens if
-+;; the register alternative needs a reload.
-+(define_insn_and_split "extendsidi2"
-+  [(set (match_operand:DI 0 "register_operand" "=r,r")
-+        (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
-+  "TARGET_64BIT"
-+  "@
-+   #
-+   lw\t%0,%1"
-+  "&& reload_completed && register_operand (operands[1], VOIDmode)"
-+  [(set (match_dup 0) (match_dup 1))]
-+{
-+  if (REGNO (operands[0]) == REGNO (operands[1]))
-+    {
-+      emit_note (NOTE_INSN_DELETED);
-+      DONE;
-+    }
-+  operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
-+}
-+  [(set_attr "move_type" "move,load")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
-+  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
-+        (sign_extend:SUPERQI
-+	     (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
-+  ""
-+  "@
-+   #
-+   l<SHORT:size>\t%0,%1"
-+  "&& reload_completed && REG_P (operands[1])"
-+  [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
-+   (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
-+{
-+  operands[0] = gen_lowpart (SImode, operands[0]);
-+  operands[1] = gen_lowpart (SImode, operands[1]);
-+  operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
-+			 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
-+}
-+  [(set_attr "move_type" "shift_shift,load")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "extendsfdf2"
-+  [(set (match_operand:DF 0 "register_operand" "=f")
-+	(float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.d.s\t%0,%1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "cnv_mode"	"S2D")   
-+   (set_attr "mode"	"DF")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	CONVERSIONS
-+;;
-+;;  ....................
-+
-+(define_insn "fix_truncdfsi2"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(fix:SI (match_operand:DF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.w.d %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"D2I")])
-+
-+
-+(define_insn "fix_truncsfsi2"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(fix:SI (match_operand:SF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.w.s %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"S2I")])
-+
-+
-+(define_insn "fix_truncdfdi2"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(fix:DI (match_operand:DF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.l.d %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"D2I")])
-+
-+
-+(define_insn "fix_truncsfdi2"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(fix:DI (match_operand:SF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.l.s %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"S2I")])
-+
-+
-+(define_insn "floatsidf2"
-+  [(set (match_operand:DF 0 "register_operand" "=f")
-+	(float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.d.w\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"I2D")])
-+
-+
-+(define_insn "floatdidf2"
-+  [(set (match_operand:DF 0 "register_operand" "=f")
-+	(float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.d.l\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"I2D")])
-+
-+
-+(define_insn "floatsisf2"
-+  [(set (match_operand:SF 0 "register_operand" "=f")
-+	(float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.s.w\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"I2S")])
-+
-+
-+(define_insn "floatdisf2"
-+  [(set (match_operand:SF 0 "register_operand" "=f")
-+	(float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.s.l\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"I2S")])
-+
-+
-+(define_insn "floatunssidf2"
-+  [(set (match_operand:DF 0 "register_operand" "=f")
-+	(unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.d.wu\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"I2D")])
-+
-+
-+(define_insn "floatunsdidf2"
-+  [(set (match_operand:DF 0 "register_operand" "=f")
-+	(unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.d.lu\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"I2D")])
-+
-+
-+(define_insn "floatunssisf2"
-+  [(set (match_operand:SF 0 "register_operand" "=f")
-+	(unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.s.wu\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"I2S")])
-+
-+
-+(define_insn "floatunsdisf2"
-+  [(set (match_operand:SF 0 "register_operand" "=f")
-+	(unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.s.lu\t%0,%z1"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"I2S")])
-+
-+
-+(define_insn "fixuns_truncdfsi2"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.wu.d %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"D2I")])
-+
-+
-+(define_insn "fixuns_truncsfsi2"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fcvt.wu.s %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"S2I")])
-+
-+
-+(define_insn "fixuns_truncdfdi2"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.lu.d %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"DF")
-+   (set_attr "cnv_mode"	"D2I")])
-+
-+
-+(define_insn "fixuns_truncsfdi2"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && TARGET_64BIT"
-+  "fcvt.lu.s %0,%1,rtz"
-+  [(set_attr "type"	"fcvt")
-+   (set_attr "mode"	"SF")
-+   (set_attr "cnv_mode"	"S2I")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	DATA MOVEMENT
-+;;
-+;;  ....................
-+
-+;; Lower-level instructions for loading an address from the GOT.
-+;; We could use MEMs, but an unspec gives more optimization
-+;; opportunities.
-+
-+(define_insn "got_load<mode>"
-+   [(set (match_operand:P 0 "register_operand" "=r")
-+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
-+		 UNSPEC_LOAD_GOT))]
-+  "flag_pic"
-+  "la\t%0,%1"
-+   [(set_attr "got" "load")
-+    (set_attr "mode" "<MODE>")])
-+
-+(define_insn "tls_add_tp_le<mode>"
-+  [(set (match_operand:P 0 "register_operand" "=r")
-+	(unspec:P [(match_operand:P 1 "register_operand" "r")
-+		   (match_operand:P 2 "register_operand" "r")
-+		   (match_operand:P 3 "symbolic_operand" "")]
-+		  UNSPEC_TLS_LE))]
-+  "!flag_pic || flag_pie"
-+  "add\t%0,%1,%2,%%tprel_add(%3)"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "got_load_tls_gd<mode>"
-+  [(set (match_operand:P 0 "register_operand" "=r")
-+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
-+                 UNSPEC_TLS_GD))]
-+  "flag_pic"
-+  "la.tls.gd\t%0,%1"
-+  [(set_attr "got" "load")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "got_load_tls_ie<mode>"
-+  [(set (match_operand:P 0 "register_operand" "=r")
-+       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
-+                 UNSPEC_TLS_IE))]
-+  "flag_pic"
-+  "la.tls.ie\t%0,%1"
-+  [(set_attr "got" "load")
-+   (set_attr "mode" "<MODE>")])
-+
-+;; Instructions for adding the low 12 bits of an address to a register.
-+;; Operand 2 is the address: riscv_print_operand works out which relocation
-+;; should be applied.
-+
-+(define_insn "*low<mode>"
-+  [(set (match_operand:P 0 "register_operand" "=r")
-+	(lo_sum:P (match_operand:P 1 "register_operand" "r")
-+		  (match_operand:P 2 "immediate_operand" "")))]
-+  ""
-+  "add\t%0,%1,%R2"
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "<MODE>")])
-+
-+;; Allow combine to split complex const_int load sequences, using operand 2
-+;; to store the intermediate results.  See move_operand for details.
-+(define_split
-+  [(set (match_operand:GPR 0 "register_operand")
-+	(match_operand:GPR 1 "splittable_const_int_operand"))
-+   (clobber (match_operand:GPR 2 "register_operand"))]
-+  ""
-+  [(const_int 0)]
-+{
-+  riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
-+  DONE;
-+})
-+
-+;; Likewise, for symbolic operands.
-+(define_split
-+  [(set (match_operand:P 0 "register_operand")
-+	(match_operand:P 1))
-+   (clobber (match_operand:P 2 "register_operand"))]
-+  "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
-+  [(set (match_dup 0) (match_dup 3))]
-+{
-+  riscv_split_symbol (operands[2], operands[1],
-+		     MAX_MACHINE_MODE, &operands[3]);
-+})
-+
-+;; 64-bit integer moves
-+
-+;; Unlike most other insns, the move insns can't be split with
-+;; different predicates, because register spilling and other parts of
-+;; the compiler have memoized the insn number already.
-+
-+(define_expand "movdi"
-+  [(set (match_operand:DI 0 "")
-+	(match_operand:DI 1 ""))]
-+  ""
-+{
-+  if (riscv_legitimize_move (DImode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+(define_insn "*movdi_32bit"
-+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
-+	(match_operand:DI 1 "move_operand" "r,i,m,r,*J*r,*m,*f,*f"))]
-+  "!TARGET_64BIT
-+   && (register_operand (operands[0], DImode)
-+       || reg_or_0_operand (operands[1], DImode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "*movdi_64bit"
-+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
-+	(match_operand:DI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
-+  "TARGET_64BIT
-+   && (register_operand (operands[0], DImode)
-+       || reg_or_0_operand (operands[1], DImode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
-+   (set_attr "mode" "DI")])
-+
-+;; 32-bit Integer moves
-+
-+;; Unlike most other insns, the move insns can't be split with
-+;; different predicates, because register spilling and other parts of
-+;; the compiler have memoized the insn number already.
-+
-+(define_expand "mov<mode>"
-+  [(set (match_operand:IMOVE32 0 "")
-+	(match_operand:IMOVE32 1 ""))]
-+  ""
-+{
-+  if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+(define_insn "*mov<mode>_internal"
-+  [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
-+	(match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
-+  "(register_operand (operands[0], <MODE>mode)
-+    || reg_or_0_operand (operands[1], <MODE>mode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
-+   (set_attr "mode" "SI")])
-+
-+;; 16-bit Integer moves
-+
-+;; Unlike most other insns, the move insns can't be split with
-+;; different predicates, because register spilling and other parts of
-+;; the compiler have memoized the insn number already.
-+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
-+
-+(define_expand "movhi"
-+  [(set (match_operand:HI 0 "")
-+	(match_operand:HI 1 ""))]
-+  ""
-+{
-+  if (riscv_legitimize_move (HImode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+(define_insn "*movhi_internal"
-+  [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
-+	(match_operand:HI 1 "move_operand"         "r,T,m,rJ,*r*J,*f"))]
-+  "(register_operand (operands[0], HImode)
-+    || reg_or_0_operand (operands[1], HImode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,const,load,store,mtc,mfc")
-+   (set_attr "mode" "HI")])
-+
-+;; HImode constant generation; see riscv_move_integer for details.
-+;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
-+
-+(define_insn "add<mode>hi3"
-+  [(set (match_operand:HI 0 "register_operand" "=r,r")
-+	(plus:HI (match_operand:HISI 1 "register_operand" "r,r")
-+		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
-+  ""
-+  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
-+  [(set_attr "type" "arith")
-+   (set_attr "mode" "HI")])
-+
-+(define_insn "xor<mode>hi3"
-+  [(set (match_operand:HI 0 "register_operand" "=r,r")
-+	(xor:HI (match_operand:HISI 1 "register_operand" "r,r")
-+		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
-+  ""
-+  "xor\t%0,%1,%2"
-+  [(set_attr "type" "logical")
-+   (set_attr "mode" "HI")])
-+
-+;; 8-bit Integer moves
-+
-+(define_expand "movqi"
-+  [(set (match_operand:QI 0 "")
-+	(match_operand:QI 1 ""))]
-+  ""
-+{
-+  if (riscv_legitimize_move (QImode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+(define_insn "*movqi_internal"
-+  [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
-+	(match_operand:QI 1 "move_operand"         "r,I,m,rJ,*r*J,*f"))]
-+  "(register_operand (operands[0], QImode)
-+    || reg_or_0_operand (operands[1], QImode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,const,load,store,mtc,mfc")
-+   (set_attr "mode" "QI")])
-+
-+;; 32-bit floating point moves
-+
-+(define_expand "movsf"
-+  [(set (match_operand:SF 0 "")
-+	(match_operand:SF 1 ""))]
-+  ""
-+{
-+  if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+(define_insn "*movsf_hardfloat"
-+  [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
-+	(match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
-+  "TARGET_HARD_FLOAT
-+   && (register_operand (operands[0], SFmode)
-+       || reg_or_0_operand (operands[1], SFmode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
-+   (set_attr "mode" "SF")])
-+
-+(define_insn "*movsf_softfloat"
-+  [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
-+	(match_operand:SF 1 "move_operand" "Gr,m,r"))]
-+  "TARGET_SOFT_FLOAT
-+   && (register_operand (operands[0], SFmode)
-+       || reg_or_0_operand (operands[1], SFmode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,load,store")
-+   (set_attr "mode" "SF")])
-+
-+;; 64-bit floating point moves
-+
-+(define_expand "movdf"
-+  [(set (match_operand:DF 0 "")
-+	(match_operand:DF 1 ""))]
-+  ""
-+{
-+  if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+;; In RV32, we lack mtf.d/mff.d.  Go through memory instead.
-+;; (except for moving a constant 0 to an FPR, for which we use fcvt.d.w).
-+(define_insn "*movdf_hardfloat_rv32"
-+  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
-+	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
-+  "!TARGET_64BIT && TARGET_HARD_FLOAT
-+   && (register_operand (operands[0], DFmode)
-+       || reg_or_0_operand (operands[1], DFmode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
-+   (set_attr "mode" "DF")])
-+
-+(define_insn "*movdf_hardfloat_rv64"
-+  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
-+	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
-+  "TARGET_64BIT && TARGET_HARD_FLOAT
-+   && (register_operand (operands[0], DFmode)
-+       || reg_or_0_operand (operands[1], DFmode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
-+   (set_attr "mode" "DF")])
-+
-+(define_insn "*movdf_softfloat"
-+  [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
-+	(match_operand:DF 1 "move_operand" "rG,m,rG"))]
-+  "TARGET_SOFT_FLOAT
-+   && (register_operand (operands[0], DFmode)
-+       || reg_or_0_operand (operands[1], DFmode))"
-+  { return riscv_output_move (operands[0], operands[1]); }
-+  [(set_attr "move_type" "move,load,store")
-+   (set_attr "mode" "DF")])
-+
-+;; 128-bit integer moves
-+
-+(define_expand "movti"
-+  [(set (match_operand:TI 0)
-+	(match_operand:TI 1))]
-+  "TARGET_64BIT"
-+{
-+  if (riscv_legitimize_move (TImode, operands[0], operands[1]))
-+    DONE;
-+})
-+
-+(define_insn "*movti"
-+  [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
-+	(match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
-+  "TARGET_64BIT
-+   && (register_operand (operands[0], TImode)
-+       || reg_or_0_operand (operands[1], TImode))"
-+  "#"
-+  [(set_attr "move_type" "move,const,load,store")
-+   (set_attr "mode" "TI")])
-+
-+(define_split
-+  [(set (match_operand:MOVE64 0 "nonimmediate_operand")
-+	(match_operand:MOVE64 1 "move_operand"))]
-+  "reload_completed && !TARGET_64BIT
-+   && riscv_split_64bit_move_p (operands[0], operands[1])"
-+  [(const_int 0)]
-+{
-+  riscv_split_doubleword_move (operands[0], operands[1]);
-+  DONE;
-+})
-+
-+(define_split
-+  [(set (match_operand:MOVE128 0 "nonimmediate_operand")
-+	(match_operand:MOVE128 1 "move_operand"))]
-+  "TARGET_64BIT && reload_completed"
-+  [(const_int 0)]
-+{
-+  riscv_split_doubleword_move (operands[0], operands[1]);
-+  DONE;
-+})
-+
-+;; Moves of floating-point register values that are split into word-sized halves.
-+
-+;; Load the low word of operand 0 with operand 1.
-+(define_insn "load_low<mode>"
-+  [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
-+	(unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
-+		       UNSPEC_LOAD_LOW))]
-+  "TARGET_HARD_FLOAT"
-+{
-+  operands[0] = riscv_subword (operands[0], 0);
-+  return riscv_output_move (operands[0], operands[1]);
-+}
-+  [(set_attr "move_type" "mtc,fpload")
-+   (set_attr "mode" "<HALFMODE>")])
-+
-+;; Load the high word of operand 0 from operand 1, preserving the value
-+;; in the low word.
-+(define_insn "load_high<mode>"
-+  [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
-+	(unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
-+			(match_operand:SPLITF 2 "register_operand" "0,0")]
-+		       UNSPEC_LOAD_HIGH))]
-+  "TARGET_HARD_FLOAT"
-+{
-+  operands[0] = riscv_subword (operands[0], 1);
-+  return riscv_output_move (operands[0], operands[1]);
-+}
-+  [(set_attr "move_type" "mtc,fpload")
-+   (set_attr "mode" "<HALFMODE>")])
-+
-+;; Store one word of operand 1 in operand 0.  Operand 2 is 1 to store the
-+;; high word and 0 to store the low word.
-+(define_insn "store_word<mode>"
-+  [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
-+	(unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
-+			    (match_operand 2 "const_int_operand")]
-+			   UNSPEC_STORE_WORD))]
-+  "TARGET_HARD_FLOAT"
-+{
-+  operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
-+  return riscv_output_move (operands[0], operands[1]);
-+}
-+  [(set_attr "move_type" "mfc,fpstore")
-+   (set_attr "mode" "<HALFMODE>")])
-+
-+;; Expand in-line code to clear the instruction cache between operand[0] and
-+;; operand[1].
-+(define_expand "clear_cache"
-+  [(match_operand 0 "pmode_register_operand")
-+   (match_operand 1 "pmode_register_operand")]
-+  ""
-+  "
-+{
-+  emit_insn(gen_fence_i());
-+  DONE;
-+}")
-+
-+(define_insn "fence"
-+  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
-+  ""
-+  "%|fence%-")
-+
-+(define_insn "fence_i"
-+  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
-+  ""
-+  "fence.i")
-+
-+;; Block moves, see riscv.c for more details.
-+;; Argument 0 is the destination
-+;; Argument 1 is the source
-+;; Argument 2 is the length
-+;; Argument 3 is the alignment
-+
-+(define_expand "movmemsi"
-+  [(parallel [(set (match_operand:BLK 0 "general_operand")
-+		   (match_operand:BLK 1 "general_operand"))
-+	      (use (match_operand:SI 2 ""))
-+	      (use (match_operand:SI 3 "const_int_operand"))])]
-+  "!TARGET_MEMCPY"
-+{
-+  if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
-+    DONE;
-+  else
-+    FAIL;
-+})
-+
-+;;
-+;;  ....................
-+;;
-+;;	SHIFTS
-+;;
-+;;  ....................
-+
-+(define_insn "<optab>si3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	(any_shift:SI (match_operand:SI 1 "register_operand" "r")
-+		       (match_operand:SI 2 "arith_operand" "rI")))]
-+  ""
-+{
-+  if (GET_CODE (operands[2]) == CONST_INT)
-+    operands[2] = GEN_INT (INTVAL (operands[2])
-+			   & (GET_MODE_BITSIZE (SImode) - 1));
-+
-+  return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
-+}
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*<optab>disi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	     (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
-+		      (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
-+  "TARGET_64BIT"
-+  "<insn>w\t%0,%1,%2"
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*ashldi3_truncsi"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+          (truncate:SI
-+	     (ashift:DI (match_operand:DI 1 "register_operand" "r")
-+		      (match_operand:DI 2 "const_arith_operand" "I"))))]
-+  "TARGET_64BIT && INTVAL (operands[2]) < 32"
-+  "sllw\t%0,%1,%2"
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "*ashldisi3"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+	  (ashift:SI (match_operand:GPR 1 "register_operand" "r")
-+		      (match_operand:GPR2 2 "arith_operand" "rI")))]
-+  "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
-+  "sllw\t%0,%1,%2"
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "SI")])
-+
-+(define_insn "<optab>di3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(any_shift:DI (match_operand:DI 1 "register_operand" "r")
-+		       (match_operand:DI 2 "arith_operand" "rI")))]
-+  "TARGET_64BIT"
-+{
-+  if (GET_CODE (operands[2]) == CONST_INT)
-+    operands[2] = GEN_INT (INTVAL (operands[2])
-+			   & (GET_MODE_BITSIZE (DImode) - 1));
-+
-+  return "<insn>\t%0,%1,%2";
-+}
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "<optab>si3_extend"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+	(sign_extend:DI
-+	   (any_shift:SI (match_operand:SI 1 "register_operand" "r")
-+			 (match_operand:SI 2 "arith_operand" "rI"))))]
-+  "TARGET_64BIT"
-+{
-+  if (GET_CODE (operands[2]) == CONST_INT)
-+    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
-+
-+  return "<insn>w\t%0,%1,%2";
-+}
-+  [(set_attr "type" "shift")
-+   (set_attr "mode" "SI")])
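Another illustrative note (an editorial sketch, not part of the patch itself): the SImode shift patterns above use the *w instruction forms on RV64 so that the 32-bit result stays sign-extended in the 64-bit register, which is the invariant the truncation and comparison patterns in this file rely on.

    /* Hypothetical example, not taken from the patch: a variable 32-bit
       left shift on rv64 is expected to be emitted as "sllw", keeping the
       SImode result sign-extended in the destination register.  */
    int
    shl32 (int x, int n)
    {
      return x << n;
    }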
-+
-+;;
-+;;  ....................
-+;;
-+;;	CONDITIONAL BRANCHES
-+;;
-+;;  ....................
-+
-+;; Conditional branches
-+
-+(define_insn "*branch_order<mode>"
-+  [(set (pc)
-+	(if_then_else
-+	 (match_operator 1 "order_operator"
-+			 [(match_operand:GPR 2 "register_operand" "r")
-+			  (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
-+	 (label_ref (match_operand 0 "" ""))
-+	 (pc)))]
-+  ""
-+{
-+  if (GET_CODE (operands[3]) == CONST_INT)
-+    return "b%C1z\t%2,%0";
-+  return "b%C1\t%2,%3,%0";
-+}
-+  [(set_attr "type" "branch")
-+   (set_attr "mode" "none")])
-+
-+;; Used to implement built-in functions.
-+(define_expand "condjump"
-+  [(set (pc)
-+	(if_then_else (match_operand 0)
-+		      (label_ref (match_operand 1))
-+		      (pc)))])
-+
-+(define_expand "cbranch<mode>4"
-+  [(set (pc)
-+	(if_then_else (match_operator 0 "comparison_operator"
-+		       [(match_operand:GPR 1 "register_operand")
-+		        (match_operand:GPR 2 "nonmemory_operand")])
-+		      (label_ref (match_operand 3 ""))
-+		      (pc)))]
-+  ""
-+{
-+  riscv_expand_conditional_branch (operands);
-+  DONE;
-+})
-+
-+(define_expand "cbranch<mode>4"
-+  [(set (pc)
-+	(if_then_else (match_operator 0 "comparison_operator"
-+		       [(match_operand:SCALARF 1 "register_operand")
-+		        (match_operand:SCALARF 2 "register_operand")])
-+		      (label_ref (match_operand 3 ""))
-+		      (pc)))]
-+  ""
-+{
-+  riscv_expand_conditional_branch (operands);
-+  DONE;
-+})
-+
-+(define_insn_and_split "*branch_on_bit<GPR:mode>"
-+  [(set (pc)
-+	(if_then_else
-+	 (match_operator 0 "equality_operator"
-+	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
-+		 (const_int 1)
-+		 (match_operand 3 "branch_on_bit_operand"))
-+		 (const_int 0)])
-+	 (label_ref (match_operand 1))
-+	 (pc)))
-+   (clobber (match_scratch:GPR 4 "=&r"))]
-+  ""
-+  "#"
-+  "reload_completed"
-+  [(set (match_dup 4)
-+        (ashift:GPR (match_dup 2) (match_dup 3)))
-+   (set (pc)
-+	(if_then_else
-+	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
-+	 (label_ref (match_operand 1))
-+	 (pc)))]
-+{
-+  int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
-+  operands[3] = GEN_INT (shift);
-+
-+  if (GET_CODE (operands[0]) == EQ)
-+    operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
-+  else
-+    operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
-+})
-+
-+(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
-+  [(set (pc)
-+	(if_then_else
-+	 (match_operator 0 "equality_operator"
-+	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
-+		 (match_operand 3 "branch_on_bit_operand")
-+		 (const_int 0))
-+		 (const_int 0)])
-+	 (label_ref (match_operand 1))
-+	 (pc)))
-+   (clobber (match_scratch:GPR 4 "=&r"))]
-+  ""
-+  "#"
-+  "reload_completed"
-+  [(set (match_dup 4)
-+        (ashift:GPR (match_dup 2) (match_dup 3)))
-+   (set (pc)
-+	(if_then_else
-+	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
-+	 (label_ref (match_operand 1))
-+	 (pc)))]
-+{
-+  operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
-+})
-+
-+;;
-+;;  ....................
-+;;
-+;;	SETTING A REGISTER FROM A COMPARISON
-+;;
-+;;  ....................
-+
-+;; Destination is always set in SI mode.
-+
-+(define_expand "cstore<mode>4"
-+  [(set (match_operand:SI 0 "register_operand")
-+	(match_operator:SI 1 "order_operator"
-+	 [(match_operand:GPR 2 "register_operand")
-+	  (match_operand:GPR 3 "nonmemory_operand")]))]
-+  ""
-+{
-+  riscv_expand_scc (operands);
-+  DONE;
-+})
-+
-+(define_insn "cstore<mode>4"
-+   [(set (match_operand:SI 0 "register_operand" "=r")
-+        (match_operator:SI 1 "fp_order_operator"
-+	      [(match_operand:SCALARF 2 "register_operand" "f")
-+	       (match_operand:SCALARF 3 "register_operand" "f")]))]
-+  "TARGET_HARD_FLOAT"
-+{
-+  if (GET_CODE (operands[1]) == NE)
-+    return "feq.<fmt>\t%0,%2,%3; seqz %0, %0";
-+  return "f%C1.<fmt>\t%0,%2,%3";
-+}
-+  [(set_attr "type" "fcmp")
-+   (set_attr "mode" "<UNITMODE>")])
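A final illustrative note (an editorial sketch, not part of the patch itself): the floating-point cstore pattern above has no direct "not equal" comparison instruction to emit, so it computes "feq" and inverts the result with "seqz"; the ordered comparisons map directly onto feq/flt/fle.

    /* Hypothetical example, not taken from the patch: a double-precision
       inequality test is expected to expand to "feq.d" followed by "seqz",
       matching the NE case handled in the cstore pattern above.  */
    int
    fp_ne (double a, double b)
    {
      return a != b;
    }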
-+
-+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
-+  [(set (match_operand:GPR2 0 "register_operand" "=r")
-+	(eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+		 (const_int 0)))]
-+  ""
-+  "seqz\t%0,%1"
-+  [(set_attr "type" "slt")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
-+  [(set (match_operand:GPR2 0 "register_operand" "=r")
-+	(ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+		 (const_int 0)))]
-+  ""
-+  "snez\t%0,%1"
-+  [(set_attr "type" "slt")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
-+  [(set (match_operand:GPR2 0 "register_operand" "=r")
-+	(any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+		     (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
-+  ""
-+  "slt<u>\t%0,%z2,%1"
-+  [(set_attr "type" "slt")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
-+  [(set (match_operand:GPR2 0 "register_operand" "=r")
-+	(any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+		     (const_int 1)))]
-+  ""
-+  "slt<u>\t%0,zero,%1"
-+  [(set_attr "type" "slt")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
-+  [(set (match_operand:GPR2 0 "register_operand" "=r")
-+	(any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+		     (match_operand:GPR 2 "arith_operand" "rI")))]
-+  ""
-+  "slt<u>\t%0,%1,%2"
-+  [(set_attr "type" "slt")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
-+  [(set (match_operand:GPR2 0 "register_operand" "=r")
-+	(any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+		     (match_operand:GPR 2 "sle_operand" "")))]
-+  ""
-+{
-+  operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
-+  return "slt<u>\t%0,%1,%2";
-+}
-+  [(set_attr "type" "slt")
-+   (set_attr "mode" "<GPR:MODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	UNCONDITIONAL BRANCHES
-+;;
-+;;  ....................
-+
-+;; Unconditional branches.
-+
-+(define_insn "jump"
-+  [(set (pc)
-+	(label_ref (match_operand 0 "" "")))]
-+  ""
-+  "j\t%l0"
-+  [(set_attr "type"	"jump")
-+   (set_attr "mode"	"none")])
-+
-+(define_expand "indirect_jump"
-+  [(set (pc) (match_operand 0 "register_operand"))]
-+  ""
-+{
-+  operands[0] = force_reg (Pmode, operands[0]);
-+  if (Pmode == SImode)
-+    emit_jump_insn (gen_indirect_jumpsi (operands[0]));
-+  else
-+    emit_jump_insn (gen_indirect_jumpdi (operands[0]));
-+  DONE;
-+})
-+
-+(define_insn "indirect_jump<mode>"
-+  [(set (pc) (match_operand:P 0 "register_operand" "l"))]
-+  ""
-+  "jr\t%0"
-+  [(set_attr "type" "jump")
-+   (set_attr "mode" "none")])
-+
-+(define_expand "tablejump"
-+  [(set (pc) (match_operand 0 "register_operand" ""))
-+	      (use (label_ref (match_operand 1 "" "")))]
-+  ""
-+{
-+  if (CASE_VECTOR_PC_RELATIVE)
-+      operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
-+					 gen_rtx_LABEL_REF (Pmode, operands[1]),
-+					 NULL_RTX, 0, OPTAB_DIRECT);
-+
-+  if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
-+    emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
-+  else
-+    emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
-+  DONE;
-+})
-+
-+(define_insn "tablejump<mode>"
-+  [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
-+   (use (label_ref (match_operand 1 "" "")))]
-+  ""
-+  "jr\t%0"
-+  [(set_attr "type" "jump")
-+   (set_attr "mode" "none")])
-+
-+;;
-+;;  ....................
-+;;
-+;;	Function prologue/epilogue
-+;;
-+;;  ....................
-+;;
-+
-+(define_expand "prologue"
-+  [(const_int 1)]
-+  ""
-+{
-+  riscv_expand_prologue ();
-+  DONE;
-+})
-+
-+;; Block any insns from being moved before this point, since the
-+;; profiling call to mcount can use various registers that aren't
-+;; saved or used to pass arguments.
-+
-+(define_insn "blockage"
-+  [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
-+  ""
-+  ""
-+  [(set_attr "type" "ghost")
-+   (set_attr "mode" "none")])
-+
-+(define_expand "epilogue"
-+  [(const_int 2)]
-+  ""
-+{
-+  riscv_expand_epilogue (false);
-+  DONE;
-+})
-+
-+(define_expand "sibcall_epilogue"
-+  [(const_int 2)]
-+  ""
-+{
-+  riscv_expand_epilogue (true);
-+  DONE;
-+})
-+
-+;; Trivial return.  Make it look like a normal return insn as that
-+;; allows jump optimizations to work better.
-+
-+(define_expand "return"
-+  [(simple_return)]
-+  "riscv_can_use_return_insn ()"
-+  "")
-+
-+(define_insn "simple_return"
-+  [(simple_return)]
-+  ""
-+  "ret"
-+  [(set_attr "type"	"jump")
-+   (set_attr "mode"	"none")])
-+
-+;; Normal return.
-+
-+(define_insn "simple_return_internal"
-+  [(simple_return)
-+   (use (match_operand 0 "pmode_register_operand" ""))]
-+  ""
-+  "jr\t%0"
-+  [(set_attr "type"	"jump")
-+   (set_attr "mode"	"none")])
-+
-+;; This is used in compiling the unwind routines.
-+(define_expand "eh_return"
-+  [(use (match_operand 0 "general_operand"))]
-+  ""
-+{
-+  if (GET_MODE (operands[0]) != word_mode)
-+    operands[0] = convert_to_mode (word_mode, operands[0], 0);
-+  if (TARGET_64BIT)
-+    emit_insn (gen_eh_set_lr_di (operands[0]));
-+  else
-+    emit_insn (gen_eh_set_lr_si (operands[0]));
-+  DONE;
-+})
-+
-+;; Clobber the return address on the stack.  We can't expand this
-+;; until we know where it will be put in the stack frame.
-+
-+(define_insn "eh_set_lr_si"
-+  [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
-+   (clobber (match_scratch:SI 1 "=&r"))]
-+  "! TARGET_64BIT"
-+  "#")
-+
-+(define_insn "eh_set_lr_di"
-+  [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
-+   (clobber (match_scratch:DI 1 "=&r"))]
-+  "TARGET_64BIT"
-+  "#")
-+
-+(define_split
-+  [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
-+   (clobber (match_scratch 1))]
-+  "reload_completed"
-+  [(const_int 0)]
-+{
-+  riscv_set_return_address (operands[0], operands[1]);
-+  DONE;
-+})
-+
-+;;
-+;;  ....................
-+;;
-+;;	FUNCTION CALLS
-+;;
-+;;  ....................
-+
-+;; Sibling calls.  All these patterns use jump instructions.
-+
-+;; call_insn_operand will only accept constant
-+;; addresses if a direct jump is acceptable.  Since the 'S' constraint
-+;; is defined in terms of call_insn_operand, the same is true of the
-+;; constraints.
-+
-+;; When we use an indirect jump, we need a register that will be
-+;; preserved by the epilogue (constraint j).
-+
-+(define_expand "sibcall"
-+  [(parallel [(call (match_operand 0 "")
-+		    (match_operand 1 ""))
-+	      (use (match_operand 2 ""))	;; next_arg_reg
-+	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx
-+  ""
-+{
-+  riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
-+  DONE;
-+})
-+
-+(define_insn "sibcall_internal"
-+  [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
-+	 (match_operand 1 "" ""))]
-+  "SIBLING_CALL_P (insn)"
-+  { return REG_P (operands[0]) ? "jr\t%0"
-+	   : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
-+	   : "tail\t%0@"; }
-+  [(set_attr "type" "call")])
-+
-+(define_expand "sibcall_value"
-+  [(parallel [(set (match_operand 0 "")
-+		   (call (match_operand 1 "")
-+			 (match_operand 2 "")))
-+	      (use (match_operand 3 ""))])]		;; next_arg_reg
-+  ""
-+{
-+  riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
-+  DONE;
-+})
-+
-+(define_insn "sibcall_value_internal"
-+  [(set (match_operand 0 "register_operand" "")
-+        (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
-+              (match_operand 2 "" "")))]
-+  "SIBLING_CALL_P (insn)"
-+  { return REG_P (operands[1]) ? "jr\t%1"
-+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
-+	   : "tail\t%1@"; }
-+  [(set_attr "type" "call")])
-+
-+(define_insn "sibcall_value_multiple_internal"
-+  [(set (match_operand 0 "register_operand" "")
-+        (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
-+              (match_operand 2 "" "")))
-+   (set (match_operand 3 "register_operand" "")
-+	(call (mem:SI (match_dup 1))
-+	      (match_dup 2)))]
-+  "SIBLING_CALL_P (insn)"
-+  { return REG_P (operands[1]) ? "jr\t%1"
-+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
-+	   : "tail\t%1@"; }
-+  [(set_attr "type" "call")])
-+
-+(define_expand "call"
-+  [(parallel [(call (match_operand 0 "")
-+		    (match_operand 1 ""))
-+	      (use (match_operand 2 ""))	;; next_arg_reg
-+	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx
-+  ""
-+{
-+  riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
-+  DONE;
-+})
-+
-+(define_insn "call_internal"
-+  [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S"))
-+	 (match_operand 1 "" ""))
-+   (clobber (reg:SI RETURN_ADDR_REGNUM))]
-+  ""
-+  { return REG_P (operands[0]) ? "jalr\t%0"
-+	   : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
-+	   : "call\t%0@"; }
-+  [(set_attr "type" "call")])
-+
-+(define_expand "call_value"
-+  [(parallel [(set (match_operand 0 "")
-+		   (call (match_operand 1 "")
-+			 (match_operand 2 "")))
-+	      (use (match_operand 3 ""))])]		;; next_arg_reg
-+  ""
-+{
-+  riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
-+  DONE;
-+})
-+
-+;; See comment for call_internal.
-+(define_insn "call_value_internal"
-+  [(set (match_operand 0 "register_operand" "")
-+        (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
-+              (match_operand 2 "" "")))
-+   (clobber (reg:SI RETURN_ADDR_REGNUM))]
-+  ""
-+  { return REG_P (operands[1]) ? "jalr\t%1"
-+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
-+	   : "call\t%1@"; }
-+  [(set_attr "type" "call")])
-+
-+;; See comment for call_internal.
-+(define_insn "call_value_multiple_internal"
-+  [(set (match_operand 0 "register_operand" "")
-+        (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
-+              (match_operand 2 "" "")))
-+   (set (match_operand 3 "register_operand" "")
-+	(call (mem:SI (match_dup 1))
-+	      (match_dup 2)))
-+   (clobber (reg:SI RETURN_ADDR_REGNUM))]
-+  ""
-+  { return REG_P (operands[1]) ? "jalr\t%1"
-+	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
-+	   : "call\t%1@"; }
-+  [(set_attr "type" "call")])
-+
-+;; Call subroutine returning any type.
-+
-+(define_expand "untyped_call"
-+  [(parallel [(call (match_operand 0 "")
-+		    (const_int 0))
-+	      (match_operand 1 "")
-+	      (match_operand 2 "")])]
-+  ""
-+{
-+  int i;
-+
-+  emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
-+
-+  for (i = 0; i < XVECLEN (operands[2], 0); i++)
-+    {
-+      rtx set = XVECEXP (operands[2], 0, i);
-+      riscv_emit_move (SET_DEST (set), SET_SRC (set));
-+    }
-+
-+  emit_insn (gen_blockage ());
-+  DONE;
-+})
-+
-+(define_insn "nop"
-+  [(const_int 0)]
-+  ""
-+  "nop"
-+  [(set_attr "type"	"nop")
-+   (set_attr "mode"	"none")])
-+
-+(define_insn "trap"
-+  [(trap_if (const_int 1) (const_int 0))]
-+  ""
-+  "sbreak")
-+
-+(define_insn "gpr_save"
-+  [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_SAVE)
-+   (clobber (reg:SI T0_REGNUM))
-+   (clobber (reg:SI T1_REGNUM))]
-+  ""
-+  { return riscv_output_gpr_save (INTVAL (operands[0])); })
-+
-+(define_insn "gpr_restore"
-+  [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_RESTORE)]
-+  ""
-+  "tail\t__riscv_restore_%0")
-+
-+(define_insn "gpr_restore_return"
-+  [(return)
-+   (use (match_operand 0 "pmode_register_operand" ""))
-+   (const_int 0)]
-+  ""
-+  "")
-+
-+(include "sync.md")
-+(include "peephole.md")
-+(include "generic.md")
-diff -urN empty/gcc/config/riscv/riscv-modes.def gcc-5.3.0/gcc/config/riscv/riscv-modes.def
---- empty/gcc/config/riscv/riscv-modes.def	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv-modes.def	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,26 @@
-+/* Extra machine modes for RISC-V target.
-+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+FLOAT_MODE (TF, 16, ieee_quad_format);
-+
-+/* Vector modes.  */
-+VECTOR_MODES (INT, 4);        /*       V8QI V4HI V2SI */
-+VECTOR_MODES (FLOAT, 4);      /*            V4HF V2SF */
-diff -urN empty/gcc/config/riscv/riscv.opt gcc-5.3.0/gcc/config/riscv/riscv.opt
---- empty/gcc/config/riscv/riscv.opt	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.opt	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,87 @@
-+; Options for the MIPS port of the compiler
-+;
-+; Copyright (C) 2005, 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
-+;
-+; This file is part of GCC.
-+;
-+; GCC is free software; you can redistribute it and/or modify it under
-+; the terms of the GNU General Public License as published by the Free
-+; Software Foundation; either version 3, or (at your option) any later
-+; version.
-+;
-+; GCC is distributed in the hope that it will be useful, but WITHOUT
-+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
-+; License for more details.
-+;
-+; You should have received a copy of the GNU General Public License
-+; along with GCC; see the file COPYING3.  If not see
-+; <http://www.gnu.org/licenses/>.
-+
-+m32
-+Target RejectNegative Mask(32BIT)
-+Generate RV32 code
-+
-+m64
-+Target RejectNegative InverseMask(32BIT, 64BIT)
-+Generate RV64 code
-+
-+mbranch-cost=
-+Target RejectNegative Joined UInteger Var(riscv_branch_cost)
-+-mbranch-cost=COST	Set the cost of branches to roughly COST instructions
-+
-+mhard-float
-+Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
-+Allow the use of hardware floating-point ABI and instructions
-+
-+mmemcpy
-+Target Report Mask(MEMCPY)
-+Don't optimize block moves
-+
-+mplt
-+Target Report Var(TARGET_PLT) Init(1)
-+When generating -fpic code, allow the use of PLTs. Ignored for fno-pic.
-+
-+msoft-float
-+Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
-+Prevent the use of all hardware floating-point instructions
-+
-+mno-fdiv
-+Target Report RejectNegative Mask(NO_FDIV)
-+Don't use hardware floating-point divide and square root instructions
-+
-+mfdiv
-+Target Report RejectNegative InverseMask(NO_FDIV, FDIV)
-+Use hardware floating-point divide and square root instructions
-+
-+march=
-+Target RejectNegative Joined Var(riscv_arch_string)
-+-march=			Generate code for given RISC-V ISA (e.g. RV64IM)
-+
-+mtune=
-+Target RejectNegative Joined Var(riscv_tune_string)
-+-mtune=PROCESSOR	Optimize the output for PROCESSOR
-+
-+msmall-data-limit=
-+Target Joined Separate UInteger Var(g_switch_value) Init(8)
-+-msmall-data-limit=<number>	Put global and static data smaller than <number> bytes into a special section (on some targets)
-+
-+matomic
-+Target Report Mask(ATOMIC)
-+Use hardware atomic memory instructions.
-+
-+mmuldiv
-+Target Report Mask(MULDIV)
-+Use hardware instructions for integer multiplication and division.
-+
-+mrvc
-+Target Report Mask(RVC)
-+Use compressed instruction encoding
-+
-+msave-restore
-+Target Report Mask(SAVE_RESTORE)
-+Use smaller but slower prologue and epilogue code
-+
-+mcmodel=
-+Target RejectNegative Joined Var(riscv_cmodel_string)
-+Use given RISC-V code model (medlow or medany)
-diff -urN empty/gcc/config/riscv/riscv-protos.h gcc-5.3.0/gcc/config/riscv/riscv-protos.h
---- empty/gcc/config/riscv/riscv-protos.h	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv-protos.h	2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,94 @@
-+/* Definition of RISC-V target for GNU compiler.
-+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+   Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#ifndef GCC_RISCV_PROTOS_H
-+#define GCC_RISCV_PROTOS_H
-+
-+enum riscv_symbol_type {
-+  SYMBOL_ABSOLUTE,
-+  SYMBOL_GOT_DISP,
-+  SYMBOL_TLS,
-+  SYMBOL_TLS_LE,
-+  SYMBOL_TLS_IE,
-+  SYMBOL_TLS_GD
-+};
-+#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
-+
-+enum riscv_code_model {
-+  CM_MEDLOW,
-+  CM_MEDANY,
-+  CM_PIC
-+};
-+extern enum riscv_code_model riscv_cmodel;
-+
-+extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
-+extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
-+extern int riscv_address_insns (rtx, enum machine_mode, bool);
-+extern int riscv_const_insns (rtx);
-+extern int riscv_split_const_insns (rtx);
-+extern int riscv_load_store_insns (rtx, rtx_insn *);
-+extern rtx riscv_emit_move (rtx, rtx);
-+extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
-+extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
-+extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
-+extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
-+extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
-+
-+extern rtx riscv_subword (rtx, bool);
-+extern bool riscv_split_64bit_move_p (rtx, rtx);
-+extern void riscv_split_doubleword_move (rtx, rtx);
-+extern const char *riscv_output_move (rtx, rtx);
-+extern const char *riscv_output_gpr_save (unsigned);
-+#ifdef RTX_CODE
-+extern void riscv_expand_scc (rtx *);
-+extern void riscv_expand_conditional_branch (rtx *);
-+#endif
-+extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
-+extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
-+extern void riscv_set_return_address (rtx, rtx);
-+extern bool riscv_expand_block_move (rtx, rtx, rtx);
-+extern void riscv_expand_synci_loop (rtx, rtx);
-+
-+extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
-+					       HOST_WIDE_INT);
-+extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
-+						HOST_WIDE_INT);
-+extern void riscv_order_regs_for_local_alloc (void);
-+
-+extern rtx riscv_return_addr (int, rtx);
-+extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
-+extern void riscv_expand_prologue (void);
-+extern void riscv_expand_epilogue (bool);
-+extern bool riscv_can_use_return_insn (void);
-+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
-+
-+extern enum reg_class riscv_secondary_reload_class (enum reg_class,
-+						   enum machine_mode,
-+						   rtx, bool);
-+extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
-+
-+extern void irix_asm_output_align (FILE *, unsigned);
-+extern const char *current_section_name (void);
-+extern unsigned int current_section_flags (void);
-+
-+extern void riscv_expand_vector_init (rtx, rtx);
-+
-+#endif /* ! GCC_RISCV_PROTOS_H */
-diff -urN empty/gcc/config/riscv/sync.md gcc-5.3.0/gcc/config/riscv/sync.md
---- empty/gcc/config/riscv/sync.md	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/sync.md	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,204 @@
-+;; Machine description for RISC-V atomic operations.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+
-+;; This file is part of GCC.
-+
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+;; GNU General Public License for more details.
-+
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3.  If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+(define_c_enum "unspec" [
-+  UNSPEC_COMPARE_AND_SWAP
-+  UNSPEC_SYNC_OLD_OP
-+  UNSPEC_SYNC_EXCHANGE
-+  UNSPEC_ATOMIC_STORE
-+  UNSPEC_MEMORY_BARRIER
-+])
-+
-+(define_code_iterator any_atomic [plus ior xor and])
-+(define_code_attr atomic_optab
-+  [(plus "add") (ior "or") (xor "xor") (and "and")])
-+
-+;; Memory barriers.
-+
-+(define_expand "mem_thread_fence"
-+  [(match_operand:SI 0 "const_int_operand" "")] ;; model
-+  ""
-+{
-+  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
-+    {
-+      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
-+      MEM_VOLATILE_P (mem) = 1;
-+      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
-+    }
-+  DONE;
-+})
-+
-+(define_insn "mem_thread_fence_1"
-+  [(set (match_operand:BLK 0 "" "")
-+	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
-+   (match_operand:SI 1 "const_int_operand" "")] ;; model
-+  ""
-+{
-+  long model = INTVAL (operands[1]);
-+
-+  switch (model)
-+    {
-+    case MEMMODEL_SEQ_CST:
-+    case MEMMODEL_SYNC_SEQ_CST:
-+    case MEMMODEL_ACQ_REL:
-+      return "fence rw,rw";
-+    case MEMMODEL_ACQUIRE:
-+    case MEMMODEL_SYNC_ACQUIRE:
-+    case MEMMODEL_CONSUME:
-+      return "fence r,rw";
-+    case MEMMODEL_RELEASE:
-+    case MEMMODEL_SYNC_RELEASE:
-+      return "fence rw,w";
-+    default:
-+      fprintf(stderr, "mem_thread_fence_1(%ld)\n", model);
-+      gcc_unreachable();
-+    }
-+})
-+
-+;; Atomic memory operations.
-+
-+;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
-+(define_insn "atomic_store<mode>"
-+  [(set (match_operand:GPR 0 "memory_operand" "=A")
-+    (unspec_volatile:GPR
-+      [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
-+       (match_operand:SI 2 "const_int_operand")]      ;; model
-+      UNSPEC_ATOMIC_STORE))]
-+  "TARGET_ATOMIC"
-+  "amoswap.<amo>%A2 zero,%z1,%0")
-+
-+(define_insn "atomic_<atomic_optab><mode>"
-+  [(set (match_operand:GPR 0 "memory_operand" "+A")
-+	(unspec_volatile:GPR
-+	  [(any_atomic:GPR (match_dup 0)
-+		     (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
-+	   (match_operand:SI 2 "const_int_operand")] ;; model
-+	 UNSPEC_SYNC_OLD_OP))]
-+  "TARGET_ATOMIC"
-+  "amo<insn>.<amo>%A2 zero,%z1,%0")
-+
-+(define_insn "atomic_fetch_<atomic_optab><mode>"
-+  [(set (match_operand:GPR 0 "register_operand" "=&r")
-+	(match_operand:GPR 1 "memory_operand" "+A"))
-+   (set (match_dup 1)
-+	(unspec_volatile:GPR
-+	  [(any_atomic:GPR (match_dup 1)
-+		     (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
-+	   (match_operand:SI 3 "const_int_operand")] ;; model
-+	 UNSPEC_SYNC_OLD_OP))]
-+  "TARGET_ATOMIC"
-+  "amo<insn>.<amo>%A3 %0,%z2,%1")
-+
-+(define_insn "atomic_exchange<mode>"
-+  [(set (match_operand:GPR 0 "register_operand" "=&r")
-+	(unspec_volatile:GPR
-+	  [(match_operand:GPR 1 "memory_operand" "+A")
-+	   (match_operand:SI 3 "const_int_operand")] ;; model
-+	  UNSPEC_SYNC_EXCHANGE))
-+   (set (match_dup 1)
-+        (match_operand:GPR 2 "register_operand" "0"))]
-+  "TARGET_ATOMIC"
-+  "amoswap.<amo>%A3 %0,%z2,%1")
-+
-+(define_insn "atomic_cas_value_strong<mode>"
-+  [(set (match_operand:GPR 0 "register_operand" "=&r")
-+	(match_operand:GPR 1 "memory_operand" "+A"))
-+   (set (match_dup 1)
-+	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
-+			      (match_operand:GPR 3 "reg_or_0_operand" "rJ")
-+			      (match_operand:SI 4 "const_int_operand")  ;; mod_s
-+			      (match_operand:SI 5 "const_int_operand")] ;; mod_f
-+	 UNSPEC_COMPARE_AND_SWAP))
-+   (clobber (match_scratch:GPR 6 "=&r"))]
-+  "TARGET_ATOMIC"
-+  "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
-+  [(set (attr "length") (const_int 16))])
-+
-+(define_expand "atomic_compare_and_swap<mode>"
-+  [(match_operand:SI 0 "register_operand" "")   ;; bool output
-+   (match_operand:GPR 1 "register_operand" "")  ;; val output
-+   (match_operand:GPR 2 "memory_operand" "")    ;; memory
-+   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
-+   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
-+   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
-+   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
-+   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
-+  "TARGET_ATOMIC"
-+{
-+  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
-+						operands[3], operands[4],
-+						operands[6], operands[7]));
-+
-+  rtx compare = operands[1];
-+  if (operands[3] != const0_rtx)
-+    {
-+      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
-+      compare = gen_reg_rtx (<MODE>mode);
-+      emit_insn (gen_rtx_SET (VOIDmode, compare, difference));
-+    }
-+
-+  rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
-+  rtx result = gen_reg_rtx (<MODE>mode);
-+  emit_insn (gen_rtx_SET (VOIDmode, result, eq));
-+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_lowpart (SImode, result)));
-+  DONE;
-+})
-+
-+(define_expand "atomic_test_and_set"
-+  [(match_operand:QI 0 "register_operand" "")     ;; bool output
-+   (match_operand:QI 1 "memory_operand" "+A")    ;; memory
-+   (match_operand:SI 2 "const_int_operand" "")]   ;; model
-+  "TARGET_ATOMIC"
-+{
-+  /* We have no QImode atomics, so use the address LSBs to form a mask,
-+     then use an aligned SImode atomic. */
-+  rtx result = operands[0];
-+  rtx mem = operands[1];
-+  rtx model = operands[2];
-+  rtx addr = force_reg (Pmode, XEXP (mem, 0));
-+
-+  rtx aligned_addr = gen_reg_rtx (Pmode);
-+  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
-+
-+  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
-+  set_mem_alias_set (aligned_mem, 0);
-+
-+  rtx offset = gen_reg_rtx (SImode);
-+  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
-+				       GEN_INT (3)));
-+
-+  rtx tmp = gen_reg_rtx (SImode);
-+  emit_move_insn (tmp, GEN_INT (1));
-+
-+  rtx shmt = gen_reg_rtx (SImode);
-+  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
-+
-+  rtx word = gen_reg_rtx (SImode);
-+  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
-+
-+  tmp = gen_reg_rtx (SImode);
-+  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
-+
-+  emit_move_insn (gen_lowpart (SImode, result),
-+		  gen_rtx_LSHIFTRT (SImode, tmp,
-+				    gen_lowpart (SImode, shmt)));
-+  DONE;
-+})
-diff -urN empty/gcc/config/riscv/t-elf gcc-5.3.0/gcc/config/riscv/t-elf
---- empty/gcc/config/riscv/t-elf	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/t-elf	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4 @@
-+# Build the libraries for both hard and soft floating point
-+
-+MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
-+MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
-diff -urN empty/gcc/config/riscv/t-linux64 gcc-5.3.0/gcc/config/riscv/t-linux64
---- empty/gcc/config/riscv/t-linux64	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/t-linux64	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,5 @@
-+# Build the libraries for both hard and soft floating point
-+
-+MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
-+MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
-+MULTILIB_OSDIRNAMES = ../lib ../lib32 soft-float no-atomic
-diff -urN empty/libgcc/config/riscv/crti.S gcc-5.3.0/libgcc/config/riscv/crti.S
---- empty/libgcc/config/riscv/crti.S	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/crti.S	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1 @@
-+/* crti.S is empty because .init_array/.fini_array are used exclusively. */
-diff -urN empty/libgcc/config/riscv/crtn.S gcc-5.3.0/libgcc/config/riscv/crtn.S
---- empty/libgcc/config/riscv/crtn.S	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/crtn.S	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1 @@
-+/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
-diff -urN empty/libgcc/config/riscv/div.S gcc-5.3.0/libgcc/config/riscv/div.S
---- empty/libgcc/config/riscv/div.S	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/div.S	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,121 @@
-+  .text
-+  .align 2
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
-+# define __udivdi3 __udivsi3
-+# define __umoddi3 __umodsi3
-+# define __divdi3 __divsi3
-+# define __moddi3 __modsi3
-+#else
-+  .globl __udivsi3
-+__udivsi3:
-+  /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t.  */
-+  sll    a0, a0, 32
-+  sll    a1, a1, 32
-+  move   t0, ra
-+  jal    __udivdi3
-+  sext.w a0, a0
-+  jr     t0
-+
-+  .globl __umodsi3
-+__umodsi3:
-+  /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t.  */
-+  sll    a0, a0, 32
-+  sll    a1, a1, 32
-+  srl    a0, a0, 32
-+  srl    a1, a1, 32
-+  move   t0, ra
-+  jal    __udivdi3
-+  sext.w a0, a1
-+  jr     t0
-+
-+  .globl __modsi3
-+  __modsi3 = __moddi3
-+
-+  .globl __divsi3
-+__divsi3:
-+  /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3.  */
-+  li    t0, -1
-+  beq   a1, t0, .L20
-+#endif
-+
-+  .globl __divdi3
-+__divdi3:
-+  bltz  a0, .L10
-+  bltz  a1, .L11
-+  /* Since the quotient is positive, fall into __udivdi3.  */
-+
-+  .globl __udivdi3
-+__udivdi3:
-+  mv    a2, a1
-+  mv    a1, a0
-+  li    a0, -1
-+  beqz  a2, .L5
-+  li    a3, 1
-+  bgeu  a2, a1, .L2
-+.L1:
-+  blez  a2, .L2
-+  slli  a2, a2, 1
-+  slli  a3, a3, 1
-+  bgtu  a1, a2, .L1
-+.L2:
-+  li    a0, 0
-+.L3:
-+  bltu  a1, a2, .L4
-+  sub   a1, a1, a2
-+  or    a0, a0, a3
-+.L4:
-+  srli  a3, a3, 1
-+  srli  a2, a2, 1
-+  bnez  a3, .L3
-+.L5:
-+  ret
-+
-+  .globl __umoddi3
-+__umoddi3:
-+  /* Call __udivdi3(a0, a1), then return the remainder, which is in a1.  */
-+  move  t0, ra
-+  jal   __udivdi3
-+  move  a0, a1
-+  jr    t0
-+
-+  /* Handle negative arguments to __divdi3.  */
-+.L10:
-+  neg   a0, a0 
-+  bgez  a1, .L12      /* Compute __udivdi3(-a0, a1), then negate the result.  */
-+  neg   a1, a1
-+  j     __divdi3      /* Compute __udivdi3(-a0, -a1).  */
-+.L11:                 /* Compute __udivdi3(a0, -a1), then negate the result.  */
-+  neg   a1, a1
-+.L12:
-+  move  t0, ra
-+  jal   __divdi3
-+  neg   a0, a0
-+  jr    t0
-+
-+  .globl __moddi3
-+__moddi3:
-+  move   t0, ra
-+  bltz   a1, .L31
-+  bltz   a0, .L32
-+.L30:
-+  jal    __udivdi3    /* The dividend is not negative.  */
-+  move   a0, a1
-+  jr     t0
-+.L31:
-+  neg    a1, a1
-+  bgez   a0, .L30
-+.L32:
-+  neg    a0, a0
-+  jal    __udivdi3    /* The dividend is hella negative.  */
-+  neg    a0, a1
-+  jr     t0
-+
-+#ifdef __riscv64
-+  /* continuation of __divsi3 */
-+.L20:
-+  sll   t0, t0, 31
-+  bne   a0, t0, __divdi3
-+  ret
-+#endif
-diff -urN empty/libgcc/config/riscv/muldi3.S gcc-5.3.0/libgcc/config/riscv/muldi3.S
---- empty/libgcc/config/riscv/muldi3.S	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/muldi3.S	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,21 @@
-+  .text
-+  .align 2
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine.  */
-+# define __muldi3 __mulsi3
-+#endif
-+
-+  .globl __muldi3
-+__muldi3:
-+  mv     a2, a0
-+  li     a0, 0
-+.L1:
-+  andi   a3, a1, 1
-+  beqz   a3, .L2
-+  add    a0, a0, a2
-+.L2:
-+  srli   a1, a1, 1
-+  slli   a2, a2, 1
-+  bnez   a1, .L1
-+  ret
-diff -urN empty/libgcc/config/riscv/multi3.S gcc-5.3.0/libgcc/config/riscv/multi3.S
---- empty/libgcc/config/riscv/multi3.S	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/multi3.S	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,56 @@
-+  .text
-+  .align 2
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
-+# define __multi3 __muldi3
-+#endif
-+
-+  .globl __multi3
-+__multi3:
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
-+# define __muldi3 __mulsi3
-+#endif
-+
-+/* We rely on the fact that __muldi3 doesn't clobber the t-registers.  */
-+
-+  mv  t0, ra
-+  mv  t5, a0
-+  mv  a0, a1
-+  mv  t6, a3
-+  mv  a1, t5
-+  mv  a4, a2
-+  li  a5, 0
-+  li  t2, 0
-+  li  t4, 0
-+.L1:
-+  add  a6, t2, a1
-+  andi t3, a4, 1
-+  slli a7, a5, 1
-+  slti t1, a1, 0
-+  srli a4, a4, 1
-+  add  a5, t4, a5
-+  beqz t3, .L2
-+  sltu t3, a6, t2
-+  mv   t2, a6
-+  add  t4, t3, a5
-+.L2:
-+  slli a1, a1, 1
-+  or   a5, t1, a7
-+  bnez a4, .L1
-+  beqz a0, .L3
-+  mv   a1, a2
-+  call __muldi3
-+  add  t4, t4, a0
-+.L3:
-+  beqz t6, .L4
-+  mv   a1, t6
-+  mv   a0, t5
-+  call  __muldi3
-+  add  t4, t4, a0
-+.L4:
-+  mv  a0, t2
-+  mv  a1, t4
-+  jr  t0
-diff -urN empty/libgcc/config/riscv/riscv-fp.c gcc-5.3.0/libgcc/config/riscv/riscv-fp.c
---- empty/libgcc/config/riscv/riscv-fp.c	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/riscv-fp.c	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,178 @@
-+/* Functions needed for soft-float on riscv-linux.  Based on
-+   rs6000/ppc64-fp.c with TF types removed.
-+ 
-+   Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-+   2000, 2001, 2002, 2003, 2004, 2006, 2009  Free Software Foundation,
-+   Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify it under
-+the terms of the GNU General Public License as published by the Free
-+Software Foundation; either version 3, or (at your option) any later
-+version.
-+
-+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-+WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-+for more details.
-+
-+Under Section 7 of GPL version 3, you are granted additional
-+permissions described in the GCC Runtime Library Exception, version
-+3.1, as published by the Free Software Foundation.
-+
-+You should have received a copy of the GNU General Public License and
-+a copy of the GCC Runtime Library Exception along with this program;
-+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#if defined(__riscv64)
-+#include "fp-bit.h"
-+
-+extern DItype __fixdfdi (DFtype);
-+extern DItype __fixsfdi (SFtype);
-+extern USItype __fixunsdfsi (DFtype);
-+extern USItype __fixunssfsi (SFtype);
-+extern DFtype __floatdidf (DItype);
-+extern DFtype __floatundidf (UDItype);
-+extern SFtype __floatdisf (DItype);
-+extern SFtype __floatundisf (UDItype);
-+
-+static DItype local_fixunssfdi (SFtype);
-+static DItype local_fixunsdfdi (DFtype);
-+
-+DItype
-+__fixdfdi (DFtype a)
-+{
-+  if (a < 0)
-+    return - local_fixunsdfdi (-a);
-+  return local_fixunsdfdi (a);
-+}
-+
-+DItype
-+__fixsfdi (SFtype a)
-+{
-+  if (a < 0)
-+    return - local_fixunssfdi (-a);
-+  return local_fixunssfdi (a);
-+}
-+
-+USItype
-+__fixunsdfsi (DFtype a)
-+{
-+  if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+    return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+                       - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
-+  return (SItype) a;
-+}
-+
-+USItype
-+__fixunssfsi (SFtype a)
-+{
-+  if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+    return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+                       - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
-+  return (SItype) a;
-+}
-+
-+DFtype
-+__floatdidf (DItype u)
-+{
-+  DFtype d;
-+
-+  d = (SItype) (u >> (sizeof (SItype) * 8));
-+  d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+  d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+  return d;
-+}
-+
-+DFtype
-+__floatundidf (UDItype u)
-+{
-+  DFtype d;
-+
-+  d = (USItype) (u >> (sizeof (SItype) * 8));
-+  d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+  d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+  return d;
-+}
-+
-+SFtype
-+__floatdisf (DItype u)
-+{
-+  DFtype f;
-+
-+  if (53 < (sizeof (DItype) * 8)
-+      && 53 > ((sizeof (DItype) * 8) - 53 + 24))
-+    {
-+      if (! (- ((DItype) 1 << 53) < u
-+             && u < ((DItype) 1 << 53)))
-+        {
-+          if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
-+            {
-+              u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
-+              u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
-+            }
-+        }
-+    }
-+  f = (SItype) (u >> (sizeof (SItype) * 8));
-+  f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+  f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+  return (SFtype) f;
-+}
-+
-+SFtype
-+__floatundisf (UDItype u)
-+{
-+  DFtype f;
-+
-+  if (53 < (sizeof (DItype) * 8)
-+      && 53 > ((sizeof (DItype) * 8) - 53 + 24))
-+    {
-+      if (u >= ((UDItype) 1 << 53))
-+        {
-+          if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
-+            {
-+              u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
-+              u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
-+            }
-+        }
-+    }
-+  f = (USItype) (u >> (sizeof (SItype) * 8));
-+  f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+  f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+  return (SFtype) f;
-+}
-+
-+/* This version is needed to prevent recursion; fixunsdfdi in libgcc
-+   calls fixdfdi, which in turn calls calls fixunsdfdi.  */
-+
-+static DItype
-+local_fixunsdfdi (DFtype a)
-+{
-+  USItype hi, lo;
-+
-+  hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
-+  lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
-+  return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
-+}
-+
-+/* This version is needed to prevent recursion; fixunssfdi in libgcc
-+   calls fixsfdi, which in turn calls calls fixunssfdi.  */
-+
-+static DItype
-+local_fixunssfdi (SFtype original_a)
-+{
-+  DFtype a = original_a;
-+  USItype hi, lo;
-+
-+  hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
-+  lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
-+  return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
-+}
-+
-+#endif
-diff -urN empty/libgcc/config/riscv/save-restore.S gcc-5.3.0/libgcc/config/riscv/save-restore.S
---- empty/libgcc/config/riscv/save-restore.S	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/save-restore.S	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,220 @@
-+  .text
-+
-+  .globl __riscv_save_12
-+  .globl __riscv_save_11
-+  .globl __riscv_save_10
-+  .globl __riscv_save_9
-+  .globl __riscv_save_8
-+  .globl __riscv_save_7
-+  .globl __riscv_save_6
-+  .globl __riscv_save_5
-+  .globl __riscv_save_4
-+  .globl __riscv_save_3
-+  .globl __riscv_save_2
-+  .globl __riscv_save_1
-+  .globl __riscv_save_0
-+
-+  .globl __riscv_restore_12
-+  .globl __riscv_restore_11
-+  .globl __riscv_restore_10
-+  .globl __riscv_restore_9
-+  .globl __riscv_restore_8
-+  .globl __riscv_restore_7
-+  .globl __riscv_restore_6
-+  .globl __riscv_restore_5
-+  .globl __riscv_restore_4
-+  .globl __riscv_restore_3
-+  .globl __riscv_restore_2
-+  .globl __riscv_restore_1
-+  .globl __riscv_restore_0
-+
-+#ifdef __riscv64
-+
-+__riscv_save_12:
-+  addi sp, sp, -112
-+  li t1, 0
-+  sd s11, 8(sp)
-+  j .Ls10
-+
-+__riscv_save_11:
-+__riscv_save_10:
-+  addi sp, sp, -112
-+  li t1, -16
-+.Ls10:
-+  sd s10, 16(sp)
-+  sd s9, 24(sp)
-+  j .Ls8
-+
-+__riscv_save_9:
-+__riscv_save_8:
-+  addi sp, sp, -112
-+  li t1, -32
-+.Ls8:
-+  sd s8, 32(sp)
-+  sd s7, 40(sp)
-+  j .Ls6
-+
-+__riscv_save_7:
-+__riscv_save_6:
-+  addi sp, sp, -112
-+  li t1, -48
-+.Ls6:
-+  sd s6, 48(sp)
-+  sd s5, 56(sp)
-+  j .Ls4
-+
-+__riscv_save_5:
-+__riscv_save_4:
-+  addi sp, sp, -112
-+  li t1, -64
-+.Ls4:
-+  sd s4, 64(sp)
-+  sd s3, 72(sp)
-+  j .Ls2
-+
-+__riscv_save_3:
-+__riscv_save_2:
-+  addi sp, sp, -112
-+  li t1, -80
-+.Ls2:
-+  sd s2, 80(sp)
-+  sd s1, 88(sp)
-+  sd s0, 96(sp)
-+  sd ra, 104(sp)
-+  sub sp, sp, t1
-+  jr t0
-+
-+__riscv_save_1:
-+__riscv_save_0:
-+  addi sp, sp, -16
-+  sd s0, 0(sp)
-+  sd ra, 8(sp)
-+  jr t0
-+
-+__riscv_restore_12:
-+  ld s11, 8(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_11:
-+__riscv_restore_10:
-+  ld s10, 0(sp)
-+  ld s9, 8(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_9:
-+__riscv_restore_8:
-+  ld s8, 0(sp)
-+  ld s7, 8(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_7:
-+__riscv_restore_6:
-+  ld s6, 0(sp)
-+  ld s5, 8(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_5:
-+__riscv_restore_4:
-+  ld s4, 0(sp)
-+  ld s3, 8(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_3:
-+__riscv_restore_2:
-+  ld s2, 0(sp)
-+  ld s1, 8(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_1:
-+__riscv_restore_0:
-+  ld s0, 0(sp)
-+  ld ra, 8(sp)
-+  addi sp, sp, 16
-+  ret
-+
-+#else
-+
-+__riscv_save_12:
-+  addi sp, sp, -64
-+  li t1, 0
-+  sw s11, 12(sp)
-+  j .Ls10
-+
-+__riscv_save_11:
-+__riscv_save_10:
-+__riscv_save_9:
-+__riscv_save_8:
-+  addi sp, sp, -64
-+  li t1, -16
-+.Ls10:
-+  sw s10, 16(sp)
-+  sw s9, 20(sp)
-+  sw s8, 24(sp)
-+  sw s7, 28(sp)
-+  j .Ls6
-+
-+__riscv_save_7:
-+__riscv_save_6:
-+__riscv_save_5:
-+__riscv_save_4:
-+  addi sp, sp, -64
-+  li t1, -32
-+.Ls6:
-+  sw s6, 32(sp)
-+  sw s5, 36(sp)
-+  sw s4, 40(sp)
-+  sw s3, 44(sp)
-+  sw s2, 48(sp)
-+  sw s1, 52(sp)
-+  sw s0, 56(sp)
-+  sw ra, 60(sp)
-+  sub sp, sp, t1
-+  jr t0
-+
-+__riscv_save_3:
-+__riscv_save_2:
-+__riscv_save_1:
-+__riscv_save_0:
-+  addi sp, sp, -16
-+  sw s2, 0(sp)
-+  sw s1, 4(sp)
-+  sw s0, 8(sp)
-+  sw ra, 12(sp)
-+  jr t0
-+
-+__riscv_restore_12:
-+  lw s11, 12(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_11:
-+__riscv_restore_10:
-+__riscv_restore_9:
-+__riscv_restore_8:
-+  lw s10, 0(sp)
-+  lw s9, 4(sp)
-+  lw s8, 8(sp)
-+  lw s7, 12(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_7:
-+__riscv_restore_6:
-+__riscv_restore_5:
-+__riscv_restore_4:
-+  lw s6, 0(sp)
-+  lw s5, 4(sp)
-+  lw s4, 8(sp)
-+  lw s3, 12(sp)
-+  addi sp, sp, 16
-+
-+__riscv_restore_3:
-+__riscv_restore_2:
-+__riscv_restore_1:
-+__riscv_restore_0:
-+  lw s2, 0(sp)
-+  lw s1, 4(sp)
-+  lw s0, 8(sp)
-+  lw ra, 12(sp)
-+  addi sp, sp, 16
-+  ret
-+
-+#endif
-diff -urN empty/libgcc/config/riscv/t-dpbit gcc-5.3.0/libgcc/config/riscv/t-dpbit
---- empty/libgcc/config/riscv/t-dpbit	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-dpbit	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4 @@
-+LIB2ADD += dp-bit.c
-+
-+dp-bit.c: $(srcdir)/fp-bit.c
-+	cat $(srcdir)/fp-bit.c > dp-bit.c
-diff -urN empty/libgcc/config/riscv/t-elf gcc-5.3.0/libgcc/config/riscv/t-elf
---- empty/libgcc/config/riscv/t-elf	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-elf	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,5 @@
-+LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
-+	   $(srcdir)/config/riscv/save-restore.S \
-+	   $(srcdir)/config/riscv/muldi3.S \
-+	   $(srcdir)/config/riscv/multi3.S \
-+	   $(srcdir)/config/riscv/div.S
-diff -urN empty/libgcc/config/riscv/t-elf32 gcc-5.3.0/libgcc/config/riscv/t-elf32
---- empty/libgcc/config/riscv/t-elf32	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-elf32	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4 @@
-+LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
-+
-+HOST_LIBGCC2_CFLAGS += -m32
-+CRTSTUFF_CFLAGS += -m32
-diff -urN empty/libgcc/config/riscv/t-elf64 gcc-5.3.0/libgcc/config/riscv/t-elf64
---- empty/libgcc/config/riscv/t-elf64	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-elf64	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,2 @@
-+LIB2FUNCS_EXCLUDE += _divdi3 _moddi3 _udivdi3 _umoddi3 _muldi3 _multi3 \
-+		     _divsi3 _modsi3 _udivsi3 _umodsi3 \
-diff -urN empty/libgcc/config/riscv/t-fpbit gcc-5.3.0/libgcc/config/riscv/t-fpbit
---- empty/libgcc/config/riscv/t-fpbit	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-fpbit	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,5 @@
-+LIB2ADD += fp-bit.c
-+
-+fp-bit.c: $(srcdir)/fp-bit.c
-+	echo '#define FLOAT' > fp-bit.c
-+	cat $(srcdir)/fp-bit.c >> fp-bit.c
-diff -urN empty/libgcc/config/riscv/t-tpbit gcc-5.3.0/libgcc/config/riscv/t-tpbit
---- empty/libgcc/config/riscv/t-tpbit	1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-tpbit	2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,10 @@
-+LIB2ADD += tp-bit.c
-+
-+tp-bit.c: $(srcdir)/fp-bit.c
-+	echo '#ifdef _RISCVEL' > tp-bit.c
-+	echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c
-+	echo '#endif' >> tp-bit.c
-+	echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c
-+	echo '# define TFLOAT' >> tp-bit.c
-+	cat $(srcdir)/fp-bit.c >> tp-bit.c
-+	echo '#endif' >> tp-bit.c
diff --git a/util/crossgcc/patches/gcc-6.3.0_elf_biarch.patch b/util/crossgcc/patches/gcc-6.3.0_elf_biarch.patch
new file mode 100644
index 0000000..226aed9
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.3.0_elf_biarch.patch
@@ -0,0 +1,87 @@
+diff -urN gcc-4.9.2/gcc/config/i386/t-elf64 gcc-4.9.2/gcc/config/i386/t-elf64
+--- gcc-4.9.2/gcc/config/i386/t-elf64	1969-12-31 16:00:00.000000000 -0800
++++ gcc-6.1.0/gcc/config/i386/t-elf64	2015-06-17 11:20:08.032513005 -0700
+@@ -0,0 +1,38 @@
++# Copyright (C) 2002-2014 Free Software Foundation, Inc.
++#
++# This file is part of GCC.
++#
++# GCC is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3, or (at your option)
++# any later version.
++#
++# GCC is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# <http://www.gnu.org/licenses/>.
++
++# On Debian, Ubuntu and other derivative distributions, the 32bit libraries
++# are found in /lib32 and /usr/lib32, /lib64 and /usr/lib64 are symlinks to
++# /lib and /usr/lib, while other distributions install libraries into /lib64
++# and /usr/lib64.  The LSB does not enforce the use of /lib64 and /usr/lib64,
++# it doesn't tell anything about the 32bit libraries on those systems.  Set
++# MULTILIB_OSDIRNAMES according to what is found on the target.
++
++# To support i386, x86-64 and x32 libraries, the directory structrue
++# should be:
++#
++# 	/lib has i386 libraries.
++# 	/lib64 has x86-64 libraries.
++# 	/libx32 has x32 libraries.
++#
++comma=,
++MULTILIB_OPTIONS    = $(subst $(comma),/,$(TM_MULTILIB_CONFIG))
++MULTILIB_DIRNAMES   = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS)))
++MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-elf)
++MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-elf)
++MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-elf-x32)
+diff -urN gcc-4.9.2/gcc/config.gcc gcc-4.9.2/gcc/config.gcc
+--- gcc-4.9.2/gcc/config.gcc	2015-06-17 11:20:57.841008182 -0700
++++ gcc-6.1.0/gcc/config.gcc	2015-06-17 11:17:24.818890200 -0700
+@@ -1353,6 +1353,30 @@
+ 	;;
+ x86_64-*-elf*)
+ 	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h"
++	tmake_file="${tmake_file} i386/t-elf64"
++	x86_multilibs="${with_multilib_list}"
++	if test "$x86_multilibs" = "default"; then
++		case ${with_abi} in
++		x32 | mx32)
++			x86_multilibs="mx32"
++			;;
++		*)
++			x86_multilibs="m64,m32"
++			;;
++		esac
++	fi
++	x86_multilibs=`echo $x86_multilibs | sed -e 's/,/ /g'`
++	for x86_multilib in ${x86_multilibs}; do
++		case ${x86_multilib} in
++		m32 | m64 | mx32)
++			TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${x86_multilib}"
++			;;
++		*)
++			echo "--with-multilib-list=${x86_with_multilib} not supported."
++			exit 1
++		esac
++	done
++	TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'`
+ 	;;
+ i[34567]86-*-rdos*)
+     tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/rdos.h"
+--- gcc-6.1.0/gcc/config/i386/x86-64.h.orig	2015-08-20 17:17:34.555919593 +0200
++++ gcc-6.1.0/gcc/config/i386/x86-64.h	2015-08-20 17:17:42.615908670 +0200
+@@ -49,7 +49,7 @@
+ #define WCHAR_TYPE_SIZE 32
+ 
+ #undef ASM_SPEC
+-#define ASM_SPEC "%{m32:--32} %{m64:--64} %{mx32:--x32}"
++#define ASM_SPEC "%{m16|m32:--32} %{m64:--64} %{mx32:--x32}"
+ 
+ #undef ASM_OUTPUT_ALIGNED_BSS
+ #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
diff --git a/util/crossgcc/patches/gcc-6.3.0_gnat.patch b/util/crossgcc/patches/gcc-6.3.0_gnat.patch
new file mode 100644
index 0000000..ac1e26a
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.3.0_gnat.patch
@@ -0,0 +1,11 @@
+--- gcc-6.1.0/gcc/ada/gcc-interface/Make-lang.in.bak	2015-08-24 16:23:25.004493665 +0200
++++ gcc-6.1.0/gcc/ada/gcc-interface/Make-lang.in	2015-08-24 17:53:52.496636113 +0200
+@@ -45,7 +45,7 @@
+ 
+ 
+ # Extra flags to pass to recursive makes.
+-COMMON_ADAFLAGS= -gnatpg
++COMMON_ADAFLAGS= -gnatpg -gnatwG
+ ifeq ($(TREECHECKING),)
+ CHECKING_ADAFLAGS=
+ else
diff --git a/util/crossgcc/patches/gcc-6.3.0_libgcc.patch b/util/crossgcc/patches/gcc-6.3.0_libgcc.patch
new file mode 100644
index 0000000..1b0b8a4
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.3.0_libgcc.patch
@@ -0,0 +1,57 @@
+diff -urN gcc-5.2.0.orig/libgcc/config/t-hardfp gcc-5.2.0/libgcc/config/t-hardfp
+--- gcc-5.2.0.orig/libgcc/config/t-hardfp	2015-01-05 04:33:28.000000000 -0800
++++ gcc-6.1.0/libgcc/config/t-hardfp	2016-04-06 12:04:51.000000000 -0700
+@@ -59,21 +59,52 @@
+ 
+ hardfp_func_list := $(filter-out $(hardfp_exclusions),$(hardfp_func_list))
+ 
++HOST_OS ?= $(shell uname)
++
+ # Regexp for matching a floating-point mode.
++ifeq ($(HOST_OS), Darwin)
++hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
++else
++ifeq ($(HOST_OS), FreeBSD)
++hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
++else
+ hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /\\|/g')
++endif
++endif
+ 
+ # Regexp for matching the end of a function name, after the last
+ # floating-point mode.
++ifeq ($(HOST_OS), Darwin)
++hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
++else
++ifeq ($(HOST_OS), FreeBSD)
++hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
++else
+ hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /\\|/g')
++endif
++endif
+ 
+ # Add -D options to define:
+ #   FUNC: the function name (e.g. __addsf3)
+ #   OP:   the function name without the leading __ and with the last
+ #            floating-point mode removed (e.g. add3)
+ #   TYPE: the last floating-point mode (e.g. sf)
++
++ifeq ($(HOST_OS), Darwin)
+ hardfp_defines_for = \
+   $(shell echo $1 | \
+-    sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++    sed -E 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++else
++ifeq ($(HOST_OS), FreeBSD)
++hardfp_defines_for = \
++  $(shell echo $1 | \
++    sed -r 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++else
++hardfp_defines_for = \
++  $(shell echo $1 | \
++    sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++endif
++endif
+ 
+ hardfp-o = $(patsubst %,%$(objext),$(hardfp_func_list))
+ $(hardfp-o): %$(objext): $(srcdir)/config/hardfp.c
diff --git a/util/crossgcc/patches/gcc-6.3.0_nds32.patch b/util/crossgcc/patches/gcc-6.3.0_nds32.patch
new file mode 100644
index 0000000..cdfb02f
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.3.0_nds32.patch
@@ -0,0 +1,17 @@
+diff -urN gcc-6.1.0.orig/gcc/config/nds32/nds32.md gcc-6.1.0/gcc/config/nds32/nds32.md
+--- gcc-6.1.0.orig/gcc/config/nds32/nds32.md	2015-01-15 22:45:09.000000000 -0800
++++ gcc-6.1.0/gcc/config/nds32/nds32.md	2016-04-14 22:09:09.000000000 -0700
+@@ -2289,11 +2289,11 @@
+   emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2],
+ 				  operands[4]));
+ 
+-  operands[5] = gen_reg_rtx (SImode);
++  rtx tmp = gen_reg_rtx (SImode);
+   /* Step C, D, E, and F, using another temporary register operands[5].  */
+   emit_jump_insn (gen_casesi_internal (operands[0],
+ 				       operands[3],
+-				       operands[5]));
++				       tmp));
+   DONE;
+ })
+ 
diff --git a/util/crossgcc/patches/gcc-6.3.0_riscv.patch b/util/crossgcc/patches/gcc-6.3.0_riscv.patch
new file mode 100644
index 0000000..b0e44b0
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.3.0_riscv.patch
@@ -0,0 +1,10428 @@
+diff --git original-gcc/gcc/common/config/riscv/riscv-common.c gcc-6.2.0/gcc/common/config/riscv/riscv-common.c
+new file mode 100644
+index 0000000..bb3b5c8
+--- /dev/null
++++ gcc-6.2.0/gcc/common/config/riscv/riscv-common.c
+@@ -0,0 +1,172 @@
++/* Common hooks for RISC-V.
++   Copyright (C) 1989-2014 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "common/common-target.h"
++#include "common/common-target-def.h"
++#include "opts.h"
++#include "flags.h"
++#include "errors.h"
++
++/* Parse a RISC-V ISA string into an option mask.  */
++
++static void
++riscv_parse_arch_string (const char *isa, int *flags)
++{
++  const char *p = isa;
++
++  if (strncasecmp (p, "RV32", 4) == 0)
++    *flags |= MASK_32BIT, p += 4;
++  else if (strncasecmp (p, "RV64", 4) == 0)
++    *flags &= ~MASK_32BIT, p += 4;
++  else if (strncasecmp (p, "RV", 2) == 0)
++    p += 2;
++
++  if (TOUPPER (*p) == 'G')
++    {
++      p++;
++
++      *flags |= MASK_MUL | MASK_DIV;
++      *flags |= MASK_ATOMIC;
++      *flags |= MASK_HARD_FLOAT;
++      *flags |= MASK_DOUBLE_FLOAT;
++    }
++  else if (TOUPPER (*p) == 'I')
++    {
++      p++;
++
++      *flags &= ~(MASK_MUL | MASK_DIV);
++      if (TOUPPER (*p) == 'M')
++	*flags |= (MASK_MUL | MASK_DIV), p++;
++
++      *flags &= ~MASK_ATOMIC;
++      if (TOUPPER (*p) == 'A')
++	*flags |= MASK_ATOMIC, p++;
++
++      *flags &= ~MASK_HARD_FLOAT;
++      if (TOUPPER (*p) == 'F')
++	{
++	  *flags |= MASK_HARD_FLOAT, p++;
++
++	  *flags &= ~MASK_DOUBLE_FLOAT;
++	  if (TOUPPER (*p) == 'D')
++	    {
++	      *flags |= MASK_DOUBLE_FLOAT;
++	      p++;
++	    }
++	}
++    }
++  else
++    {
++      error ("-march=%s: invalid ISA string", isa);
++      return;
++    }
++
++  *flags &= ~MASK_RVC;
++  if (TOUPPER (*p) == 'C')
++    *flags |= MASK_RVC, p++;
++
++  /* FIXME: For now we just stop parsing when faced with a
++     non-standard RISC-V ISA extension.  We might consider
++     ignoring it and passing it through to the assembler.  */
++  if (TOUPPER (*p) == 'X')
++    return;
++
++  if (*p)
++    {
++      error ("-march=%s: unsupported ISA substring %s", isa, p);
++      return;
++    }
++}
++
++static int
++riscv_flags_from_arch_string (const char *isa)
++{
++  int flags = 0;
++  riscv_parse_arch_string (isa, &flags);
++  return flags;
++}
++
++/* Implement TARGET_HANDLE_OPTION.  */
++
++static bool
++riscv_handle_option (struct gcc_options *opts,
++		     struct gcc_options *opts_set ATTRIBUTE_UNUSED,
++		     const struct cl_decoded_option *decoded,
++		     location_t loc ATTRIBUTE_UNUSED)
++{
++  switch (decoded->opt_index)
++    {
++    case OPT_march_:
++      riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
++      return true;
++
++    case OPT_mmuldiv:
++      if (decoded->value)
++	opts->x_target_flags |= (MASK_MUL | MASK_DIV);
++      else
++	opts->x_target_flags &= ~(MASK_MUL | MASK_DIV);
++      return true;
++
++    case OPT_mno_float:
++      opts->x_target_flags &= ~(MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT);
++      return true;
++
++    case OPT_msingle_float:
++      /* In addition to enabling the F extension, disable the D extension.  */
++      opts->x_target_flags &= ~MASK_DOUBLE_FLOAT;
++      return true;
++
++    case OPT_mdouble_float:
++      opts->x_target_flags |= MASK_HARD_FLOAT;
++      return true;
++
++    default:
++      return true;
++    }
++}
++
++/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
++static const struct default_options riscv_option_optimization_table[] =
++  {
++    { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
++    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
++    { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
++    { OPT_LEVELS_NONE, 0, NULL, 0 }
++  };
++
++#undef TARGET_OPTION_OPTIMIZATION_TABLE
++#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
++
++#define STR(x) #x
++#define XSTR(x) STR (x)
++
++#undef TARGET_DEFAULT_TARGET_FLAGS
++#define TARGET_DEFAULT_TARGET_FLAGS					\
++  (TARGET_DEFAULT							\
++   | riscv_flags_from_arch_string (XSTR (TARGET_ARCH_STRING_DEFAULT))	\
++   | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
++
++#undef TARGET_HANDLE_OPTION
++#define TARGET_HANDLE_OPTION riscv_handle_option
++
++struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
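
As a rough illustration of the -march decoding in riscv_parse_arch_string
above, here is a minimal standalone sketch of the same logic.  The flag
values are made-up stand-ins, not the real MASK_* bits generated from the
option machinery elsewhere in this patch.

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>
    #include <ctype.h>

    enum { F_32BIT = 1, F_MUL = 2, F_DIV = 4, F_ATOMIC = 8,
           F_HARD_FLOAT = 16, F_DOUBLE_FLOAT = 32, F_RVC = 64 };

    /* Simplified re-implementation of the ISA-string parse above.  */
    static int
    parse_arch (const char *isa)
    {
      int flags = 0;
      const char *p = isa;

      if (strncasecmp (p, "RV32", 4) == 0)
        flags |= F_32BIT, p += 4;
      else if (strncasecmp (p, "RV64", 4) == 0)
        p += 4;

      if (toupper (*p) == 'G')          /* G is shorthand for IMAFD.  */
        flags |= F_MUL | F_DIV | F_ATOMIC | F_HARD_FLOAT | F_DOUBLE_FLOAT, p++;
      else if (toupper (*p) == 'I')
        {
          p++;
          if (toupper (*p) == 'M') flags |= F_MUL | F_DIV, p++;
          if (toupper (*p) == 'A') flags |= F_ATOMIC, p++;
          if (toupper (*p) == 'F')
            {
              flags |= F_HARD_FLOAT, p++;
              if (toupper (*p) == 'D') flags |= F_DOUBLE_FLOAT, p++;
            }
        }
      if (toupper (*p) == 'C') flags |= F_RVC;
      return flags;
    }

    int
    main (void)
    {
      /* "RV64GC" and "RV64IMAFDC" decode to the same flag set (0x7e here).  */
      printf ("%#x %#x\n", parse_arch ("RV64GC"), parse_arch ("RV64IMAFDC"));
      return 0;
    }
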
+diff --git original-gcc/gcc/config.gcc gcc-6.2.0/gcc/config.gcc
+index 82cc9a9..b797239 100644
+--- original-gcc/gcc/config.gcc
++++ gcc-6.2.0/gcc/config.gcc
+@@ -453,6 +453,9 @@ powerpc*-*-*)
+ 	esac
+ 	extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ 	;;
++riscv*)
++	cpu_type=riscv
++	;;
+ rs6000*-*-*)
+ 	extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ 	;;
+@@ -2028,6 +2031,20 @@ microblaze*-*-elf)
+ 	cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+ 	tmake_file="${tmake_file} microblaze/t-microblaze"
+         ;;
++riscv*-*-linux*)
++	tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h"
++	tmake_file="${tmake_file} riscv/t-linux"
++	gnu_ld=yes
++	gas=yes
++	gcc_cv_initfini_array=yes
++	;;
++riscv*-*-elf*)
++	tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
++	tmake_file="${tmake_file} riscv/t-elf"
++	gnu_ld=yes
++	gas=yes
++	gcc_cv_initfini_array=yes
++	;;
+ mips*-*-netbsd*)			# NetBSD/mips, either endian.
+ 	target_cpu_default="MASK_ABICALLS"
+ 	tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
+@@ -3959,6 +3976,81 @@ case "${target}" in
+ 		done
+ 		;;
+ 
++	riscv*-*-*)
++		supported_defaults="arch float tune"
++
++		case "${with_arch}" in
++		"")
++			with_arch="G"
++			;;
++		*)
++			;;
++		esac
++
++		# Handle --with-float, or default to soft-float ABI unless the
++		# D extension is present; then, default to double-float ABI.
++		case "${with_float}" in
++		"")
++			case ${with_arch} in
++				"" | *g* | *G* | *d* | *D*)
++					with_float=double
++					;;
++				*)
++					with_float=soft
++					;;
++			esac
++			;;
++		soft | single | double)
++			# OK
++			;;
++		*)
++			echo "Unknown floating point type used in --with-float=$with_float" 1>&2
++			exit 1
++			;;
++		esac
++
++		# Set TARGET_64BIT_DEFAULT from --target.
++		case "${target}" in
++		riscv32*)
++			rv64=0
++			;;
++		riscv64*)
++			rv64=1
++			;;
++		*)
++			rv64=""
++			;;
++		esac
++
++		# Or set TARGET_64BIT_DEFAULT from --with-arch.
++		case "`echo $with_arch | tr A-Z_ a-z-`" in
++		rv32)
++			if test "$rv64" = 1; then
++				echo "--with-arch and --target specify conflicting XLEN"
++				exit 1
++			fi
++			rv64=0
++			;;
++		rv64)
++			if test "$rv64" = 0; then
++				echo "--with-arch and --target specify conflicting XLEN"
++				exit 1
++			fi
++			rv64=1
++			;;
++		*)
++			;;
++		esac
++
++		# Or set TARGET_64BIT_DEFAULT to 1.
++		if test "$rv64" = ""; then
++			rv64=1
++		fi
++
++		tm_defines="${tm_defines} TARGET_64BIT_DEFAULT=${rv64}"
++		tm_defines="${tm_defines} TARGET_ARCH_STRING_DEFAULT=${with_arch}"
++		;;
++
+ 	mips*-*-*)
+ 		supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
+ 
+diff --git original-gcc/gcc/config/riscv/constraints.md gcc-6.2.0/gcc/config/riscv/constraints.md
+new file mode 100644
+index 0000000..19dbbd7
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/constraints.md
+@@ -0,0 +1,93 @@
++;; Constraint definitions for RISC-V target.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Register constraints
++
++(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
++  "A floating-point register (if available).")
++
++(define_register_constraint "b" "ALL_REGS"
++  "@internal")
++
++(define_register_constraint "j" "T_REGS"
++  "@internal")
++
++(define_register_constraint "l" "JALR_REGS"
++  "@internal")
++
++;; Integer constraints
++
++(define_constraint "Z"
++  "@internal"
++  (and (match_code "const_int")
++       (match_test "1")))
++
++(define_constraint "I"
++  "An I-type 12-bit signed immediate."
++  (and (match_code "const_int")
++       (match_test "SMALL_OPERAND (ival)")))
++
++(define_constraint "J"
++  "Integer zero."
++  (and (match_code "const_int")
++       (match_test "ival == 0")))
++
++;; Floating-point constraints
++
++(define_constraint "G"
++  "Floating-point zero."
++  (and (match_code "const_double")
++       (match_test "op == CONST0_RTX (mode)")))
++
++;; General constraints
++
++(define_constraint "Q"
++  "@internal"
++  (match_operand 0 "const_arith_operand"))
++
++(define_memory_constraint "A"
++  "An address that is held in a general-purpose register."
++  (and (match_code "mem")
++       (match_test "GET_CODE(XEXP(op,0)) == REG")))
++
++(define_constraint "S"
++  "@internal
++   A constant call address."
++  (and (match_operand 0 "call_insn_operand")
++       (match_test "CONSTANT_P (op)")))
++
++(define_constraint "T"
++  "@internal
++   A constant @code{move_operand}."
++  (and (match_operand 0 "move_operand")
++       (match_test "CONSTANT_P (op)")))
++
++(define_memory_constraint "W"
++  "@internal
++   A memory address based on a member of @code{BASE_REG_CLASS}."
++  (and (match_code "mem")
++       (match_operand 0 "memory_operand")))
++
++(define_constraint "YG"
++  "@internal
++   A vector zero."
++  (and (match_code "const_vector")
++       (match_test "op == CONST0_RTX (mode)")))
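
The "I" constraint above relies on SMALL_OPERAND, which is defined in
riscv.h later in this patch; the essential check is that the value fits in
a signed 12-bit I-type immediate, i.e. the range [-2048, 2047].  A minimal
sketch of that range test:

    #include <assert.h>

    /* Sketch of the signed 12-bit immediate test behind the "I" constraint.  */
    static int
    small_operand_p (long long x)
    {
      return x >= -2048 && x <= 2047;
    }

    int
    main (void)
    {
      assert (small_operand_p (2047));   /* fits in a single addi */
      assert (!small_operand_p (2048));  /* would need lui + addi  */
      assert (small_operand_p (-2048));
      return 0;
    }
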
+diff --git original-gcc/gcc/config/riscv/elf.h gcc-6.2.0/gcc/config/riscv/elf.h
+new file mode 100644
+index 0000000..491ec8b
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/elf.h
+@@ -0,0 +1,31 @@
++/* Target macros for riscv*-elf targets.
++   Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
++   Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++/* Leave the linker script to choose the appropriate libraries.  */
++#undef  LIB_SPEC
++#define LIB_SPEC ""
++
++#undef  STARTFILE_SPEC
++#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
++
++#undef  ENDFILE_SPEC
++#define ENDFILE_SPEC "crtend%O%s"
++
++#define NO_IMPLICIT_EXTERN_C 1
+diff --git original-gcc/gcc/config/riscv/generic.md gcc-6.2.0/gcc/config/riscv/generic.md
+new file mode 100644
+index 0000000..b2b0a42
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/generic.md
+@@ -0,0 +1,78 @@
++;; Generic DFA-based pipeline description for RISC-V targets.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++;; License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++
++(define_automaton "pipe0")
++(define_cpu_unit "alu" "pipe0")
++(define_cpu_unit "imuldiv" "pipe0")
++(define_cpu_unit "fdivsqrt" "pipe0")
++
++(define_insn_reservation "generic_alu" 1
++  (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
++  "alu")
++
++(define_insn_reservation "generic_load" 3
++  (eq_attr "type" "load,fpload")
++  "alu")
++
++(define_insn_reservation "generic_store" 1
++  (eq_attr "type" "store,fpstore")
++  "alu")
++
++(define_insn_reservation "generic_xfer" 3
++  (eq_attr "type" "mfc,mtc,fcvt,fmove,fcmp")
++  "alu")
++
++(define_insn_reservation "generic_branch" 1
++  (eq_attr "type" "branch,jump,call")
++  "alu")
++
++(define_insn_reservation "generic_imul" 10
++  (eq_attr "type" "imul")
++  "imuldiv*10")
++
++(define_insn_reservation "generic_idivsi" 34
++  (and (eq_attr "type" "idiv")
++       (eq_attr "mode" "SI"))
++  "imuldiv*34")
++
++(define_insn_reservation "generic_idivdi" 66
++  (and (eq_attr "type" "idiv")
++       (eq_attr "mode" "DI"))
++  "imuldiv*66")
++
++(define_insn_reservation "generic_fmul_single" 5
++  (and (eq_attr "type" "fadd,fmul,fmadd")
++       (eq_attr "mode" "SF"))
++  "alu")
++
++(define_insn_reservation "generic_fmul_double" 7
++  (and (eq_attr "type" "fadd,fmul,fmadd")
++       (eq_attr "mode" "DF"))
++  "alu")
++
++(define_insn_reservation "generic_fdiv" 20
++  (eq_attr "type" "fdiv")
++  "fdivsqrt*20")
++
++(define_insn_reservation "generic_fsqrt" 25
++  (eq_attr "type" "fsqrt")
++  "fdivsqrt*25")
+diff --git original-gcc/gcc/config/riscv/linux.h gcc-6.2.0/gcc/config/riscv/linux.h
+new file mode 100644
+index 0000000..4231212
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/linux.h
+@@ -0,0 +1,63 @@
++/* Definitions for RISC-V GNU/Linux systems with ELF format.
++   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
++   2007, 2008, 2010, 2011 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#undef WCHAR_TYPE
++#define WCHAR_TYPE "int"
++
++#undef WCHAR_TYPE_SIZE
++#define WCHAR_TYPE_SIZE 32
++
++#define TARGET_OS_CPP_BUILTINS()				\
++  do {								\
++    GNU_USER_TARGET_OS_CPP_BUILTINS();				\
++    /* The GNU C++ standard library requires this.  */		\
++    if (c_dialect_cxx ())					\
++      builtin_define ("_GNU_SOURCE");				\
++  } while (0)
++
++#undef SUBTARGET_CPP_SPEC
++#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
++
++#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
++#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
++
++#undef LINK_SPEC
++#define LINK_SPEC "\
++%{shared} \
++  %{!shared: \
++    %{!static: \
++      %{rdynamic:-export-dynamic} \
++      %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
++      %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
++    %{static:-static}} \
++%{" OPT_ARCH64 ":-melf64lriscv} \
++%{" OPT_ARCH32 ":-melf32lriscv}"
++
++#undef LIB_SPEC
++#define LIB_SPEC "\
++%{pthread:-lpthread} \
++%{shared:-lc} \
++%{!shared: \
++  %{profile:-lc_p} %{!profile:-lc}}"
++
++/* Similar to standard Linux, but without -ffast-math (crtfastmath) support.  */
++#undef  ENDFILE_SPEC
++#define ENDFILE_SPEC \
++   "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+diff --git original-gcc/gcc/config/riscv/peephole.md gcc-6.2.0/gcc/config/riscv/peephole.md
+new file mode 100644
+index 0000000..898cbbd
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/peephole.md
+@@ -0,0 +1,121 @@
++;;........................
++;; DI -> SI optimizations
++;;........................
++
++;; Simplify (int)(a + 1), etc.
++(define_peephole2
++  [(set (match_operand:DI 0 "register_operand")
++	(match_operator:DI 4 "modular_operator"
++	  [(match_operand:DI 1 "register_operand")
++	   (match_operand:DI 2 "arith_operand")]))
++   (set (match_operand:SI 3 "register_operand")
++	(truncate:SI (match_dup 0)))]
++  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
++   && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
++  [(set (match_dup 3)
++	  (truncate:SI
++	     (match_op_dup:DI 4 
++	       [(match_operand:DI 1 "register_operand")
++		(match_operand:DI 2 "arith_operand")])))])
++
++;; Simplify (int)a + 1, etc.
++(define_peephole2
++  [(set (match_operand:SI 0 "register_operand")
++	(truncate:SI (match_operand:DI 1 "register_operand")))
++   (set (match_operand:SI 3 "register_operand")
++	(match_operator:SI 4 "modular_operator"
++	  [(match_dup 0)
++	   (match_operand:SI 2 "arith_operand")]))]
++  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
++  [(set (match_dup 3)
++	(match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
++
++;; Simplify -(int)a, etc.
++(define_peephole2
++  [(set (match_operand:SI 0 "register_operand")
++	(truncate:SI (match_operand:DI 2 "register_operand")))
++   (set (match_operand:SI 3 "register_operand")
++	(match_operator:SI 4 "modular_operator"
++	  [(match_operand:SI 1 "reg_or_0_operand")
++	   (match_dup 0)]))]
++  "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
++  [(set (match_dup 3)
++	(match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
++
++;; Simplify (unsigned long)(unsigned int)a << const
++(define_peephole2
++  [(set (match_operand:DI 0 "register_operand")
++	(ashift:DI (match_operand:DI 1 "register_operand")
++		   (match_operand 2 "const_int_operand")))
++   (set (match_operand:DI 3 "register_operand")
++	(lshiftrt:DI (match_dup 0) (match_dup 2)))
++   (set (match_operand:DI 4 "register_operand")
++	(ashift:DI (match_dup 3) (match_operand 5 "const_int_operand")))]
++  "TARGET_64BIT
++   && INTVAL (operands[5]) < INTVAL (operands[2])
++   && (REGNO (operands[3]) == REGNO (operands[4])
++       || peep2_reg_dead_p (3, operands[3]))"
++  [(set (match_dup 0)
++	(ashift:DI (match_dup 1) (match_dup 2)))
++   (set (match_dup 4)
++	(lshiftrt:DI (match_dup 0) (match_operand 5)))]
++{
++  operands[5] = GEN_INT (INTVAL (operands[2]) - INTVAL (operands[5]));
++})
++
++;; Simplify PIC loads to static variables.
++;; These will go away once we figure out how to emit auipc discretely.
++(define_insn "*local_pic_load<mode>"
++  [(set (match_operand:ANYI 0 "register_operand" "=r")
++	(mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
++  "USE_LOAD_ADDRESS_MACRO (operands[1])"
++  "<load>\t%0,%1"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_load<mode>"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++   (clobber (match_scratch:DI 2 "=&r"))]
++  "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
++  "<load>\t%0,%1,%2"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_load<mode>"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++   (clobber (match_scratch:SI 2 "=&r"))]
++  "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
++  "<load>\t%0,%1,%2"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_loadu<mode>"
++  [(set (match_operand:SUPERQI 0 "register_operand" "=r")
++	(zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
++  "USE_LOAD_ADDRESS_MACRO (operands[1])"
++  "<load>u\t%0,%1"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storedi<mode>"
++  [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++	(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++   (clobber (match_scratch:DI 2 "=&r"))]
++  "TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++  "<store>\t%z1,%0,%2"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storesi<mode>"
++  [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++	(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++   (clobber (match_scratch:SI 2 "=&r"))]
++  "!TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++  "<store>\t%z1,%0,%2"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storedi<mode>"
++  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++	(match_operand:ANYF 1 "register_operand" "f"))
++   (clobber (match_scratch:DI 2 "=&r"))]
++  "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++  "<store>\t%1,%0,%2"
++  [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storesi<mode>"
++  [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++	(match_operand:ANYF 1 "register_operand" "f"))
++   (clobber (match_scratch:SI 2 "=&r"))]
++  "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++  "<store>\t%1,%0,%2"
++  [(set (attr "length") (const_int 8))])
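
As a rough illustration of what the first DI -> SI peephole above is after:
in a function like the sketch below, the 64-bit add followed by the
truncation to int can be folded into a single word-sized operation
(typically an addw) on RV64, rather than an add plus a separate
sign-extending step.  The exact code generated depends on the rest of the
port.

    /* Source pattern the DI -> SI peephole targets (illustrative only).  */
    int
    add_one_narrow (long a)
    {
      return (int) (a + 1);
    }
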
+diff --git original-gcc/gcc/config/riscv/predicates.md gcc-6.2.0/gcc/config/riscv/predicates.md
+new file mode 100644
+index 0000000..0ed8a4a
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/predicates.md
+@@ -0,0 +1,186 @@
++;; Predicate description for RISC-V target.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_predicate "const_arith_operand"
++  (and (match_code "const_int")
++       (match_test "SMALL_OPERAND (INTVAL (op))")))
++
++(define_predicate "arith_operand"
++  (ior (match_operand 0 "const_arith_operand")
++       (match_operand 0 "register_operand")))
++
++(define_predicate "sle_operand"
++  (and (match_code "const_int")
++       (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
++
++(define_predicate "sleu_operand"
++  (and (match_operand 0 "sle_operand")
++       (match_test "INTVAL (op) + 1 != 0")))
++
++(define_predicate "const_0_operand"
++  (and (match_code "const_int,const_double,const_vector")
++       (match_test "op == CONST0_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_0_operand"
++  (ior (match_operand 0 "const_0_operand")
++       (match_operand 0 "register_operand")))
++
++(define_predicate "const_1_operand"
++  (and (match_code "const_int,const_double,const_vector")
++       (match_test "op == CONST1_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_1_operand"
++  (ior (match_operand 0 "const_1_operand")
++       (match_operand 0 "register_operand")))
++
++;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
++(define_predicate "branch_on_bit_operand"
++  (and (match_code "const_int")
++       (match_test "INTVAL (op) >= IMM_BITS - 1")))
++
++;; This is used for indexing into vectors, and hence only accepts const_int.
++(define_predicate "const_0_or_1_operand"
++  (and (match_code "const_int")
++       (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
++	    (match_test "op == CONST1_RTX (GET_MODE (op))"))))
++
++(define_special_predicate "pc_or_label_operand"
++  (match_code "pc,label_ref"))
++
++;; A legitimate CONST_INT operand that takes more than one instruction
++;; to load.
++(define_predicate "splittable_const_int_operand"
++  (match_code "const_int")
++{
++  /* Don't handle multi-word moves this way; we don't want to introduce
++     the individual word-mode moves until after reload.  */
++  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++    return false;
++
++  /* Otherwise check whether the constant can be loaded in a single
++     instruction.  */
++  return !LUI_OPERAND (INTVAL (op)) && !SMALL_OPERAND (INTVAL (op));
++})
++
++(define_predicate "move_operand"
++  (match_operand 0 "general_operand")
++{
++  enum riscv_symbol_type symbol_type;
++
++  /* The thinking here is as follows:
++
++     (1) The move expanders should split complex load sequences into
++	 individual instructions.  Those individual instructions can
++	 then be optimized by all rtl passes.
++
++     (2) The target of pre-reload load sequences should not be used
++	 to store temporary results.  If the target register is only
++	 assigned one value, reload can rematerialize that value
++	 on demand, rather than spill it to the stack.
++
++     (3) If we allowed pre-reload passes like combine and cse to recreate
++	 complex load sequences, we would want to be able to split the
++	 sequences before reload as well, so that the pre-reload scheduler
++	 can see the individual instructions.  This falls foul of (2);
++	 the splitter would be forced to reuse the target register for
++	 intermediate results.
++
++     (4) We want to define complex load splitters for combine.  These
++	 splitters can request a temporary scratch register, which avoids
++	 the problem in (2).  They allow things like:
++
++	      (set (reg T1) (high SYM))
++	      (set (reg T2) (low (reg T1) SYM))
++	      (set (reg X) (plus (reg T2) (const_int OFFSET)))
++
++	 to be combined into:
++
++	      (set (reg T3) (high SYM+OFFSET))
++	      (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
++
++	 if T2 is only used this once.  */
++  switch (GET_CODE (op))
++    {
++    case CONST_INT:
++      return !splittable_const_int_operand (op, mode);
++
++    case CONST:
++    case SYMBOL_REF:
++    case LABEL_REF:
++      return riscv_symbolic_constant_p (op, &symbol_type)
++	      && !riscv_split_symbol_type (symbol_type);
++
++    case HIGH:
++      op = XEXP (op, 0);
++      return riscv_symbolic_constant_p (op, &symbol_type)
++	      && riscv_split_symbol_type (symbol_type)
++	      && symbol_type != SYMBOL_PCREL;
++
++    default:
++      return true;
++    }
++})
++
++(define_predicate "consttable_operand"
++  (match_test "CONSTANT_P (op)"))
++
++(define_predicate "symbolic_operand"
++  (match_code "const,symbol_ref,label_ref")
++{
++  enum riscv_symbol_type type;
++  return riscv_symbolic_constant_p (op, &type);
++})
++
++(define_predicate "absolute_symbolic_operand"
++  (match_code "const,symbol_ref,label_ref")
++{
++  enum riscv_symbol_type type;
++  return (riscv_symbolic_constant_p (op, &type)
++	  && (type == SYMBOL_ABSOLUTE || type == SYMBOL_PCREL));
++})
++
++(define_predicate "plt_symbolic_operand"
++  (match_code "const,symbol_ref,label_ref")
++{
++  enum riscv_symbol_type type;
++  return (riscv_symbolic_constant_p (op, &type)
++	  && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
++})
++
++(define_predicate "call_insn_operand"
++  (ior (match_operand 0 "absolute_symbolic_operand")
++       (match_operand 0 "plt_symbolic_operand")
++       (match_operand 0 "register_operand")))
++
++(define_predicate "symbol_ref_operand"
++  (match_code "symbol_ref"))
++
++(define_predicate "modular_operator"
++  (match_code "plus,minus,mult,ashift"))
++
++(define_predicate "equality_operator"
++  (match_code "eq,ne"))
++
++(define_predicate "order_operator"
++  (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
++
++(define_predicate "fp_order_operator"
++  (match_code "eq,ne,lt,le,gt,ge"))
+diff --git original-gcc/gcc/config/riscv/riscv-ftypes.def gcc-6.2.0/gcc/config/riscv/riscv-ftypes.def
+new file mode 100644
+index 0000000..96a38f1
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-ftypes.def
+@@ -0,0 +1,39 @@
++/* Definitions of prototypes for RISC-V built-in functions.
++   Copyright (C) 2011-2014 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
++   RISC-V built-in functions, where:
++
++      NARGS is the number of arguments.
++      LIST contains the return-type code followed by the codes for each
++        argument type.
++
++   Argument- and return-type codes are either modes or one of the following:
++
++      VOID for void_type_node
++      INT for integer_type_node
++      POINTER for ptr_type_node
++
++   (we don't use PTR because that's an ANSI-compatibility macro).
++
++   Please keep this list lexicographically sorted by the LIST argument.  */
++
++DEF_RISCV_FTYPE (1, (VOID, VOID))
+diff --git original-gcc/gcc/config/riscv/riscv-modes.def gcc-6.2.0/gcc/config/riscv/riscv-modes.def
+new file mode 100644
+index 0000000..bb42344
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-modes.def
+@@ -0,0 +1,26 @@
++/* Extra machine modes for RISC-V target.
++   Copyright (C) 2011-2014 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++FLOAT_MODE (TF, 16, ieee_quad_format);
++
++/* Vector modes.  */
++VECTOR_MODES (INT, 4);        /*       V8QI V4HI V2SI */
++VECTOR_MODES (FLOAT, 4);      /*            V4HF V2SF */
+diff --git original-gcc/gcc/config/riscv/riscv-opts.h gcc-6.2.0/gcc/config/riscv/riscv-opts.h
+new file mode 100644
+index 0000000..2636a46
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-opts.h
+@@ -0,0 +1,31 @@
++/* Definition of RISC-V target for GNU compiler.
++   Copyright (C) 2016 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef GCC_RISCV_OPTS_H
++#define GCC_RISCV_OPTS_H
++
++enum riscv_float_abi_type {
++  FLOAT_ABI_SOFT,
++  FLOAT_ABI_SINGLE,
++  FLOAT_ABI_DOUBLE
++};
++extern enum riscv_float_abi_type riscv_float_abi;
++
++#endif /* ! GCC_RISCV_OPTS_H */
+diff --git original-gcc/gcc/config/riscv/riscv-protos.h gcc-6.2.0/gcc/config/riscv/riscv-protos.h
+new file mode 100644
+index 0000000..ef2ddca
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-protos.h
+@@ -0,0 +1,98 @@
++/* Definition of RISC-V target for GNU compiler.
++   Copyright (C) 2011-2014 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef GCC_RISCV_PROTOS_H
++#define GCC_RISCV_PROTOS_H
++
++enum riscv_symbol_type {
++  SYMBOL_ABSOLUTE,
++  SYMBOL_PCREL,
++  SYMBOL_GOT_DISP,
++  SYMBOL_TLS,
++  SYMBOL_TLS_LE,
++  SYMBOL_TLS_IE,
++  SYMBOL_TLS_GD
++};
++#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
++
++enum riscv_code_model {
++  CM_MEDLOW,
++  CM_MEDANY,
++  CM_PIC
++};
++extern enum riscv_code_model riscv_cmodel;
++
++extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
++extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
++extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
++extern bool riscv_hard_regno_mode_ok_p (unsigned int, enum machine_mode);
++extern int riscv_address_insns (rtx, enum machine_mode, bool);
++extern int riscv_const_insns (rtx);
++extern int riscv_split_const_insns (rtx);
++extern int riscv_load_store_insns (rtx, rtx_insn *);
++extern rtx riscv_emit_move (rtx, rtx);
++extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
++extern bool riscv_split_symbol_type (enum riscv_symbol_type);
++extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
++extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
++extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
++extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
++
++extern rtx riscv_subword (rtx, bool);
++extern bool riscv_split_64bit_move_p (rtx, rtx);
++extern void riscv_split_doubleword_move (rtx, rtx);
++extern const char *riscv_output_move (rtx, rtx);
++extern const char *riscv_output_gpr_save (unsigned);
++#ifdef RTX_CODE
++extern void riscv_expand_scc (rtx *);
++extern void riscv_expand_conditional_branch (rtx *);
++#endif
++extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
++extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
++extern void riscv_set_return_address (rtx, rtx);
++extern bool riscv_expand_block_move (rtx, rtx, rtx);
++extern void riscv_expand_synci_loop (rtx, rtx);
++
++extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
++					       HOST_WIDE_INT);
++extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
++						HOST_WIDE_INT);
++extern void riscv_order_regs_for_local_alloc (void);
++
++extern rtx riscv_return_addr (int, rtx);
++extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
++extern void riscv_expand_prologue (void);
++extern void riscv_expand_epilogue (bool);
++extern bool riscv_can_use_return_insn (void);
++extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
++
++extern enum reg_class riscv_secondary_reload_class (enum reg_class,
++						   enum machine_mode,
++						   rtx, bool);
++extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
++
++extern void irix_asm_output_align (FILE *, unsigned);
++extern const char *current_section_name (void);
++extern unsigned int current_section_flags (void);
++
++extern void riscv_expand_vector_init (rtx, rtx);
++
++#endif /* ! GCC_RISCV_PROTOS_H */
+diff --git original-gcc/gcc/config/riscv/riscv.c gcc-6.2.0/gcc/config/riscv/riscv.c
+new file mode 100644
+index 0000000..03c27cc
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.c
+@@ -0,0 +1,4427 @@
++/* Subroutines used for code generation for RISC-V.
++   Copyright (C) 2011-2014 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "regs.h"
++#include "hard-reg-set.h"
++#include "insn-config.h"
++#include "conditions.h"
++#include "insn-attr.h"
++#include "recog.h"
++#include "output.h"
++#include "hash-set.h"
++#include "machmode.h"
++#include "vec.h"
++#include "double-int.h"
++#include "input.h"
++#include "alias.h"
++#include "symtab.h"
++#include "wide-int.h"
++#include "inchash.h"
++#include "tree.h"
++#include "fold-const.h"
++#include "varasm.h"
++#include "stringpool.h"
++#include "stor-layout.h"
++#include "calls.h"
++#include "function.h"
++#include "hashtab.h"
++#include "flags.h"
++#include "statistics.h"
++#include "real.h"
++#include "fixed-value.h"
++#include "expmed.h"
++#include "dojump.h"
++#include "explow.h"
++#include "emit-rtl.h"
++#include "stmt.h"
++#include "expr.h"
++#include "insn-codes.h"
++#include "optabs.h"
++#include "libfuncs.h"
++#include "reload.h"
++#include "tm_p.h"
++#include "ggc.h"
++#include "gstab.h"
++#include "hash-table.h"
++#include "debug.h"
++#include "target.h"
++#include "target-def.h"
++#include "common/common-target.h"
++#include "langhooks.h"
++#include "dominance.h"
++#include "cfg.h"
++#include "cfgrtl.h"
++#include "cfganal.h"
++#include "lcm.h"
++#include "cfgbuild.h"
++#include "cfgcleanup.h"
++#include "predict.h"
++#include "basic-block.h"
++#include "bitmap.h"
++#include "regset.h"
++#include "df.h"
++#include "sched-int.h"
++#include "tree-ssa-alias.h"
++#include "internal-fn.h"
++#include "gimple-fold.h"
++#include "tree-eh.h"
++#include "gimple-expr.h"
++#include "is-a.h"
++#include "gimple.h"
++#include "gimplify.h"
++#include "diagnostic.h"
++#include "target-globals.h"
++#include "opts.h"
++#include "tree-pass.h"
++#include "context.h"
++#include "hash-map.h"
++#include "plugin-api.h"
++#include "ipa-ref.h"
++#include "cgraph.h"
++#include "builtins.h"
++#include "rtl-iter.h"
++#include <stdint.h>
++
++/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
++#define UNSPEC_ADDRESS_P(X)					\
++  (GET_CODE (X) == UNSPEC					\
++   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
++   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
++
++/* Extract the symbol or label from UNSPEC wrapper X.  */
++#define UNSPEC_ADDRESS(X) \
++  XVECEXP (X, 0, 0)
++
++/* Extract the symbol type from UNSPEC wrapper X.  */
++#define UNSPEC_ADDRESS_TYPE(X) \
++  ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
++
++/* The maximum distance between the top of the stack frame and the
++   value sp has when we save and restore registers.  This is set by the
++   range  of load/store offsets and must also preserve stack alignment. */
++#define RISCV_MAX_FIRST_STACK_STEP (IMM_REACH/2 - 16)
++
++/* True if INSN is a riscv.md pattern or asm statement.  */
++#define USEFUL_INSN_P(INSN)						\
++  (NONDEBUG_INSN_P (INSN)						\
++   && GET_CODE (PATTERN (INSN)) != USE					\
++   && GET_CODE (PATTERN (INSN)) != CLOBBER				\
++   && GET_CODE (PATTERN (INSN)) != ADDR_VEC				\
++   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
++
++/* True if bit BIT is set in VALUE.  */
++#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
++
++/* Classifies an address.
++
++   ADDRESS_REG
++       A natural register + offset address.  The register satisfies
++       riscv_valid_base_register_p and the offset is a const_arith_operand.
++
++   ADDRESS_LO_SUM
++       A LO_SUM rtx.  The first operand is a valid base register and
++       the second operand is a symbolic address.
++
++   ADDRESS_CONST_INT
++       A signed 12-bit constant address.
++
++   ADDRESS_SYMBOLIC:
++       A constant symbolic address.  */
++enum riscv_address_type {
++  ADDRESS_REG,
++  ADDRESS_LO_SUM,
++  ADDRESS_CONST_INT,
++  ADDRESS_SYMBOLIC
++};
++
++enum riscv_code_model riscv_cmodel = TARGET_DEFAULT_CMODEL;
++
++/* Macros to create an enumeration identifier for a function prototype.  */
++#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
++#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
++#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
++#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
++
++/* Classifies the prototype of a built-in function.  */
++enum riscv_function_type {
++#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++  RISCV_MAX_FTYPE_MAX
++};
++
++/* Specifies how a built-in function should be converted into rtl.  */
++enum riscv_builtin_type {
++  /* The function corresponds directly to an .md pattern.  The return
++     value is mapped to operand 0 and the arguments are mapped to
++     operands 1 and above.  */
++  RISCV_BUILTIN_DIRECT,
++
++  /* The function corresponds directly to an .md pattern.  There is no return
++     value and the arguments are mapped to operands 0 and above.  */
++  RISCV_BUILTIN_DIRECT_NO_TARGET
++};
++
++/* Information about a function's frame layout.  */
++struct GTY(())  riscv_frame_info {
++  /* The size of the frame in bytes.  */
++  HOST_WIDE_INT total_size;
++
++  /* Bit X is set if the function saves or restores GPR X.  */
++  unsigned int mask;
++
++  /* Likewise FPR X.  */
++  unsigned int fmask;
++
++  /* How much the GPR save/restore routines adjust sp (or 0 if unused).  */
++  unsigned save_libcall_adjustment;
++
++  /* Offsets of fixed-point and floating-point save areas from frame bottom */
++  HOST_WIDE_INT gp_sp_offset;
++  HOST_WIDE_INT fp_sp_offset;
++
++  /* Offset of virtual frame pointer from stack pointer/frame bottom */
++  HOST_WIDE_INT frame_pointer_offset;
++
++  /* Offset of hard frame pointer from stack pointer/frame bottom */
++  HOST_WIDE_INT hard_frame_pointer_offset;
++
++  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
++  HOST_WIDE_INT arg_pointer_offset;
++};
++
++struct GTY(())  machine_function {
++  /* The number of extra stack bytes taken up by register varargs.
++     This area is allocated by the callee at the very top of the frame.  */
++  int varargs_size;
++
++  /* Cached return value of leaf_function_p.  <0 if false, >0 if true.  */
++  int is_leaf;
++
++  /* The current frame information, calculated by riscv_compute_frame_info.  */
++  struct riscv_frame_info frame;
++};
++
++/* Information about a single argument.  */
++struct riscv_arg_info {
++  /* True if the argument is passed in a floating-point register, or
++     would have been if we hadn't run out of registers.  */
++  bool fpr_p;
++
++  /* The number of words passed in registers, rounded up.  */
++  unsigned int reg_words;
++
++  /* For EABI, the offset of the first register from GP_ARG_FIRST or
++     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
++     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
++     comment for details).
++
++     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
++     on the stack.  */
++  unsigned int reg_offset;
++
++  /* The number of words that must be passed on the stack, rounded up.  */
++  unsigned int stack_words;
++
++  /* The offset from the start of the stack overflow area of the argument's
++     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
++  unsigned int stack_offset;
++};
++
++/* Information about an address described by riscv_address_type.
++
++   ADDRESS_CONST_INT
++       No fields are used.
++
++   ADDRESS_REG
++       REG is the base register and OFFSET is the constant offset.
++
++   ADDRESS_LO_SUM
++       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
++       is the type of symbol it references.
++
++   ADDRESS_SYMBOLIC
++       SYMBOL_TYPE is the type of symbol that the address references.  */
++struct riscv_address_info {
++  enum riscv_address_type type;
++  rtx reg;
++  rtx offset;
++  enum riscv_symbol_type symbol_type;
++};
++
++/* One stage in a constant building sequence.  These sequences have
++   the form:
++
++	A = VALUE[0]
++	A = A CODE[1] VALUE[1]
++	A = A CODE[2] VALUE[2]
++	...
++
++   where A is an accumulator, each CODE[i] is a binary rtl operation
++   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
++struct riscv_integer_op {
++  enum rtx_code code;
++  unsigned HOST_WIDE_INT value;
++};
++
++/* The largest number of operations needed to load an integer constant.
++   The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
++   but we may attempt and reject even worse sequences.  */
++#define RISCV_MAX_INTEGER_OPS 32
++
++/* Costs of various operations on the different architectures.  */
++
++struct riscv_tune_info
++{
++  unsigned short fp_add[2];
++  unsigned short fp_mul[2];
++  unsigned short fp_div[2];
++  unsigned short int_mul[2];
++  unsigned short int_div[2];
++  unsigned short issue_rate;
++  unsigned short branch_cost;
++  unsigned short memory_cost;
++};
++
++/* Information about one CPU we know about.  */
++struct riscv_cpu_info {
++  /* This CPU's canonical name.  */
++  const char *name;
++
++  /* The RISC-V ISA and extensions supported by this CPU.  */
++  const char *isa;
++
++  /* Tuning parameters for this CPU.  */
++  const struct riscv_tune_info *tune_info;
++};
++
++/* Global variables for machine-dependent things.  */
++
++/* Which tuning parameters to use.  */
++static const struct riscv_tune_info *tune_info;
++
++/* Index R is the smallest register class that contains register R.  */
++const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
++  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
++  GR_REGS,	T_REGS,		T_REGS,		T_REGS,
++  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
++  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
++  GR_REGS,	GR_REGS, 	GR_REGS,	GR_REGS,
++  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
++  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS,
++  T_REGS,	T_REGS,		T_REGS,		T_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
++  FRAME_REGS,	FRAME_REGS,
++};
++
++/* Costs to use when tuning for rocket.  */
++static const struct riscv_tune_info rocket_tune_info = {
++  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_add */
++  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},	/* fp_mul */
++  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},	/* fp_div */
++  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},	/* int_mul */
++  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},	/* int_div */
++  1,						/* issue_rate */
++  3,						/* branch_cost */
++  5						/* memory_cost */
++};
++
++/* Costs to use when optimizing for size.  */
++static const struct riscv_tune_info optimize_size_tune_info = {
++  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_add */
++  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_mul */
++  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* fp_div */
++  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_mul */
++  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},	/* int_div */
++  1,						/* issue_rate */
++  1,						/* branch_cost */
++  1						/* memory_cost */
++};
++
++/* A table describing all the processors GCC knows about.  */
++static const struct riscv_cpu_info riscv_cpu_info_table[] = {
++  /* Entries for generic ISAs.  */
++  { "rocket", "IMAFD", &rocket_tune_info },
++};
++
++/* Return the riscv_cpu_info entry for the given name string.  */
++
++static const struct riscv_cpu_info *
++riscv_parse_cpu (const char *cpu_string)
++{
++  unsigned int i;
++
++  for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
++    if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
++      return riscv_cpu_info_table + i;
++
++  error ("unknown cpu `%s' for -mtune", cpu_string);
++  return riscv_cpu_info_table;
++}
++
++/* Fill CODES with a sequence of rtl operations to load VALUE.
++   Return the number of operations needed.  */
++
++static int
++riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++		       enum machine_mode mode)
++{
++  HOST_WIDE_INT low_part = CONST_LOW_PART (value);
++  int cost = INT_MAX, alt_cost;
++  struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++
++  if (SMALL_OPERAND (value) || LUI_OPERAND (value))
++    {
++      /* Simply ADDI or LUI */
++      codes[0].code = UNKNOWN;
++      codes[0].value = value;
++      return 1;
++    }
++
++  /* End with ADDI */
++  if (low_part != 0
++      && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
++    {
++      cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
++      codes[cost-1].code = PLUS;
++      codes[cost-1].value = low_part;
++    }
++
++  /* End with XORI */
++  if (cost > 2 && (low_part < 0 || mode == HImode))
++    {
++      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
++      alt_codes[alt_cost-1].code = XOR;
++      alt_codes[alt_cost-1].value = low_part;
++      if (alt_cost < cost)
++	cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++    }
++
++  /* Eliminate trailing zeros and end with SLLI */
++  if (cost > 2 && (value & 1) == 0)
++    {
++      int shift = 0;
++      while ((value & 1) == 0)
++	shift++, value >>= 1;
++      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
++      alt_codes[alt_cost-1].code = ASHIFT;
++      alt_codes[alt_cost-1].value = shift;
++      if (alt_cost < cost)
++	cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++    }
++
++  gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
++  return cost;
++}
++
++static int
++riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++		     enum machine_mode mode)
++{
++  int cost = riscv_build_integer_1 (codes, value, mode);
++
++  /* Eliminate leading zeros and end with SRLI */
++  if (value > 0 && cost > 2)
++    {
++      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++      int alt_cost, shift = 0;
++      HOST_WIDE_INT shifted_val;
++
++      /* Try filling trailing bits with 1s */
++      while ((value << shift) >= 0)
++	shift++;
++      shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
++      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++      alt_codes[alt_cost-1].code = LSHIFTRT;
++      alt_codes[alt_cost-1].value = shift;
++      if (alt_cost < cost)
++	cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++
++      /* Try filling trailing bits with 0s */
++      shifted_val = value << shift;
++      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++      alt_codes[alt_cost-1].code = LSHIFTRT;
++      alt_codes[alt_cost-1].value = shift;
++      if (alt_cost < cost)
++	cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++    }
++
++  return cost;
++}
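
/* Worked example of the constant-building logic above (illustrative;
   CONST_LOW_PART, SMALL_OPERAND and LUI_OPERAND are defined in riscv.h
   later in this patch):

     value = 0x12345678
       low_part         = 0x678        (sign-extended low 12 bits)
       value - low_part = 0x12345000   (a LUI_OPERAND, one op)

   so riscv_build_integer_1 settles on two operations, e.g.

     lui  rd, 0x12345
     addi rd, rd, 0x678                 (0x12345000 + 0x678 = 0x12345678)  */
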
++
++static int
++riscv_split_integer_cost (HOST_WIDE_INT val)
++{
++  int cost;
++  int32_t loval = val, hival = (val - (int32_t)val) >> 32;
++  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++
++  cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
++  if (loval != hival)
++    cost += riscv_build_integer (codes, hival, VOIDmode);
++
++  return cost;
++}
++
++static int
++riscv_integer_cost (HOST_WIDE_INT val)
++{
++  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++  return MIN (riscv_build_integer (codes, val, VOIDmode),
++	      riscv_split_integer_cost (val));
++}
++
++/* Try to split a 64b integer into 32b parts, then reassemble. */
++
++static rtx
++riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
++{
++  int32_t loval = val, hival = (val - (int32_t)val) >> 32;
++  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
++
++  riscv_move_integer (hi, hi, hival);
++  riscv_move_integer (lo, lo, loval);
++
++  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
++  hi = force_reg (mode, hi);
++
++  return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
++}
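
/* Worked example of the split above (illustrative):

     val    = 0x1234567880000000
     loval  = (int32_t) val        = -0x80000000
     hival  = (val - loval) >> 32  =  0x12345679
     (hival << 32) + loval         =  0x1234567880000000

   Using the signed low half and compensating in hival keeps both halves
   within 32-bit range for riscv_build_integer, and the shift-and-add
   emitted above reassembles the original value.  */
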
++
++/* Return true if X is a thread-local symbol.  */
++
++static bool
++riscv_tls_symbol_p (const_rtx x)
++{
++  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
++}
++
++static bool
++riscv_symbol_binds_local_p (const_rtx x)
++{
++  return (SYMBOL_REF_DECL (x)
++	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
++	  : SYMBOL_REF_LOCAL_P (x));
++}
++
++/* Return the method that should be used to access SYMBOL_REF or
++   LABEL_REF X.  */
++
++static enum riscv_symbol_type
++riscv_classify_symbol (const_rtx x)
++{
++  if (riscv_tls_symbol_p (x))
++    return SYMBOL_TLS;
++
++  switch (GET_CODE (x))
++    {
++    case LABEL_REF:
++      if (LABEL_REF_NONLOCAL_P (x))
++	return SYMBOL_GOT_DISP;
++      break;
++
++    case SYMBOL_REF:
++      if (flag_pic && !riscv_symbol_binds_local_p (x))
++	return SYMBOL_GOT_DISP;
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++
++  return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
++}
++
++/* Classify the base of symbolic expression X.  */
++
++enum riscv_symbol_type
++riscv_classify_symbolic_expression (rtx x)
++{
++  rtx offset;
++
++  split_const (x, &x, &offset);
++  if (UNSPEC_ADDRESS_P (x))
++    return UNSPEC_ADDRESS_TYPE (x);
++
++  return riscv_classify_symbol (x);
++}
++
++/* Return true if X is a symbolic constant.  If it is, store the type of
++   the symbol in *SYMBOL_TYPE.  */
++
++bool
++riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
++{
++  rtx offset;
++
++  split_const (x, &x, &offset);
++  if (UNSPEC_ADDRESS_P (x))
++    {
++      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
++      x = UNSPEC_ADDRESS (x);
++    }
++  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
++    *symbol_type = riscv_classify_symbol (x);
++  else
++    return false;
++
++  if (offset == const0_rtx)
++    return true;
++
++  /* Check whether a nonzero offset is valid for the underlying
++     relocations.  */
++  switch (*symbol_type)
++    {
++    case SYMBOL_ABSOLUTE:
++    case SYMBOL_PCREL:
++    case SYMBOL_TLS_LE:
++      return (int32_t) INTVAL (offset) == INTVAL (offset);
++
++    default:
++      return false;
++    }
++  gcc_unreachable ();
++}
++
++/* Returns the number of instructions necessary to reference a symbol. */
++
++static int riscv_symbol_insns (enum riscv_symbol_type type)
++{
++  switch (type)
++    {
++    case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
++    case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
++    case SYMBOL_PCREL: return 2; /* AUIPC + the reference itself */
++    case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
++    case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
++    default: gcc_unreachable();
++    }
++}
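
/* Illustrative expansions behind the counts above, assuming the usual
   assembler relocation operators:

     SYMBOL_ABSOLUTE (2):  lui   a5, %hi(sym)
                           lw    a0, %lo(sym)(a5)

     SYMBOL_PCREL    (2):  auipc a5, %pcrel_hi(sym)
                           lw    a0, %pcrel_lo(...)(a5)  */
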
++
++/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
++
++static bool
++riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++  return riscv_const_insns (x) > 0;
++}
++
++/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
++
++static bool
++riscv_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++  enum riscv_symbol_type type;
++  rtx base, offset;
++
++  /* There is no assembler syntax for expressing an address-sized
++     high part.  */
++  if (GET_CODE (x) == HIGH)
++    return true;
++
++  split_const (x, &base, &offset);
++  if (riscv_symbolic_constant_p (base, &type))
++    {
++      /* As an optimization, don't spill symbolic constants that are as
++	 cheap to rematerialize as to access in the constant pool.  */
++      if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
++	return true;
++
++      /* As an optimization, avoid needlessly generating dynamic relocations.  */
++      if (flag_pic)
++	return true;
++    }
++
++  /* TLS symbols must be computed by riscv_legitimize_move.  */
++  if (tls_referenced_p (x))
++    return true;
++
++  return false;
++}
++
++/* Return true if register REGNO is a valid base register for mode MODE.
++   STRICT_P is true if REG_OK_STRICT is in effect.  */
++
++int
++riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
++			       bool strict_p)
++{
++  if (!HARD_REGISTER_NUM_P (regno))
++    {
++      if (!strict_p)
++	return true;
++      regno = reg_renumber[regno];
++    }
++
++  /* These fake registers will be eliminated to either the stack or
++     hard frame pointer, both of which are usually valid base registers.
++     Reload deals with the cases where the eliminated form isn't valid.  */
++  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
++    return true;
++
++  return GP_REG_P (regno);
++}
++
++/* Return true if X is a valid base register for mode MODE.
++   STRICT_P is true if REG_OK_STRICT is in effect.  */
++
++static bool
++riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
++{
++  if (!strict_p && GET_CODE (x) == SUBREG)
++    x = SUBREG_REG (x);
++
++  return (REG_P (x)
++	  && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
++}
++
++/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
++   can address a value of mode MODE.  */
++
++static bool
++riscv_valid_offset_p (rtx x, enum machine_mode mode)
++{
++  /* Check that X is a signed 12-bit number.  */
++  if (!const_arith_operand (x, Pmode))
++    return false;
++
++  /* We may need to split multiword moves, so make sure that every word
++     is accessible.  */
++  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
++    return false;
++
++  return true;
++}
++
++/* Should a symbol of type SYMBOL_TYPE be split in two?  */
++
++bool
++riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
++{
++  if (symbol_type == SYMBOL_TLS_LE)
++    return true;
++
++  if (!TARGET_EXPLICIT_RELOCS)
++    return false;
++
++  return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
++}
++
++/* Return true if a LO_SUM can address a value of mode MODE when the
++   LO_SUM symbol has type SYMBOL_TYPE.  */
++
++static bool
++riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
++{
++  /* Check that symbols of type SYMBOL_TYPE can be used to access values
++     of mode MODE.  */
++  if (riscv_symbol_insns (symbol_type) == 0)
++    return false;
++
++  /* Check that there is a known low-part relocation.  */
++  if (!riscv_split_symbol_type (symbol_type))
++    return false;
++
++  /* We may need to split multiword moves, so make sure that each word
++     can be accessed without inducing a carry.  This is mainly needed
++     for o64, which has historically only guaranteed 64-bit alignment
++     for 128-bit types.  */
++  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++      && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
++    return false;
++
++  return true;
++}
++
++/* Return true if X is a valid address for machine mode MODE.  If it is,
++   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
++   effect.  */
++
++static bool
++riscv_classify_address (struct riscv_address_info *info, rtx x,
++		       enum machine_mode mode, bool strict_p)
++{
++  switch (GET_CODE (x))
++    {
++    case REG:
++    case SUBREG:
++      info->type = ADDRESS_REG;
++      info->reg = x;
++      info->offset = const0_rtx;
++      return riscv_valid_base_register_p (info->reg, mode, strict_p);
++
++    case PLUS:
++      info->type = ADDRESS_REG;
++      info->reg = XEXP (x, 0);
++      info->offset = XEXP (x, 1);
++      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++	      && riscv_valid_offset_p (info->offset, mode));
++
++    case LO_SUM:
++      info->type = ADDRESS_LO_SUM;
++      info->reg = XEXP (x, 0);
++      info->offset = XEXP (x, 1);
++      /* We have to trust the creator of the LO_SUM to do something vaguely
++	 sane.  Target-independent code that creates a LO_SUM should also
++	 create and verify the matching HIGH.  Target-independent code that
++	 adds an offset to a LO_SUM must prove that the offset will not
++	 induce a carry.  Failure to do either of these things would be
++	 a bug, and we are not required to check for it here.  The RISCV
++	 backend itself should only create LO_SUMs for valid symbolic
++	 constants, with the high part being either a HIGH or a copy
++	 of _gp. */
++      info->symbol_type
++	= riscv_classify_symbolic_expression (info->offset);
++      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++	      && riscv_valid_lo_sum_p (info->symbol_type, mode));
++
++    case CONST_INT:
++      /* Small-integer addresses don't occur very often, but they
++	 are legitimate if x0 is a valid base register.  */
++      info->type = ADDRESS_CONST_INT;
++      return SMALL_OPERAND (INTVAL (x));
++
++    default:
++      return false;
++    }
++}
++
++/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */
++
++static bool
++riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
++{
++  struct riscv_address_info addr;
++
++  return riscv_classify_address (&addr, x, mode, strict_p);
++}
++
++/* Return the number of instructions needed to load or store a value
++   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
++   Assume that multiword moves may need to be split into word moves
++   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
++   enough. */
++
++int
++riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
++{
++  struct riscv_address_info addr;
++  int n = 1;
++
++  if (!riscv_classify_address (&addr, x, mode, false))
++    return 0;
++
++  /* BLKmode is used for single unaligned loads and stores and should
++     not count as a multiword mode. */
++  if (mode != BLKmode && might_split_p)
++    n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++  if (addr.type == ADDRESS_LO_SUM)
++    n += riscv_symbol_insns (addr.symbol_type) - 1;
++
++  return n;
++}
++
++/* Return the number of instructions needed to load constant X.
++   Return 0 if X isn't a valid constant.  */
++
++int
++riscv_const_insns (rtx x)
++{
++  enum riscv_symbol_type symbol_type;
++  rtx offset;
++
++  switch (GET_CODE (x))
++    {
++    case HIGH:
++      if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
++	  || !riscv_split_symbol_type (symbol_type))
++	return 0;
++
++      /* This is simply an LUI. */
++      return 1;
++
++    case CONST_INT:
++      {
++	int cost = riscv_integer_cost (INTVAL (x));
++	/* Force complicated constants to memory. */
++	return cost < 4 ? cost : 0;
++      }
++
++    case CONST_DOUBLE:
++    case CONST_VECTOR:
++      /* Zero can be loaded directly using x0; force other constants to memory.  */
++      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
++
++    case CONST:
++      /* See if we can refer to X directly.  */
++      if (riscv_symbolic_constant_p (x, &symbol_type))
++	return riscv_symbol_insns (symbol_type);
++
++      /* Otherwise try splitting the constant into a base and offset.  */
++      split_const (x, &x, &offset);
++      if (offset != 0)
++	{
++	  int n = riscv_const_insns (x);
++	  if (n != 0)
++	    return n + riscv_integer_cost (INTVAL (offset));
++	}
++      return 0;
++
++    case SYMBOL_REF:
++    case LABEL_REF:
++      return riscv_symbol_insns (riscv_classify_symbol (x));
++
++    default:
++      return 0;
++    }
++}
++
++/* X is a doubleword constant that can be handled by splitting it into
++   two words and loading each word separately.  Return the number of
++   instructions required to do this.  */
++
++int
++riscv_split_const_insns (rtx x)
++{
++  unsigned int low, high;
++
++  low = riscv_const_insns (riscv_subword (x, false));
++  high = riscv_const_insns (riscv_subword (x, true));
++  gcc_assert (low > 0 && high > 0);
++  return low + high;
++}
++
++/* Return the number of instructions needed to implement INSN,
++   given that it loads from or stores to MEM. */
++
++int
++riscv_load_store_insns (rtx mem, rtx_insn *insn)
++{
++  enum machine_mode mode;
++  bool might_split_p;
++  rtx set;
++
++  gcc_assert (MEM_P (mem));
++  mode = GET_MODE (mem);
++
++  /* Try to prove that INSN does not need to be split.  */
++  might_split_p = true;
++  if (GET_MODE_BITSIZE (mode) == 64)
++    {
++      set = single_set (insn);
++      if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
++	might_split_p = false;
++    }
++
++  return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
++}
++
++/* Emit a move from SRC to DEST.  Assume that the move expanders can
++   handle all moves if !can_create_pseudo_p ().  The distinction is
++   important because, unlike emit_move_insn, the move expanders know
++   how to force Pmode objects into the constant pool even when the
++   constant pool address is not itself legitimate.  */
++
++rtx
++riscv_emit_move (rtx dest, rtx src)
++{
++  return (can_create_pseudo_p ()
++	  ? emit_move_insn (dest, src)
++	  : emit_move_insn_1 (dest, src));
++}
++
++/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */
++
++static void
++riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
++{
++  emit_insn (gen_rtx_SET (target,
++			  gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
++}
++
++/* Compute (CODE OP0 OP1) and store the result in a new register
++   of mode MODE.  Return that new register.  */
++
++static rtx
++riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
++{
++  rtx reg;
++
++  reg = gen_reg_rtx (mode);
++  riscv_emit_binary (code, reg, op0, op1);
++  return reg;
++}
++
++/* Copy VALUE to a register and return that register.  If new pseudos
++   are allowed, copy it into a new register, otherwise use DEST.  */
++
++static rtx
++riscv_force_temporary (rtx dest, rtx value)
++{
++  if (can_create_pseudo_p ())
++    return force_reg (Pmode, value);
++  else
++    {
++      riscv_emit_move (dest, value);
++      return dest;
++    }
++}
++
++/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
++   then add CONST_INT OFFSET to the result.  */
++
++static rtx
++riscv_unspec_address_offset (rtx base, rtx offset,
++			    enum riscv_symbol_type symbol_type)
++{
++  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
++			 UNSPEC_ADDRESS_FIRST + symbol_type);
++  if (offset != const0_rtx)
++    base = gen_rtx_PLUS (Pmode, base, offset);
++  return gen_rtx_CONST (Pmode, base);
++}
++
++/* Return an UNSPEC address with underlying address ADDRESS and symbol
++   type SYMBOL_TYPE.  */
++
++rtx
++riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
++{
++  rtx base, offset;
++
++  split_const (address, &base, &offset);
++  return riscv_unspec_address_offset (base, offset, symbol_type);
++}
++
++/* If OP is an UNSPEC address, return the address to which it refers,
++   otherwise return OP itself.  */
++
++static rtx
++riscv_strip_unspec_address (rtx op)
++{
++  rtx base, offset;
++
++  split_const (op, &base, &offset);
++  if (UNSPEC_ADDRESS_P (base))
++    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
++  return op;
++}
++
++/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
++   high part to BASE and return the result.  Just return BASE otherwise.
++   TEMP is as for riscv_force_temporary.
++
++   The returned expression can be used as the first operand to a LO_SUM.  */
++
++static rtx
++riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
++{
++  addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
++  return riscv_force_temporary (temp, addr);
++}
++
++/* Load an entry from the GOT. */
++static rtx
++riscv_got_load_tls_gd (rtx dest, rtx sym)
++{
++  return (Pmode == DImode
++	  ? gen_got_load_tls_gddi (dest, sym)
++	  : gen_got_load_tls_gdsi (dest, sym));
++}
++
++static rtx
++riscv_got_load_tls_ie (rtx dest, rtx sym)
++{
++  return (Pmode == DImode
++	  ? gen_got_load_tls_iedi (dest, sym)
++	  : gen_got_load_tls_iesi (dest, sym));
++}
++
++static rtx
++riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
++{
++  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++  return (Pmode == DImode
++	  ? gen_tls_add_tp_ledi (dest, base, tp, sym)
++	  : gen_tls_add_tp_lesi (dest, base, tp, sym));
++}
++
++/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
++   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
++   constant in that context and can be split into high and low parts.
++   If so, and if LOW_OUT is nonnull, emit the high part and store the
++   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.
++
++   TEMP is as for riscv_force_temporary and is used to load the high
++   part into a register.
++
++   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
++   a legitimate SET_SRC for an .md pattern, otherwise the low part
++   is guaranteed to be a legitimate address for mode MODE.  */
++
++bool
++riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
++{
++  enum riscv_symbol_type symbol_type;
++
++  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
++      || !riscv_symbolic_constant_p (addr, &symbol_type)
++      || riscv_symbol_insns (symbol_type) == 0
++      || !riscv_split_symbol_type (symbol_type))
++    return false;
++
++  if (low_out)
++    switch (symbol_type)
++      {
++      case SYMBOL_ABSOLUTE:
++	{
++	  rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
++	  high = riscv_force_temporary (temp, high);
++	  *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++	}
++	break;
++
++      case SYMBOL_PCREL:
++	{
++	  static int seqno;
++	  char buf[32];
++	  rtx label;
++
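++	  /* Each PC-relative high part gets its own local label so that
++	     the matching low part can refer back to the AUIPC's address.  */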
++	  sprintf (buf, ".LA%d", seqno);
++	  label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
++	  SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;
++
++	  if (temp == NULL)
++	    temp = gen_reg_rtx (Pmode);
++
++	  if (Pmode == DImode)
++	    emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
++	  else
++	    emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));
++
++	  *low_out = gen_rtx_LO_SUM (Pmode, temp, label);
++
++	  seqno++;
++	}
++	break;
++
++      default:
++	gcc_unreachable ();
++      }
++
++  return true;
++}
++
++/* Return a legitimate address for REG + OFFSET.  TEMP is as for
++   riscv_force_temporary; it is only needed when OFFSET is not a
++   SMALL_OPERAND.  */
++
++static rtx
++riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
++{
++  if (!SMALL_OPERAND (offset))
++    {
++      rtx high;
++
++      /* Leave OFFSET as a 12-bit signed offset and put the excess in HIGH.
++         The addition inside the macro CONST_HIGH_PART may cause an
++         overflow, so we need to force a sign-extension check.  */
++      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
++      offset = CONST_LOW_PART (offset);
++      high = riscv_force_temporary (temp, high);
++      reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
++    }
++  return plus_constant (Pmode, reg, offset);
++}
++
++/* The __tls_get_addr symbol.  */
++static GTY(()) rtx riscv_tls_symbol;
++
++/* Return an instruction sequence that calls __tls_get_addr.  SYM is
++   the TLS symbol we are referencing.  RESULT is an RTX for the
++   return value location.  */
++
++static rtx
++riscv_call_tls_get_addr (rtx sym, rtx result)
++{
++  rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++  if (!riscv_tls_symbol)
++    riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
++
++  start_sequence ();
++  
++  emit_insn (riscv_got_load_tls_gd (a0, sym));
++  insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
++  RTL_CONST_CALL_P (insn) = 1;
++  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
++  insn = get_insns ();
++
++  end_sequence ();
++
++  return insn;
++}
++
++/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
++   its address.  The return value will be both a valid address and a valid
++   SET_SRC (either a REG or a LO_SUM).  */
++
++static rtx
++riscv_legitimize_tls_address (rtx loc)
++{
++  rtx dest, insn, tp, tmp1;
++  enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
++
++  /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE.  */
++  if (!flag_pic)
++    model = TLS_MODEL_LOCAL_EXEC;
++
++  switch (model)
++    {
++    case TLS_MODEL_LOCAL_DYNAMIC:
++      /* Rely on section anchors for the optimization that LDM TLS
++	 provides.  The anchor's address is loaded with GD TLS. */
++    case TLS_MODEL_GLOBAL_DYNAMIC:
++      tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
++      insn = riscv_call_tls_get_addr (loc, tmp1);
++      dest = gen_reg_rtx (Pmode);
++      emit_libcall_block (insn, dest, tmp1, loc);
++      break;
++
++    case TLS_MODEL_INITIAL_EXEC:
++      /* la.tls.ie; tp-relative add */
++      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++      tmp1 = gen_reg_rtx (Pmode);
++      emit_insn (riscv_got_load_tls_ie (tmp1, loc));
++      dest = gen_reg_rtx (Pmode);
++      emit_insn (gen_add3_insn (dest, tmp1, tp));
++      break;
++
++    case TLS_MODEL_LOCAL_EXEC:
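++      /* High part of the tp-relative offset, tp-relative add; the
++	 LO_SUM supplies the low part.  */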
++      tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
++      dest = gen_reg_rtx (Pmode);
++      emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
++      dest = gen_rtx_LO_SUM (Pmode, dest,
++			     riscv_unspec_address (loc, SYMBOL_TLS_LE));
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++  return dest;
++}
++

++/* If X is not a valid address for mode MODE, force it into a register.  */
++
++static rtx
++riscv_force_address (rtx x, enum machine_mode mode)
++{
++  if (!riscv_legitimate_address_p (mode, x, false))
++    x = force_reg (Pmode, x);
++  return x;
++}
++
++/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
++   be legitimized in a way that the generic machinery might not expect,
++   return a new address, otherwise return NULL.  MODE is the mode of
++   the memory being accessed.  */
++
++static rtx
++riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
++			 enum machine_mode mode)
++{
++  rtx addr;
++
++  if (riscv_tls_symbol_p (x))
++    return riscv_legitimize_tls_address (x);
++
++  /* See if the address can split into a high part and a LO_SUM.  */
++  if (riscv_split_symbol (NULL, x, mode, &addr))
++    return riscv_force_address (addr, mode);
++
++  /* Handle BASE + OFFSET using riscv_add_offset.  */
++  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
++      && INTVAL (XEXP (x, 1)) != 0)
++    {
++      rtx base = XEXP (x, 0);
++      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
++
++      if (!riscv_valid_base_register_p (base, mode, false))
++	base = copy_to_mode_reg (Pmode, base);
++      addr = riscv_add_offset (NULL, base, offset);
++      return riscv_force_address (addr, mode);
++    }
++
++  return x;
++}
++
++/* Load VALUE into DEST.  TEMP is as for riscv_force_temporary.  */
++
++void
++riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
++{
++  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++  enum machine_mode mode;
++  int i, num_ops;
++  rtx x;
++
++  mode = GET_MODE (dest);
++  num_ops = riscv_build_integer (codes, value, mode);
++
++  if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
++      && num_ops >= riscv_split_integer_cost (value))
++    x = riscv_split_integer (value, mode);
++  else
++    {
++      /* Apply each binary operation to X. */
++      x = GEN_INT (codes[0].value);
++
++      for (i = 1; i < num_ops; i++)
++        {
++          if (!can_create_pseudo_p ())
++            {
++              emit_insn (gen_rtx_SET (temp, x));
++              x = temp;
++            }
++          else
++            x = force_reg (mode, x);
++
++          x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
++        }
++    }
++
++  emit_insn (gen_rtx_SET (dest, x));
++}
++
++/* Subroutine of riscv_legitimize_move.  Move constant SRC into register
++   DEST given that SRC satisfies immediate_operand but doesn't satisfy
++   move_operand.  */
++
++static void
++riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
++{
++  rtx base, offset;
++
++  /* Split moves of big integers into smaller pieces.  */
++  if (splittable_const_int_operand (src, mode))
++    {
++      riscv_move_integer (dest, dest, INTVAL (src));
++      return;
++    }
++
++  /* Split moves of symbolic constants into high/low pairs.  */
++  if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
++    {
++      emit_insn (gen_rtx_SET (dest, src));
++      return;
++    }
++
++  /* Generate the appropriate access sequences for TLS symbols.  */
++  if (riscv_tls_symbol_p (src))
++    {
++      riscv_emit_move (dest, riscv_legitimize_tls_address (src));
++      return;
++    }
++
++  /* If we have (const (plus symbol offset)), and that expression cannot
++     be forced into memory, load the symbol first and add in the offset.  Also
++     prefer to do this even if the constant _can_ be forced into memory, as it
++     usually produces better code.  */
++  split_const (src, &base, &offset);
++  if (offset != const0_rtx
++      && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
++    {
++      base = riscv_force_temporary (dest, base);
++      riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
++      return;
++    }
++
++  src = force_const_mem (mode, src);
++
++  /* When using explicit relocs, constant pool references are sometimes
++     not legitimate addresses.  */
++  riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
++  riscv_emit_move (dest, src);
++}
++
++/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
++   sequence that is valid.  */
++
++bool
++riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
++{
++  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
++    {
++      riscv_emit_move (dest, force_reg (mode, src));
++      return true;
++    }
++
++  /* We need to deal with constants that would be legitimate
++     immediate_operands but aren't legitimate move_operands.  */
++  if (CONSTANT_P (src) && !move_operand (src, mode))
++    {
++      riscv_legitimize_const_move (mode, dest, src);
++      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
++      return true;
++    }
++  return false;
++}
++
++/* Return true if there is an instruction that implements CODE and accepts
++   X as an immediate operand. */
++
++static int
++riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
++{
++  switch (code)
++    {
++    case ASHIFT:
++    case ASHIFTRT:
++    case LSHIFTRT:
++      /* All shift counts are truncated to a valid constant.  */
++      return true;
++
++    case AND:
++    case IOR:
++    case XOR:
++    case PLUS:
++    case LT:
++    case LTU:
++      /* These instructions take 12-bit signed immediates.  */
++      return SMALL_OPERAND (x);
++
++    case LE:
++      /* We add 1 to the immediate and use SLT.  */
++      return SMALL_OPERAND (x + 1);
++
++    case LEU:
++      /* Likewise SLTU, but reject the always-true case.  */
++      return SMALL_OPERAND (x + 1) && x + 1 != 0;
++
++    case GE:
++    case GEU:
++      /* We can emulate an immediate of 1 by using GT/GTU against x0. */
++      return x == 1;
++
++    default:
++      /* By default assume that x0 can be used for 0.  */
++      return x == 0;
++    }
++}
++
++/* Return the cost of binary operation X, given that the instruction
++   sequence for a word-sized or smaller operation takes SINGLE_INSNS
++   instructions and that the sequence of a double-word operation takes
++   DOUBLE_INSNS instructions.  */
++
++static int
++riscv_binary_cost (rtx x, int single_insns, int double_insns)
++{
++  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
++    return COSTS_N_INSNS (double_insns);
++  return COSTS_N_INSNS (single_insns);
++}
++
++/* Return the cost of sign-extending OP to mode MODE, not including the
++   cost of OP itself.  */
++
++static int
++riscv_sign_extend_cost (enum machine_mode mode, rtx op)
++{
++  if (MEM_P (op))
++    /* Extended loads are as cheap as unextended ones.  */
++    return 0;
++
++  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
++    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
++    return 0;
++
++  /* We need to use a shift left and a shift right.  */
++  return COSTS_N_INSNS (2);
++}
++
++/* Return the cost of zero-extending OP to mode MODE, not including the
++   cost of OP itself.  */
++
++static int
++riscv_zero_extend_cost (enum machine_mode mode, rtx op)
++{
++  if (MEM_P (op))
++    /* Extended loads are as cheap as unextended ones.  */
++    return 0;
++
++  if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
++      ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
++    /* We need a shift left followed by a shift right.  */
++    return COSTS_N_INSNS (2);
++
++  /* We can use ANDI.  */
++  return COSTS_N_INSNS (1);
++}
++
++/* Implement TARGET_RTX_COSTS.  */
++
++static bool
++riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
++		 int *total, bool speed)
++{
++  int code = GET_CODE(x);
++  bool float_mode_p = FLOAT_MODE_P (mode);
++  int cost;
++
++  switch (code)
++    {
++    case CONST_INT:
++      if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
++	{
++	  *total = 0;
++	  return true;
++	}
++      /* Fall through.  */
++
++    case SYMBOL_REF:
++    case LABEL_REF:
++    case CONST_DOUBLE:
++    case CONST:
++      if (speed)
++	*total = 1;
++      else if ((cost = riscv_const_insns (x)) > 0)
++	*total = COSTS_N_INSNS (cost);
++      else /* The value will be fetched from the constant pool.  */
++	*total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
++      return true;
++
++    case MEM:
++      /* If the address is legitimate, return the number of
++	 instructions it needs.  */
++      if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
++	{
++	  *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
++	  return true;
++	}
++      /* Otherwise use the default handling.  */
++      return false;
++
++    case NOT:
++      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
++      return false;
++
++    case AND:
++    case IOR:
++    case XOR:
++      /* Double-word operations use two single-word operations.  */
++      *total = riscv_binary_cost (x, 1, 2);
++      return false;
++
++    case ASHIFT:
++    case ASHIFTRT:
++    case LSHIFTRT:
++      *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
++      return false;
++
++    case ABS:
++      *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
++      return false;
++
++    case LO_SUM:
++      *total = set_src_cost (XEXP (x, 0), mode, speed);
++      return true;
++
++    case LT:
++    case LTU:
++    case LE:
++    case LEU:
++    case GT:
++    case GTU:
++    case GE:
++    case GEU:
++    case EQ:
++    case NE:
++    case UNORDERED:
++    case LTGT:
++      /* Branch comparisons have VOIDmode, so use the first operand's
++	 mode instead.  */
++      mode = GET_MODE (XEXP (x, 0));
++      if (float_mode_p)
++	*total = tune_info->fp_add[mode == DFmode];
++      else
++	*total = riscv_binary_cost (x, 1, 3);
++      return false;
++
++    case MINUS:
++      if (float_mode_p
++	  && !HONOR_NANS (mode)
++	  && !HONOR_SIGNED_ZEROS (mode))
++	{
++	  /* See if we can use NMADD or NMSUB.  See riscv.md for the
++	     associated patterns.  */
++	  rtx op0 = XEXP (x, 0);
++	  rtx op1 = XEXP (x, 1);
++	  if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
++	    {
++	      *total = (tune_info->fp_mul[mode == DFmode]
++			+ set_src_cost (XEXP (XEXP (op0, 0), 0), mode, speed)
++			+ set_src_cost (XEXP (op0, 1), mode, speed)
++			+ set_src_cost (op1, mode, speed));
++	      return true;
++	    }
++	  if (GET_CODE (op1) == MULT)
++	    {
++	      *total = (tune_info->fp_mul[mode == DFmode]
++			+ set_src_cost (op0, mode, speed)
++			+ set_src_cost (XEXP (op1, 0), mode, speed)
++			+ set_src_cost (XEXP (op1, 1), mode, speed));
++	      return true;
++	    }
++	}
++      /* Fall through.  */
++
++    case PLUS:
++      if (float_mode_p)
++	*total = tune_info->fp_add[mode == DFmode];
++      else
++	*total = riscv_binary_cost (x, 1, 4);
++      return false;
++
++    case NEG:
++      if (float_mode_p
++	  && !HONOR_NANS (mode)
++	  && HONOR_SIGNED_ZEROS (mode))
++	{
++	  /* See if we can use NMADD or NMSUB.  See riscv.md for the
++	     associated patterns.  */
++	  rtx op = XEXP (x, 0);
++	  if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
++	      && GET_CODE (XEXP (op, 0)) == MULT)
++	    {
++	      *total = (tune_info->fp_mul[mode == DFmode]
++			+ set_src_cost (XEXP (XEXP (op, 0), 0), mode, speed)
++			+ set_src_cost (XEXP (XEXP (op, 0), 1), mode, speed)
++			+ set_src_cost (XEXP (op, 1), mode, speed));
++	      return true;
++	    }
++	}
++
++      if (float_mode_p)
++	*total = tune_info->fp_add[mode == DFmode];
++      else
++	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
++      return false;
++
++    case MULT:
++      if (float_mode_p)
++	*total = tune_info->fp_mul[mode == DFmode];
++      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++	*total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
++      else if (!speed)
++	*total = COSTS_N_INSNS (1);
++      else
++	*total = tune_info->int_mul[mode == DImode];
++      return false;
++
++    case DIV:
++    case SQRT:
++    case MOD:
++      if (float_mode_p)
++	{
++	  *total = tune_info->fp_div[mode == DFmode];
++	  return false;
++	}
++      /* Fall through.  */
++
++    case UDIV:
++    case UMOD:
++      if (speed)
++	*total = tune_info->int_div[mode == DImode];
++      else
++	*total = COSTS_N_INSNS (1);
++      return false;
++
++    case SIGN_EXTEND:
++      *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
++      return false;
++
++    case ZERO_EXTEND:
++      *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
++      return false;
++
++    case FLOAT:
++    case UNSIGNED_FLOAT:
++    case FIX:
++    case FLOAT_EXTEND:
++    case FLOAT_TRUNCATE:
++      *total = tune_info->fp_add[mode == DFmode];
++      return false;
++
++    case UNSPEC:
++      if (XINT (x, 1) == UNSPEC_AUIPC)
++	{
++	  /* Make AUIPC cheap to avoid spilling its result to the stack.  */
++	  *total = 1;
++	  return true;
++	}
++      return false;
++
++    default:
++      return false;
++    }
++}
++
++/* Implement TARGET_ADDRESS_COST.  */
++
++static int
++riscv_address_cost (rtx addr, enum machine_mode mode,
++		    addr_space_t as ATTRIBUTE_UNUSED,
++		    bool speed ATTRIBUTE_UNUSED)
++{
++  return riscv_address_insns (addr, mode, false);
++}
++
++/* Return one word of double-word value OP.  HIGH_P is true to select the
++   high part or false to select the low part. */
++
++rtx
++riscv_subword (rtx op, bool high_p)
++{
++  unsigned int byte;
++  enum machine_mode mode;
++
++  mode = GET_MODE (op);
++  if (mode == VOIDmode)
++    mode = TARGET_64BIT ? TImode : DImode;
++
++  byte = high_p ? UNITS_PER_WORD : 0;
++
++  if (FP_REG_RTX_P (op))
++    return gen_rtx_REG (word_mode, REGNO (op) + high_p);
++
++  if (MEM_P (op))
++    return adjust_address (op, word_mode, byte);
++
++  return simplify_gen_subreg (word_mode, op, mode, byte);
++}
++
++/* Return true if a 64-bit move from SRC to DEST should be split into two.  */
++
++bool
++riscv_split_64bit_move_p (rtx dest, rtx src)
++{
++  /* All 64b moves are legal in 64b mode.  All 64b FPR <-> FPR and
++     FPR <-> MEM moves are legal in 32b mode, too.  Although
++     FPR <-> GPR moves are not available in general in 32b mode,
++     we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
++  return !(TARGET_64BIT
++	   || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
++	   || (FP_REG_RTX_P (dest) && MEM_P (src))
++	   || (FP_REG_RTX_P (src) && MEM_P (dest))
++	   || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))));
++}
++
++/* Split a doubleword move from SRC to DEST.  On 32-bit targets,
++   this function handles 64-bit moves for which riscv_split_64bit_move_p
++   holds.  For 64-bit targets, this function handles 128-bit moves.  */
++
++void
++riscv_split_doubleword_move (rtx dest, rtx src)
++{
++  rtx low_dest;
++
++  /* The operation can be split into two normal moves.  Decide in
++     which order to do them.  */
++  low_dest = riscv_subword (dest, false);
++  if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
++    {
++      riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++      riscv_emit_move (low_dest, riscv_subword (src, false));
++    }
++  else
++    {
++      riscv_emit_move (low_dest, riscv_subword (src, false));
++      riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++    }
++}
++

++/* Return the appropriate instructions to move SRC into DEST.  Assume
++   that SRC is operand 1 and DEST is operand 0.  */
++
++const char *
++riscv_output_move (rtx dest, rtx src)
++{
++  enum rtx_code dest_code, src_code;
++  enum machine_mode mode;
++  bool dbl_p;
++
++  dest_code = GET_CODE (dest);
++  src_code = GET_CODE (src);
++  mode = GET_MODE (dest);
++  dbl_p = (GET_MODE_SIZE (mode) == 8);
++
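++  /* Returning "#" tells the final pass that the move must first be
++     split; see riscv_split_doubleword_move above.  */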
++  if (dbl_p && riscv_split_64bit_move_p (dest, src))
++    return "#";
++
++  if (dest_code == REG && GP_REG_P (REGNO (dest)))
++    {
++      if (src_code == REG && FP_REG_P (REGNO (src)))
++	return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
++
++      if (src_code == MEM)
++	switch (GET_MODE_SIZE (mode))
++	  {
++	  case 1: return "lbu\t%0,%1";
++	  case 2: return "lhu\t%0,%1";
++	  case 4: return "lw\t%0,%1";
++	  case 8: return "ld\t%0,%1";
++	  }
++
++      if (src_code == CONST_INT)
++	return "li\t%0,%1";
++
++      if (src_code == HIGH)
++	return "lui\t%0,%h1";
++
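++      /* "la" resolves through the GOT; "lla" gives the local address.  */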
++      if (symbolic_operand (src, VOIDmode))
++	switch (riscv_classify_symbolic_expression (src))
++	  {
++	  case SYMBOL_GOT_DISP: return "la\t%0,%1";
++	  case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
++	  case SYMBOL_PCREL: return "lla\t%0,%1";
++	  default: gcc_unreachable();
++	  }
++    }
++  if ((src_code == REG && GP_REG_P (REGNO (src)))
++      || (src == CONST0_RTX (mode)))
++    {
++      if (dest_code == REG)
++	{
++	  if (GP_REG_P (REGNO (dest)))
++	    return "mv\t%0,%z1";
++
++	  if (FP_REG_P (REGNO (dest)))
++	    {
++	      if (!dbl_p)
++		return "fmv.s.x\t%0,%z1";
++	      if (TARGET_64BIT)
++		return "fmv.d.x\t%0,%z1";
++	      /* in RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w */
++	      gcc_assert (src == CONST0_RTX (mode));
++	      return "fcvt.d.w\t%0,x0";
++	    }
++	}
++      if (dest_code == MEM)
++	switch (GET_MODE_SIZE (mode))
++	  {
++	  case 1: return "sb\t%z1,%0";
++	  case 2: return "sh\t%z1,%0";
++	  case 4: return "sw\t%z1,%0";
++	  case 8: return "sd\t%z1,%0";
++	  }
++    }
++  if (src_code == REG && FP_REG_P (REGNO (src)))
++    {
++      if (dest_code == REG && FP_REG_P (REGNO (dest)))
++	return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
++
++      if (dest_code == MEM)
++	return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
++    }
++  if (dest_code == REG && FP_REG_P (REGNO (dest)))
++    {
++      if (src_code == MEM)
++	return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
++    }
++  gcc_unreachable ();
++}
++

++/* Return true if CMP1 is a suitable second operand for integer ordering
++   test CODE.  See also the *sCC patterns in riscv.md.  */
++
++static bool
++riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
++{
++  switch (code)
++    {
++    case GT:
++    case GTU:
++      return reg_or_0_operand (cmp1, VOIDmode);
++
++    case GE:
++    case GEU:
++      return cmp1 == const1_rtx;
++
++    case LT:
++    case LTU:
++      return arith_operand (cmp1, VOIDmode);
++
++    case LE:
++      return sle_operand (cmp1, VOIDmode);
++
++    case LEU:
++      return sleu_operand (cmp1, VOIDmode);
++
++    default:
++      gcc_unreachable ();
++    }
++}
++
++/* Return true if *CMP1 (of mode MODE) is a valid second operand for
++   integer ordering test *CODE, or if an equivalent combination can
++   be formed by adjusting *CODE and *CMP1.  When returning true, update
++   *CODE and *CMP1 with the chosen code and operand, otherwise leave
++   them alone.  */
++
++static bool
++riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
++				  enum machine_mode mode)
++{
++  HOST_WIDE_INT plus_one;
++
++  if (riscv_int_order_operand_ok_p (*code, *cmp1))
++    return true;
++
++  if (CONST_INT_P (*cmp1))
++    switch (*code)
++      {
++      case LE:
++	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++	if (INTVAL (*cmp1) < plus_one)
++	  {
++	    *code = LT;
++	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
++	    return true;
++	  }
++	break;
++
++      case LEU:
++	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++	if (plus_one != 0)
++	  {
++	    *code = LTU;
++	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
++	    return true;
++	  }
++	break;
++
++      default:
++	break;
++      }
++  return false;
++}
++
++/* Compare CMP0 and CMP1 using ordering test CODE and store the result
++   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
++   is nonnull, it's OK to set TARGET to the inverse of the result and
++   flip *INVERT_PTR instead.  */
++
++static void
++riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
++			  rtx target, rtx cmp0, rtx cmp1)
++{
++  enum machine_mode mode;
++
++  /* First see if there is a RISCV instruction that can do this operation.
++     If not, try doing the same for the inverse operation.  If that also
++     fails, force CMP1 into a register and try again.  */
++  mode = GET_MODE (cmp0);
++  if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
++    riscv_emit_binary (code, target, cmp0, cmp1);
++  else
++    {
++      enum rtx_code inv_code = reverse_condition (code);
++      if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
++	{
++	  cmp1 = force_reg (mode, cmp1);
++	  riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
++	}
++      else if (invert_ptr == 0)
++	{
++	  rtx inv_target;
++
++	  inv_target = riscv_force_binary (GET_MODE (target),
++					  inv_code, cmp0, cmp1);
++	  riscv_emit_binary (XOR, target, inv_target, const1_rtx);
++	}
++      else
++	{
++	  *invert_ptr = !*invert_ptr;
++	  riscv_emit_binary (inv_code, target, cmp0, cmp1);
++	}
++    }
++}
++
++/* Return a register that is zero iff CMP0 and CMP1 are equal.
++   The register will have the same mode as CMP0.  */
++
++static rtx
++riscv_zero_if_equal (rtx cmp0, rtx cmp1)
++{
++  if (cmp1 == const0_rtx)
++    return cmp0;
++
++  return expand_binop (GET_MODE (cmp0), sub_optab,
++		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
++}
++
++/* Return false if we can easily emit code for the FP comparison specified
++   by *CODE.  If not, set *CODE to its inverse and return true. */
++
++static bool
++riscv_reversed_fp_cond (enum rtx_code *code)
++{
++  switch (*code)
++    {
++    case EQ:
++    case LT:
++    case LE:
++    case GT:
++    case GE:
++    case LTGT:
++    case ORDERED:
++      /* We know how to emit code for these cases... */
++      return false;
++
++    default:
++      /* ...but we must invert these and rely on the others. */
++      *code = reverse_condition_maybe_unordered (*code);
++      return true;
++    }
++}
++
++/* Convert a comparison into something that can be used in a branch or
++   conditional move.  On entry, *OP0 and *OP1 are the values being
++   compared and *CODE is the code used to compare them.
++
++   Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
++
++static void
++riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++  rtx cmp_op0 = *op0;
++  rtx cmp_op1 = *op1;
++
++  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
++    {
++      if (splittable_const_int_operand (cmp_op1, VOIDmode))
++	{
++	  HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
++	  enum rtx_code new_code;
++
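++	  /* CMP_OP1 needs more than one instruction to synthesize; see
++	     whether an adjacent constant with an adjusted comparison
++	     code is cheaper to build.  */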
++	  switch (*code)
++	    {
++	    case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
++	    case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
++	    case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
++	    case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
++	    case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
++	    case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
++	    case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
++	    case GE: new_rhs = rhs - 1; new_code = GT;
++	    try_new_rhs:
++	      /* Convert e.g. OP0 > 4095 into OP0 >= 4096.  */
++	      if ((rhs < 0) == (new_rhs < 0)
++		  && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
++		{
++		  *op1 = GEN_INT (new_rhs);
++		  *code = new_code;
++		}
++	      break;
++
++	    case EQ:
++	    case NE:
++	      /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0.  */
++	      if (SMALL_OPERAND (-rhs))
++		{
++		  *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
++		  riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
++		  *op1 = const0_rtx;
++		}
++	    default:
++	      break;
++	    }
++	}
++
++      if (*op1 != const0_rtx)
++	*op1 = force_reg (GET_MODE (cmp_op0), *op1);
++    }
++  else
++    {
++      /* For FP comparisons, set an integer register with the result of the
++	 comparison, then branch on it. */
++      rtx tmp0, tmp1, final_op;
++      enum rtx_code fp_code = *code;
++      *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
++
++      switch (fp_code)
++	{
++	case ORDERED:
++	  /* a == a && b == b */
++	  tmp0 = gen_reg_rtx (SImode);
++	  riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
++	  tmp1 = gen_reg_rtx (SImode);
++	  riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
++	  final_op = gen_reg_rtx (SImode);
++	  riscv_emit_binary (AND, final_op, tmp0, tmp1);
++	  break;
++
++	case LTGT:
++	  /* a < b || a > b */
++	  tmp0 = gen_reg_rtx (SImode);
++	  riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
++	  tmp1 = gen_reg_rtx (SImode);
++	  riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
++	  final_op = gen_reg_rtx (SImode);
++	  riscv_emit_binary (IOR, final_op, tmp0, tmp1);
++	  break;
++
++	case EQ:
++	case LE:
++	case LT:
++	case GE:
++	case GT:
++	  /* We have instructions for these cases. */
++	  final_op = gen_reg_rtx (SImode);
++	  riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
++	  break;
++
++	default:
++	  gcc_unreachable ();
++	}
++
++      /* Compare the binary result against 0. */
++      *op0 = final_op;
++      *op1 = const0_rtx;
++    }
++}
++
++/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
++   and OPERANDS[3].  Store the result in OPERANDS[0].
++
++   On 64-bit targets, the mode of the comparison and target will always be
++   SImode, thus possibly narrower than that of the comparison's operands.  */
++
++void
++riscv_expand_scc (rtx operands[])
++{
++  rtx target = operands[0];
++  enum rtx_code code = GET_CODE (operands[1]);
++  rtx op0 = operands[2];
++  rtx op1 = operands[3];
++
++  gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
++
++  if (code == EQ || code == NE)
++    {
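++      /* Reduce OP0 ==/!= OP1 to (OP0 - OP1) ==/!= 0, which maps onto a
++	 single seqz/snez.  */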
++      rtx zie = riscv_zero_if_equal (op0, op1);
++      riscv_emit_binary (code, target, zie, const0_rtx);
++    }
++  else
++    riscv_emit_int_order_test (code, 0, target, op0, op1);
++}
++
++/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
++   CODE and jump to OPERANDS[3] if the condition holds.  */
++
++void
++riscv_expand_conditional_branch (rtx *operands)
++{
++  enum rtx_code code = GET_CODE (operands[0]);
++  rtx op0 = operands[1];
++  rtx op1 = operands[2];
++  rtx condition;
++
++  riscv_emit_compare (&code, &op0, &op1);
++  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
++  emit_jump_insn (gen_condjump (condition, operands[3]));
++}
++
++/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Every parameter gets at
++   least PARM_BOUNDARY bits of alignment, but will be given anything up
++   to STACK_BOUNDARY bits if the type requires it.  */
++
++static unsigned int
++riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
++{
++  unsigned int alignment;
++
++  /* Use natural alignment if the type is not aggregate data.  */
++  if (type && !AGGREGATE_TYPE_P (type))
++    alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
++  else
++    alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
++
++  if (alignment < PARM_BOUNDARY)
++    alignment = PARM_BOUNDARY;
++  if (alignment > STACK_BOUNDARY)
++    alignment = STACK_BOUNDARY;
++  return alignment;
++}
++
++/* Fill INFO with information about a single argument.  CUM is the
++   cumulative state for earlier arguments.  MODE is the mode of this
++   argument and TYPE is its type (if known).  NAMED is true if this
++   is a named (fixed) argument rather than a variable one.  */
++
++static void
++riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
++		   enum machine_mode mode, const_tree type, bool named)
++{
++  bool doubleword_aligned_p;
++  unsigned int num_bytes, num_words, max_regs;
++
++  /* Work out the size of the argument.  */
++  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
++  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++  /* Scalar, complex and vector floating-point types are passed in
++     floating-point registers, as long as this is a named rather
++     than a variable argument.  */
++  info->fpr_p = (named
++		 && (type == 0 || FLOAT_TYPE_P (type))
++		 && (GET_MODE_CLASS (mode) == MODE_FLOAT
++		     || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
++		     || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
++		 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG);
++
++  /* Complex floats should only go into FPRs if there are two FPRs free,
++     otherwise they should be passed in the same way as a struct
++     containing two floats.  */
++  if (info->fpr_p
++      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
++      && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FP_ARG)
++    {
++      if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
++        info->fpr_p = false;
++      else
++        num_words = 2;
++    }
++
++  /* See whether the argument has doubleword alignment; zero-sized
++     types are never treated as aligned.  */
++  doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
++			  > BITS_PER_WORD)
++			 && (num_bytes != 0);
++
++  /* Set REG_OFFSET to the register count we're interested in.  */
++  info->reg_offset = cum->num_gprs;
++
++  /* Advance to an even register if the argument is doubleword-aligned.  */
++  if (doubleword_aligned_p)
++    info->reg_offset += info->reg_offset & 1;
++
++  /* Work out the offset of a stack argument.  */
++  info->stack_offset = cum->stack_words;
++  if (doubleword_aligned_p)
++    info->stack_offset += info->stack_offset & 1;
++
++  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
++
++  /* Partition the argument between registers and stack.  */
++  info->reg_words = MIN (num_words, max_regs);
++  info->stack_words = num_words - info->reg_words;
++}
++
++/* INFO describes a register argument that has the normal format for the
++   argument's mode.  Return the register it uses.  */
++
++static unsigned int
++riscv_arg_regno (const struct riscv_arg_info *info)
++{
++  if (!info->fpr_p || riscv_float_abi == FLOAT_ABI_SOFT)
++    return GP_ARG_FIRST + info->reg_offset;
++  else
++    return FP_ARG_FIRST + info->reg_offset;
++}
++
++/* Implement TARGET_FUNCTION_ARG.  */
++
++static rtx
++riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
++		    const_tree type, bool named)
++{
++  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++  struct riscv_arg_info info;
++
++  if (mode == VOIDmode)
++    return NULL;
++
++  riscv_get_arg_info (&info, cum, mode, type, named);
++
++  /* Return straight away if the whole argument is passed on the stack.  */
++  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
++    return NULL;
++
++  /* If any XLEN-bit chunk of a structure contains an XLEN-bit floating-point
++     number in its entirety, and the floating-point ABI can return XLEN-bit
++     values in FPRs, then pass the chunk in an FPR.  */
++  if (named
++      && type != 0
++      && TREE_CODE (type) == RECORD_TYPE
++      && TYPE_SIZE_UNIT (type)
++      && UNITS_PER_FP_ARG >= UNITS_PER_WORD
++      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
++    {
++      enum machine_mode fmode = TARGET_64BIT ? DFmode : SFmode;
++      enum machine_mode imode = TARGET_64BIT ? DImode : SImode;
++      tree field;
++
++      /* First check to see if there is any such field.  */
++      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
++	if (TREE_CODE (field) == FIELD_DECL
++	    && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
++	    && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
++	    && tree_fits_shwi_p (bit_position (field))
++	    && int_bit_position (field) % BITS_PER_WORD == 0)
++	  break;
++
++      if (field != 0)
++	{
++	  /* Now handle the special case by returning a PARALLEL
++	     indicating where each 64-bit chunk goes.  INFO.REG_WORDS
++	     chunks are passed in registers.  */
++	  unsigned int i;
++	  HOST_WIDE_INT bitpos;
++	  rtx ret;
++
++	  /* assign_parms checks the mode of ENTRY_PARM, so we must
++	     use the actual mode here.  */
++	  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
++
++	  bitpos = 0;
++	  field = TYPE_FIELDS (type);
++	  for (i = 0; i < info.reg_words; i++)
++	    {
++	      rtx reg;
++
++	      for (; field; field = DECL_CHAIN (field))
++		if (TREE_CODE (field) == FIELD_DECL
++		    && int_bit_position (field) >= bitpos)
++		  break;
++
++	      if (field
++		  && int_bit_position (field) == bitpos
++		  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
++		  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
++		reg = gen_rtx_REG (fmode, FP_ARG_FIRST + info.reg_offset + i);
++	      else
++		reg = gen_rtx_REG (imode, GP_ARG_FIRST + info.reg_offset + i);
++
++	      XVECEXP (ret, 0, i)
++		= gen_rtx_EXPR_LIST (VOIDmode, reg,
++				     GEN_INT (bitpos / BITS_PER_UNIT));
++
++	      bitpos += BITS_PER_WORD;
++	    }
++	  return ret;
++	}
++    }
++
++  /* Pass complex floating-point arguments in FPR pairs, with the real part
++     in the lower register and the imaginary part in the upper register.  */
++  if (info.fpr_p && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++    {
++      rtx real, imag;
++      enum machine_mode inner = GET_MODE_INNER (mode);
++      unsigned int regno = FP_ARG_FIRST + info.reg_offset;
++
++      gcc_assert (info.stack_words == 0 && info.reg_words == 2);
++      real = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (inner, regno),
++				const0_rtx);
++      imag = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (inner, regno + 1),
++				GEN_INT (GET_MODE_SIZE (inner)));
++      return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
++    }
++
++  return gen_rtx_REG (mode, riscv_arg_regno (&info));
++}
++
++/* Implement TARGET_FUNCTION_ARG_ADVANCE.  */
++
++static void
++riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
++			    const_tree type, bool named)
++{
++  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++  struct riscv_arg_info info;
++
++  riscv_get_arg_info (&info, cum, mode, type, named);
++
++  /* Advance the register count.  This has the effect of setting
++     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
++     argument required us to skip the final GPR and pass the whole
++     argument on the stack.  */
++  cum->num_gprs = info.reg_offset + info.reg_words;
++
++  /* Advance the stack word count.  */
++  if (info.stack_words > 0)
++    cum->stack_words = info.stack_offset + info.stack_words;
++}
++
++/* Implement TARGET_ARG_PARTIAL_BYTES.  */
++
++static int
++riscv_arg_partial_bytes (cumulative_args_t cum,
++			 enum machine_mode mode, tree type, bool named)
++{
++  struct riscv_arg_info info;
++
++  riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
++  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
++}
++
++/* See whether VALTYPE is a record whose fields should be returned in
++   floating-point registers.  If so, return the number of fields and
++   list them in FIELDS (which should have two elements).  Return 0
++   otherwise.
++
++   For n32 & n64, a structure with one or two fields is returned in
++   floating-point registers as long as every field has a floating-point
++   type.  */
++
++static int
++riscv_fpr_return_fields (const_tree valtype, tree fields[2])
++{
++  tree field;
++  int i;
++
++  if (TREE_CODE (valtype) != RECORD_TYPE)
++    return 0;
++
++  i = 0;
++  for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
++    {
++      if (TREE_CODE (field) != FIELD_DECL)
++	continue;
++
++      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
++	return 0;
++
++      if (i == 2)
++	return 0;
++
++      fields[i++] = field;
++    }
++  return i;
++}
++
++/* Return true if the function return value MODE will get returned in a
++   floating-point register.  */
++
++static bool
++riscv_return_mode_in_fpr_p (enum machine_mode mode)
++{
++  return ((GET_MODE_CLASS (mode) == MODE_FLOAT
++	   || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
++	   || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++	  && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG);
++}
++
++/* Return the representation of an FPR return register when the
++   value being returned in FP_RETURN has mode VALUE_MODE and the
++   return type itself has mode TYPE_MODE.  The two modes may be
++   different for structures like:
++
++       struct __attribute__((packed)) foo { float f; }
++
++   where we return the SFmode value of "f" in FP_RETURN, but where
++   the structure itself has mode BLKmode.  */
++
++static rtx
++riscv_return_fpr_single (enum machine_mode type_mode,
++			enum machine_mode value_mode)
++{
++  rtx x;
++
++  x = gen_rtx_REG (value_mode, FP_RETURN);
++  if (type_mode != value_mode)
++    {
++      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
++      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
++    }
++  return x;
++}
++
++/* Return a composite value in a pair of floating-point registers.
++   MODE1 and OFFSET1 are the mode and byte offset for the first value,
++   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
++   complete value.  */
++
++static rtx
++riscv_return_fpr_pair (enum machine_mode mode,
++		      enum machine_mode mode1, HOST_WIDE_INT offset1,
++		      enum machine_mode mode2, HOST_WIDE_INT offset2)
++{
++  return gen_rtx_PARALLEL
++    (mode,
++     gen_rtvec (2,
++		gen_rtx_EXPR_LIST (VOIDmode,
++				   gen_rtx_REG (mode1, FP_RETURN),
++				   GEN_INT (offset1)),
++		gen_rtx_EXPR_LIST (VOIDmode,
++				   gen_rtx_REG (mode2, FP_RETURN + 1),
++				   GEN_INT (offset2))));
++
++}
++
++/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
++   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
++   VALTYPE is null and MODE is the mode of the return value.  */
++
++rtx
++riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
++{
++  if (valtype)
++    {
++      tree fields[2];
++      int unsigned_p;
++
++      mode = TYPE_MODE (valtype);
++      unsigned_p = TYPE_UNSIGNED (valtype);
++
++      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
++	 return values, promote the mode here too.  */
++      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
++
++      /* Handle structures whose fields are returned in fa0/fa1.  */
++      switch (riscv_fpr_return_fields (valtype, fields))
++	{
++	case 1:
++	  return riscv_return_fpr_single (mode,
++					 TYPE_MODE (TREE_TYPE (fields[0])));
++
++	case 2:
++	  return riscv_return_fpr_pair (mode,
++				       TYPE_MODE (TREE_TYPE (fields[0])),
++				       int_byte_position (fields[0]),
++				       TYPE_MODE (TREE_TYPE (fields[1])),
++				       int_byte_position (fields[1]));
++	}
++
++      /* Only use FPRs for scalar, complex or vector types.  */
++      if (!FLOAT_TYPE_P (valtype))
++	return gen_rtx_REG (mode, GP_RETURN);
++    }
++
++  if (riscv_return_mode_in_fpr_p (mode))
++    {
++      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++        return riscv_return_fpr_pair (mode,
++    				 GET_MODE_INNER (mode), 0,
++    				 GET_MODE_INNER (mode),
++    				 GET_MODE_SIZE (mode) / 2);
++      else
++        return gen_rtx_REG (mode, FP_RETURN);
++    }
++
++  return gen_rtx_REG (mode, GP_RETURN);
++}
++
++/* Implement TARGET_RETURN_IN_MEMORY.  Scalars and small structures
++   that fit in two registers are returned in a0/a1. */
++
++static bool
++riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
++{
++  /* TFmode values are always returned in memory.  */
++  if (TYPE_MODE (type) == TFmode)
++    return true;
++
++  if (TREE_CODE (type) == RECORD_TYPE)
++    {
++      tree field;
++      /* A struct containing a TFmode field is likewise returned in memory.  */
++      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
++	if (TREE_CODE (field) == FIELD_DECL
++	    && !error_operand_p (field)
++	    && TYPE_MODE (TREE_TYPE (field)) == TFmode)
++	  return true;
++    }
++
++  return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
++}
++
++/* Implement TARGET_PASS_BY_REFERENCE. */
++
++static bool
++riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
++			 enum machine_mode mode, const_tree type,
++			 bool named ATTRIBUTE_UNUSED)
++{
++  /* TFmode values are always passed by reference.  */
++  if (mode == TFmode)
++    return true;
++  if (type && riscv_return_in_memory (type, NULL_TREE))
++    return true;
++  return targetm.calls.must_pass_in_stack (mode, type);
++}
++
++/* Implement TARGET_SETUP_INCOMING_VARARGS.  */
++
++static void
++riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
++			     tree type, int *pretend_size ATTRIBUTE_UNUSED,
++			     int no_rtl)
++{
++  CUMULATIVE_ARGS local_cum;
++  int gp_saved;
++
++  /* The caller has advanced CUM up to, but not beyond, the last named
++     argument.  Advance a local copy of CUM past the last "real" named
++     argument, to find out how many registers are left over.  */
++  local_cum = *get_cumulative_args (cum);
++  riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
++
++  /* Find out how many registers we need to save.  */
++  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
++
++  if (!no_rtl && gp_saved > 0)
++    {
++      rtx ptr, mem;
++
++      ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
++			   REG_PARM_STACK_SPACE (cfun->decl)
++			   - gp_saved * UNITS_PER_WORD);
++      mem = gen_frame_mem (BLKmode, ptr);
++      set_mem_alias_set (mem, get_varargs_alias_set ());
++
++      move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
++			   mem, gp_saved);
++    }
++  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
++    cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */
++
++static void
++riscv_va_start (tree valist, rtx nextarg)
++{
++  nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
++  std_expand_builtin_va_start (valist, nextarg);
++}
++
++/* Expand a call; SIBCALL_P says whether it is a sibling call.  RESULT is
++   where the result will go (null for "call"s and "sibcall"s), ADDR is the
++   address of the function and ARGS_SIZE is the size of the arguments.
++   Return the call itself.  */
++
++rtx
++riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
++{
++  rtx pattern;
++
++  if (!call_insn_operand (addr, VOIDmode))
++    {
++      rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
++      riscv_emit_move (reg, addr);
++      addr = reg;
++    }
++
++  if (result == 0)
++    {
++      rtx (*fn) (rtx, rtx);
++
++      if (sibcall_p)
++	fn = gen_sibcall_internal;
++      else
++	fn = gen_call_internal;
++
++      pattern = fn (addr, args_size);
++    }
++  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
++    {
++      /* Handle return values created by riscv_return_fpr_pair.  */
++      rtx (*fn) (rtx, rtx, rtx, rtx);
++      rtx reg1, reg2;
++
++      if (sibcall_p)
++	fn = gen_sibcall_value_multiple_internal;
++      else
++	fn = gen_call_value_multiple_internal;
++
++      reg1 = XEXP (XVECEXP (result, 0, 0), 0);
++      reg2 = XEXP (XVECEXP (result, 0, 1), 0);
++      pattern = fn (reg1, addr, args_size, reg2);
++    }
++  else
++    {
++      rtx (*fn) (rtx, rtx, rtx);
++
++      if (sibcall_p)
++	fn = gen_sibcall_value_internal;
++      else
++	fn = gen_call_value_internal;
++
++      /* Handle return values created by riscv_return_fpr_single.  */
++      if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
++	result = XEXP (XVECEXP (result, 0, 0), 0);
++      pattern = fn (result, addr, args_size);
++    }
++
++  return emit_call_insn (pattern);
++}
++
++/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
++   Assume that the areas do not overlap.  */
++
++static void
++riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
++{
++  HOST_WIDE_INT offset, delta;
++  unsigned HOST_WIDE_INT bits;
++  int i;
++  enum machine_mode mode;
++  rtx *regs;
++
++  bits = MAX (BITS_PER_UNIT,
++	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
++
++  mode = mode_for_size (bits, MODE_INT, 0);
++  delta = bits / BITS_PER_UNIT;
++
++  /* Allocate a buffer for the temporary registers.  */
++  regs = XALLOCAVEC (rtx, length / delta);
++
++  /* Load as many BITS-sized chunks as possible, using the widest
++     accesses that the source and destination alignment allow.  */
++  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++    {
++      regs[i] = gen_reg_rtx (mode);
++      riscv_emit_move (regs[i], adjust_address (src, mode, offset));
++    }
++
++  /* Copy the chunks to the destination.  */
++  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++      riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
++
++  /* Mop up any left-over bytes.  */
++  if (offset < length)
++    {
++      src = adjust_address (src, BLKmode, offset);
++      dest = adjust_address (dest, BLKmode, offset);
++      move_by_pieces (dest, src, length - offset,
++		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
++    }
++}
++
++/* Helper function for doing a loop-based block operation on memory
++   reference MEM.  Each iteration of the loop will operate on LENGTH
++   bytes of MEM.
++
++   Create a new base register for use within the loop and point it to
++   the start of MEM.  Create a new memory reference that uses this
++   register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */
++
++static void
++riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
++		       rtx *loop_reg, rtx *loop_mem)
++{
++  *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
++
++  /* Although the new mem does not refer to a known location,
++     it does keep up to LENGTH bytes of alignment.  */
++  *loop_mem = change_address (mem, BLKmode, *loop_reg);
++  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
++}
++
++/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
++   bytes at a time.  LENGTH must be at least BYTES_PER_ITER.  Assume that
++   the memory regions do not overlap.  */
++
++static void
++riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
++		      HOST_WIDE_INT bytes_per_iter)
++{
++  rtx label, src_reg, dest_reg, final_src, test;
++  HOST_WIDE_INT leftover;
++
++  leftover = length % bytes_per_iter;
++  length -= leftover;
++
++  /* Create registers and memory references for use within the loop.  */
++  riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
++  riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
++
++  /* Calculate the value that SRC_REG should have after the last iteration
++     of the loop.  */
++  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
++				   0, 0, OPTAB_WIDEN);
++
++  /* Emit the start of the loop.  */
++  label = gen_label_rtx ();
++  emit_label (label);
++
++  /* Emit the loop body.  */
++  riscv_block_move_straight (dest, src, bytes_per_iter);
++
++  /* Move on to the next block.  */
++  riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
++  riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
++
++  /* Emit the loop condition.  */
++  test = gen_rtx_NE (VOIDmode, src_reg, final_src);
++  if (Pmode == DImode)
++    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
++  else
++    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
++
++  /* Mop up any left-over bytes.  */
++  if (leftover)
++    riscv_block_move_straight (dest, src, leftover);
++}
++
++/* Expand a movmemsi instruction, which copies LENGTH bytes from
++   memory reference SRC to memory reference DEST.  */
++
++bool
++riscv_expand_block_move (rtx dest, rtx src, rtx length)
++{
++  if (CONST_INT_P (length))
++    {
++      HOST_WIDE_INT factor, align;
++
++      align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
++      factor = BITS_PER_WORD / align;
++
++      if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
++	{
++	  riscv_block_move_straight (dest, src, INTVAL (length));
++	  return true;
++	}
++      else if (optimize && align >= BITS_PER_WORD)
++	{
++	  riscv_block_move_loop (dest, src, INTVAL (length),
++				RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
++	  return true;
++	}
++    }
++  return false;
++}
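++
++/* For example, with word-aligned source and destination on RV64 the
++   factor is 1, so a constant length of up to RISCV_MAX_MOVE_BYTES_STRAIGHT
++   bytes (defined elsewhere in this port) is expanded as a straight
++   sequence of doubleword loads and stores, a larger constant length uses
++   the loop above when optimizing, and everything else falls back to the
++   library memcpy call emitted by the caller.  */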
++
++/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
++   in context CONTEXT.  HI_RELOC indicates a high-part reloc.  */
++
++static void
++riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
++{
++  const char *reloc;
++
++  switch (riscv_classify_symbolic_expression (op))
++    {
++      case SYMBOL_ABSOLUTE:
++	reloc = hi_reloc ? "%hi" : "%lo";
++	break;
++
++      case SYMBOL_PCREL:
++	reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
++	break;
++
++      case SYMBOL_TLS_LE:
++	reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
++	break;
++
++      default:
++	gcc_unreachable ();
++    }
++
++  fprintf (file, "%s(", reloc);
++  output_addr_const (file, riscv_strip_unspec_address (op));
++  fputc (')', file);
++}
++
++static const char *
++riscv_memory_model_suffix (enum memmodel model)
++{
++  switch (model)
++    {
++      case MEMMODEL_ACQ_REL:
++      case MEMMODEL_SEQ_CST:
++      case MEMMODEL_SYNC_SEQ_CST:
++	return ".sc";
++      case MEMMODEL_ACQUIRE:
++      case MEMMODEL_CONSUME:
++      case MEMMODEL_SYNC_ACQUIRE:
++	return ".aq";
++      case MEMMODEL_RELEASE:
++      case MEMMODEL_SYNC_RELEASE:
++	return ".rl";
++      case MEMMODEL_RELAXED:
++	return "";
++      default:
++        gcc_unreachable();
++    }
++}
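++
++/* For example, __atomic_fetch_add with __ATOMIC_ACQUIRE maps to
++   MEMMODEL_ACQUIRE and hence to the ".aq" suffix, so the instruction
++   printed through the '%A' operand code becomes "amoadd.w.aq"
++   (or "amoadd.d.aq" on RV64), while __ATOMIC_RELAXED leaves the
++   plain "amoadd.w".  */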
++
++/* Implement TARGET_PRINT_OPERAND.  The RISC-V-specific operand codes are:
++
++   'h'	Print the high-part relocation associated with OP, after stripping
++	  any outermost HIGH.
++   'R'	Print the low-part relocation associated with OP.
++   'C'	Print the integer branch condition for comparison OP.
++   'A'	Print the atomic operation suffix for memory model OP.
++   'z'	Print the zero register if OP is zero, otherwise print OP normally.  */
++
++static void
++riscv_print_operand (FILE *file, rtx op, int letter)
++{
++  enum machine_mode mode = GET_MODE(op);
++  enum rtx_code code;
++
++  gcc_assert (op);
++  code = GET_CODE (op);
++
++  switch (letter)
++    {
++    case 'h':
++      if (code == HIGH)
++	op = XEXP (op, 0);
++      riscv_print_operand_reloc (file, op, true);
++      break;
++
++    case 'R':
++      riscv_print_operand_reloc (file, op, false);
++      break;
++
++    case 'C':
++      /* The RTL names match the instruction names. */
++      fputs (GET_RTX_NAME (code), file);
++      break;
++
++    case 'A':
++      fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
++      break;
++
++    default:
++      switch (code)
++	{
++	case REG:
++	  if (letter && letter != 'z')
++	    output_operand_lossage ("invalid use of '%%%c'", letter);
++	  fprintf (file, "%s", reg_names[REGNO (op)]);
++	  break;
++
++	case MEM:
++	  if (letter == 'y')
++	    fprintf (file, "%s", reg_names[REGNO(XEXP(op, 0))]);
++	  else if (letter && letter != 'z')
++	    output_operand_lossage ("invalid use of '%%%c'", letter);
++	  else
++	    output_address (mode, XEXP (op, 0));
++	  break;
++
++	default:
++	  if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
++	    fputs (reg_names[GP_REG_FIRST], file);
++	  else if (letter && letter != 'z')
++	    output_operand_lossage ("invalid use of '%%%c'", letter);
++	  else
++	    output_addr_const (file, riscv_strip_unspec_address (op));
++	  break;
++	}
++    }
++}
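++
++/* For illustration only: a machine-description template along the lines
++   of "add\t%0,%1,%z2" (a hypothetical pattern, not one from this port)
++   prints the zero register when operand 2 is constant zero, and atomic
++   patterns use a "%A" code to append the suffix chosen by
++   riscv_memory_model_suffix.  */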
++
++/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */
++
++static void
++riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++  struct riscv_address_info addr;
++
++  if (riscv_classify_address (&addr, x, word_mode, true))
++    switch (addr.type)
++      {
++      case ADDRESS_REG:
++	riscv_print_operand (file, addr.offset, 0);
++	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++	return;
++
++      case ADDRESS_LO_SUM:
++	riscv_print_operand_reloc (file, addr.offset, false);
++	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++	return;
++
++      case ADDRESS_CONST_INT:
++	output_addr_const (file, x);
++	fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
++	return;
++
++      case ADDRESS_SYMBOLIC:
++	output_addr_const (file, riscv_strip_unspec_address (x));
++	return;
++      }
++  gcc_unreachable ();
++}
++
++static bool
++riscv_size_ok_for_small_data_p (int size)
++{
++  return g_switch_value && IN_RANGE (size, 1, g_switch_value);
++}
++
++/* Return true if EXP should be placed in the small data section. */
++
++static bool
++riscv_in_small_data_p (const_tree x)
++{
++  if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
++    return false;
++
++  if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
++    {
++      const char *sec = DECL_SECTION_NAME (x);
++      return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
++    }
++
++  return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
++}
++
++/* Return a section for X, handling small data. */
++
++static section *
++riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
++			      unsigned HOST_WIDE_INT align)
++{
++  section *s = default_elf_select_rtx_section (mode, x, align);
++
++  if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
++    {
++      if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
++	{
++	  /* Rename .rodata.cst* to .srodata.cst*. */
++	  char *name = (char *) alloca (strlen (s->named.name) + 2);
++	  sprintf (name, ".s%s", s->named.name + 1);
++	  return get_section (name, s->named.common.flags, NULL);
++	}
++
++      if (s == data_section)
++	return sdata_section;
++    }
++
++  return s;
++}
++
++/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
++
++static void ATTRIBUTE_UNUSED
++riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
++{
++  switch (size)
++    {
++    case 4:
++      fputs ("\t.dtprelword\t", file);
++      break;
++
++    case 8:
++      fputs ("\t.dtpreldword\t", file);
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++  output_addr_const (file, x);
++  fputs ("+0x800", file);
++}
++
++/* Make the last instruction frame-related and note that it performs
++   the operation described by FRAME_PATTERN.  */
++
++static void
++riscv_set_frame_expr (rtx frame_pattern)
++{
++  rtx insn;
++
++  insn = get_last_insn ();
++  RTX_FRAME_RELATED_P (insn) = 1;
++  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++				      frame_pattern,
++				      REG_NOTES (insn));
++}
++
++/* Return a frame-related rtx that stores REG at MEM.
++   REG must be a single register.  */
++
++static rtx
++riscv_frame_set (rtx mem, rtx reg)
++{
++  rtx set;
++
++  set = gen_rtx_SET (mem, reg);
++  RTX_FRAME_RELATED_P (set) = 1;
++
++  return set;
++}
++
++/* Return true if the current function must save register REGNO.  */
++
++static bool
++riscv_save_reg_p (unsigned int regno)
++{
++  bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
++  bool might_clobber = crtl->saves_all_registers
++		       || df_regs_ever_live_p (regno)
++		       || (regno == HARD_FRAME_POINTER_REGNUM
++			   && frame_pointer_needed);
++
++  return (call_saved && might_clobber)
++	 || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
++}
++
++/* Determine whether to call GPR save/restore routines.  */
++static bool
++riscv_use_save_libcall (const struct riscv_frame_info *frame)
++{
++  if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
++    return false;
++
++  return frame->save_libcall_adjustment != 0;
++}
++
++/* Determine which GPR save/restore routine to call.  */
++
++static unsigned
++riscv_save_libcall_count (unsigned mask)
++{
++  for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
++    if (BITSET_P (mask, n))
++      return CALLEE_SAVED_REG_NUMBER (n) + 1;
++  abort ();
++}
++
++/* Populate the current function's riscv_frame_info structure.
++
++   RISC-V stack frames grow downward.  High addresses are at the top.
++
++	+-------------------------------+
++	|                               |
++	|  incoming stack arguments     |
++	|                               |
++	+-------------------------------+ <-- incoming stack pointer
++	|                               |
++	|  callee-allocated save area   |
++	|  for arguments that are       |
++	|  split between registers and  |
++	|  the stack                    |
++	|                               |
++	+-------------------------------+ <-- arg_pointer_rtx
++	|                               |
++	|  callee-allocated save area   |
++	|  for register varargs         |
++	|                               |
++	+-------------------------------+ <-- hard_frame_pointer_rtx;
++	|                               |     stack_pointer_rtx + gp_sp_offset
++	|  GPR save area                |       + UNITS_PER_WORD
++	|                               |
++	+-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
++	|                               |       + UNITS_PER_HWVALUE
++	|  FPR save area                |
++	|                               |
++	+-------------------------------+ <-- frame_pointer_rtx (virtual)
++	|                               |
++	|  local variables              |
++	|                               |
++      P +-------------------------------+
++	|                               |
++	|  outgoing stack arguments     |
++	|                               |
++	+-------------------------------+ <-- stack_pointer_rtx
++
++   Dynamic stack allocations such as alloca insert data at point P.
++   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
++   hard_frame_pointer_rtx unchanged.  */
++
++static void
++riscv_compute_frame_info (void)
++{
++  struct riscv_frame_info *frame;
++  HOST_WIDE_INT offset;
++  unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
++
++  frame = &cfun->machine->frame;
++  memset (frame, 0, sizeof (*frame));
++
++  /* Find out which GPRs we need to save.  */
++  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++    if (riscv_save_reg_p (regno))
++      frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++  /* If this function calls eh_return, we must also save and restore the
++     EH data registers.  */
++  if (crtl->calls_eh_return)
++    for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
++      frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++  /* Find out which FPRs we need to save.  This loop must iterate over
++     the same space as its companion in riscv_for_each_saved_reg.  */
++  if (TARGET_HARD_FLOAT)
++    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++      if (riscv_save_reg_p (regno))
++        frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
++
++  /* At the bottom of the frame are any outgoing stack arguments. */
++  offset = crtl->outgoing_args_size;
++  /* Next are local stack variables. */
++  offset += RISCV_STACK_ALIGN (get_frame_size ());
++  /* The virtual frame pointer points above the local variables. */
++  frame->frame_pointer_offset = offset;
++  /* Next are the callee-saved FPRs. */
++  if (frame->fmask)
++    {
++      offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
++      frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
++    }
++  /* Next are the callee-saved GPRs. */
++  if (frame->mask)
++    {
++      unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
++      unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
++
++      /* Only use save/restore routines if they don't alter the stack size.  */
++      if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
++	frame->save_libcall_adjustment = x_save_size;
++
++      offset += x_save_size;
++      frame->gp_sp_offset = offset - UNITS_PER_WORD;
++    }
++  /* The hard frame pointer points above the callee-saved GPRs. */
++  frame->hard_frame_pointer_offset = offset;
++  /* Above the hard frame pointer is the callee-allocated varargs save area. */
++  offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
++  frame->arg_pointer_offset = offset;
++  /* Next is the callee-allocated area for pretend stack arguments.  */
++  offset += crtl->args.pretend_args_size;
++  frame->total_size = offset;
++  /* Above that are the incoming stack pointer and any incoming arguments. */
++
++  /* Only use save/restore routines when the GPRs are atop the frame.  */
++  if (frame->hard_frame_pointer_offset != frame->total_size)
++    frame->save_libcall_adjustment = 0;
++}
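++
++/* As a worked example (assuming the usual 16-byte alignment applied by
++   RISCV_STACK_ALIGN): an RV64 function with 16 bytes of locals, no
++   outgoing arguments and only ra and s0 to save gets
++   frame_pointer_offset = 16, gp_sp_offset = 24,
++   hard_frame_pointer_offset = 32 and total_size = 32, so the prologue
++   stores ra at sp+24 and s0 at sp+16.  */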
++
++/* Make sure that we're not trying to eliminate to the wrong hard frame
++   pointer.  */
++
++static bool
++riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
++{
++  return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
++}
++
++/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
++   or argument pointer.  TO is either the stack pointer or hard frame
++   pointer.  */
++
++HOST_WIDE_INT
++riscv_initial_elimination_offset (int from, int to)
++{
++  HOST_WIDE_INT src, dest;
++
++  riscv_compute_frame_info ();
++
++  if (to == HARD_FRAME_POINTER_REGNUM)
++    dest = cfun->machine->frame.hard_frame_pointer_offset;
++  else if (to == STACK_POINTER_REGNUM)
++    dest = 0; /* This is the base of all offsets.  */
++  else
++    gcc_unreachable ();
++
++  if (from == FRAME_POINTER_REGNUM)
++    src = cfun->machine->frame.frame_pointer_offset;
++  else if (from == ARG_POINTER_REGNUM)
++    src = cfun->machine->frame.arg_pointer_offset;
++  else
++    gcc_unreachable ();
++
++  return src - dest;
++}
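++
++/* Continuing the frame example above, eliminating the (virtual) frame
++   pointer to the stack pointer yields 16, and eliminating the argument
++   pointer to the stack pointer yields 32, the full frame size.  */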
++
++/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
++   previous frame.  */
++
++rtx
++riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
++{
++  if (count != 0)
++    return const0_rtx;
++
++  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
++}
++
++/* Emit code to change the current function's return address to
++   ADDRESS.  SCRATCH is available as a scratch register, if needed.
++   ADDRESS and SCRATCH are both word-mode GPRs.  */
++
++void
++riscv_set_return_address (rtx address, rtx scratch)
++{
++  rtx slot_address;
++
++  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
++  slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
++				  cfun->machine->frame.gp_sp_offset);
++  riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
++}
++
++/* A function to save or store a register.  The first argument is the
++   register and the second is the stack slot.  */
++typedef void (*riscv_save_restore_fn) (rtx, rtx);
++
++/* Use FN to save or restore register REGNO.  MODE is the register's
++   mode and OFFSET is the offset of its save slot from the current
++   stack pointer.  */
++
++static void
++riscv_save_restore_reg (enum machine_mode mode, int regno,
++		       HOST_WIDE_INT offset, riscv_save_restore_fn fn)
++{
++  rtx mem;
++
++  mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
++  fn (gen_rtx_REG (mode, regno), mem);
++}
++
++/* Call FN for each register that is saved by the current function.
++   SP_OFFSET is the offset of the current stack pointer from the start
++   of the frame.  */
++
++static void
++riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
++{
++  HOST_WIDE_INT offset;
++  int regno;
++
++  /* Save the link register and s-registers. */
++  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
++  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
++    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++      {
++        riscv_save_restore_reg (word_mode, regno, offset, fn);
++        offset -= UNITS_PER_WORD;
++      }
++
++  /* This loop must iterate over the same space as its companion in
++     riscv_compute_frame_info.  */
++  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
++  for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
++      {
++	enum machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
++
++	riscv_save_restore_reg (mode, regno, offset, fn);
++	offset -= GET_MODE_SIZE (mode);
++      }
++}
++
++/* Save register REG to MEM.  Make the instruction frame-related.  */
++
++static void
++riscv_save_reg (rtx reg, rtx mem)
++{
++  riscv_emit_move (mem, reg);
++  riscv_set_frame_expr (riscv_frame_set (mem, reg));
++}
++
++/* Restore register REG from MEM.  */
++
++static void
++riscv_restore_reg (rtx reg, rtx mem)
++{
++  rtx insn = riscv_emit_move (reg, mem);
++  rtx dwarf = NULL_RTX;
++  dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
++  REG_NOTES (insn) = dwarf;
++
++  RTX_FRAME_RELATED_P (insn) = 1;
++}
++
++/* Return the code to invoke the GPR save routine.  */
++
++const char *
++riscv_output_gpr_save (unsigned mask)
++{
++  static char buf[GP_REG_NUM * 32];
++  size_t len = 0;
++  unsigned n = riscv_save_libcall_count (mask), i;
++  unsigned frame_size = RISCV_STACK_ALIGN ((n + 1) * UNITS_PER_WORD);
++
++  len += sprintf (buf + len, "call\tt0,__riscv_save_%u", n);
++
++#ifdef DWARF2_UNWIND_INFO
++  /* Describe the effect of the call to __riscv_save_X.  */
++  if (dwarf2out_do_cfi_asm ())
++    {
++      len += sprintf (buf + len, "\n\t.cfi_def_cfa_offset %u", frame_size);
++
++      for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
++	if (BITSET_P (cfun->machine->frame.mask, i))
++	  len += sprintf (buf + len, "\n\t.cfi_offset %u,%d", i,
++			  (CALLEE_SAVED_REG_NUMBER (i) + 2) * -UNITS_PER_WORD);
++    }
++#endif
++
++  return buf;
++}
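++
++/* For example, with -msave-restore a prologue that needs ra and a couple
++   of s-registers saved returns a string such as "call\tt0,__riscv_save_2"
++   (the exact routine number comes from CALLEE_SAVED_REG_NUMBER), followed,
++   when CFI is emitted, by .cfi_def_cfa_offset and .cfi_offset directives
++   describing the stores the routine performs.  */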
++
++/* Expand the "prologue" pattern.  */
++
++void
++riscv_expand_prologue (void)
++{
++  struct riscv_frame_info *frame = &cfun->machine->frame;
++  HOST_WIDE_INT size = frame->total_size;
++  unsigned mask = frame->mask;
++  rtx insn;
++
++  if (flag_stack_usage_info)
++    current_function_static_stack_size = size;
++
++  /* When optimizing for size, call a subroutine to save the registers.  */
++  if (riscv_use_save_libcall (frame))
++    {
++      frame->mask = 0; /* Temporarily fib that we need not save GPRs.  */
++      size -= frame->save_libcall_adjustment;
++      emit_insn (gen_gpr_save (GEN_INT (mask)));
++    }
++
++  /* Save the registers.  Allocate up to RISCV_MAX_FIRST_STACK_STEP
++     bytes beforehand; this is enough to cover the register save area
++     without going out of range.  */
++  if ((frame->mask | frame->fmask) != 0)
++    {
++      HOST_WIDE_INT step1;
++
++      step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
++      insn = gen_add3_insn (stack_pointer_rtx,
++			    stack_pointer_rtx,
++			    GEN_INT (-step1));
++      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++      size -= step1;
++      riscv_for_each_saved_reg (size, riscv_save_reg);
++    }
++
++  frame->mask = mask; /* Undo the above fib.  */
++
++  /* Set up the frame pointer, if we're using one.  */
++  if (frame_pointer_needed)
++    {
++      insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
++                            GEN_INT (frame->hard_frame_pointer_offset - size));
++      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++    }
++
++  /* Allocate the rest of the frame.  */
++  if (size > 0)
++    {
++      if (SMALL_OPERAND (-size))
++	{
++	  insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++				GEN_INT (-size));
++	  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++	}
++      else
++	{
++	  riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
++	  emit_insn (gen_add3_insn (stack_pointer_rtx,
++				    stack_pointer_rtx,
++				    RISCV_PROLOGUE_TEMP (Pmode)));
++
++	  /* Describe the effect of the previous instructions.  */
++	  insn = plus_constant (Pmode, stack_pointer_rtx, -size);
++	  insn = gen_rtx_SET (stack_pointer_rtx, insn);
++	  riscv_set_frame_expr (insn);
++	}
++    }
++}
++
++/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
++   says which.  */
++
++void
++riscv_expand_epilogue (bool sibcall_p)
++{
++  /* Split the frame into two.  STEP1 is the amount of stack we should
++     deallocate before restoring the registers.  STEP2 is the amount we
++     should deallocate afterwards.
++
++     Start off by assuming that no registers need to be restored.  */
++  struct riscv_frame_info *frame = &cfun->machine->frame;
++  unsigned mask = frame->mask;
++  HOST_WIDE_INT step1 = frame->total_size;
++  HOST_WIDE_INT step2 = 0;
++  bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
++  rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
++  rtx insn;
++
++  if (!sibcall_p && riscv_can_use_return_insn ())
++    {
++      emit_jump_insn (gen_return ());
++      return;
++    }
++
++  /* Move past any dynamic stack allocations.  */
++  if (cfun->calls_alloca)
++    {
++      rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
++      if (!SMALL_OPERAND (INTVAL (adjust)))
++	{
++	  riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
++	  adjust = RISCV_PROLOGUE_TEMP (Pmode);
++	}
++
++      emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
++				adjust));
++    }
++
++  /* If we need to restore registers, deallocate as much stack as
++     possible in the second step without going out of range.  */
++  if ((frame->mask | frame->fmask) != 0)
++    {
++      step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
++      step1 -= step2;
++    }
++
++  /* Deallocate the first STEP1 bytes by moving the stack pointer up.  */
++  if (step1 > 0)
++    {
++      /* Get an rtx for STEP1 that we can add to BASE.  */
++      rtx adjust = GEN_INT (step1);
++      if (!SMALL_OPERAND (step1))
++	{
++	  riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
++	  adjust = RISCV_PROLOGUE_TEMP (Pmode);
++	}
++
++      insn = emit_insn (
++	       gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
++
++      rtx dwarf = NULL_RTX;
++      rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++                                         const0_rtx);
++      dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
++      RTX_FRAME_RELATED_P (insn) = 1;
++
++      REG_NOTES (insn) = dwarf;
++    }
++
++  if (use_restore_libcall)
++    frame->mask = 0; /* Temporarily fib that we need not save GPRs.  */
++
++  /* Restore the registers.  */
++  riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
++
++  if (use_restore_libcall)
++    {
++      frame->mask = mask; /* Undo the above fib.  */
++      gcc_assert (step2 >= frame->save_libcall_adjustment);
++      step2 -= frame->save_libcall_adjustment;
++    }
++
++  /* Deallocate the final bit of the frame.  */
++  if (step2 > 0)
++    {
++      insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++				       GEN_INT (step2)));
++
++      rtx dwarf = NULL_RTX;
++      rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++                                         const0_rtx);
++      dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
++      RTX_FRAME_RELATED_P (insn) = 1;
++
++      REG_NOTES (insn) = dwarf;
++    }
++
++  if (use_restore_libcall)
++    {
++      emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
++      emit_jump_insn (gen_gpr_restore_return (ra));
++      return;
++    }
++
++  /* Add in the __builtin_eh_return stack adjustment. */
++  if (crtl->calls_eh_return)
++    emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++			      EH_RETURN_STACKADJ_RTX));
++
++  if (!sibcall_p)
++    emit_jump_insn (gen_simple_return_internal (ra));
++}
++
++/* Return nonzero if this function is known to have a null epilogue.
++   This allows the optimizer to omit jumps to jumps if no stack
++   was created.  */
++
++bool
++riscv_can_use_return_insn (void)
++{
++  return reload_completed && cfun->machine->frame.total_size == 0;
++}
++
++/* Implement TARGET_REGISTER_MOVE_COST.  */
++
++static int
++riscv_register_move_cost (enum machine_mode mode,
++			  reg_class_t from, reg_class_t to)
++{
++  return SECONDARY_MEMORY_NEEDED (from, to, mode) ? 8 : 1;
++}
++
++/* Return true if register REGNO can store a value of mode MODE.  */
++
++bool
++riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
++{
++  unsigned int size = GET_MODE_SIZE (mode);
++  enum mode_class mclass = GET_MODE_CLASS (mode);
++
++  if (GP_REG_P (regno))
++    {
++      if (size <= UNITS_PER_WORD)
++	return true;
++
++      /* Double-word values must be even-register-aligned.  */
++      if (size <= 2 * UNITS_PER_WORD)
++	return regno % 2 == 0;
++
++      /* A __complex__ long long (CDImode) on a 32-bit target occupies
++	 two double-words.  */
++      if (size <= 4 * UNITS_PER_WORD)
++	return regno % 2 == 0;
++    }
++
++  if (FP_REG_P (regno))
++    {
++      unsigned max_size = UNITS_PER_FP_REG;
++
++      /* Only use callee-saved registers if a potential callee is guaranteed
++	 to spill the requisite width.  */
++      if (UNITS_PER_FP_ARG < UNITS_PER_FP_REG && !call_used_regs[regno])
++	max_size = UNITS_PER_FP_ARG;
++
++      if (mclass == MODE_FLOAT
++	  || mclass == MODE_COMPLEX_FLOAT
++	  || mclass == MODE_VECTOR_FLOAT)
++	return size <= max_size;
++    }
++
++  return false;
++}
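++
++/* For example, on RV32 a DImode (64-bit) value occupies two GPRs and must
++   start in an even-numbered register, so a0/a1 (x10/x11) is a valid home
++   for it but a1/a2 is not.  */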
++
++/* Implement HARD_REGNO_NREGS.  */
++
++unsigned int
++riscv_hard_regno_nregs (int regno, enum machine_mode mode)
++{
++  if (FP_REG_P (regno))
++    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
++
++  /* All other registers are word-sized.  */
++  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++}
++
++/* Implement CLASS_MAX_NREGS.  */
++
++static unsigned char
++riscv_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
++{
++  if (reg_class_subset_p (FP_REGS, rclass))
++    return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
++
++  if (reg_class_subset_p (GR_REGS, rclass))
++    return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
++
++  return 0;
++}
++
++/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
++
++static reg_class_t
++riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
++{
++  return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
++         reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
++	 rclass;
++}
++
++/* Implement TARGET_MEMORY_MOVE_COST.  */
++
++static int
++riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
++{
++  return (tune_info->memory_cost
++	  + memory_move_secondary_cost (mode, rclass, in));
++}
++
++/* Implement TARGET_MODE_REP_EXTENDED.  */
++
++static int
++riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
++{
++  /* On 64-bit targets, SImode register values are sign-extended to DImode.  */
++  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
++    return SIGN_EXTEND;
++
++  return UNKNOWN;
++}
++
++/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */
++
++static bool
++riscv_scalar_mode_supported_p (enum machine_mode mode)
++{
++  if (ALL_FIXED_POINT_MODE_P (mode)
++      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
++    return true;
++
++  return default_scalar_mode_supported_p (mode);
++}
++
++/* Return the number of instructions that can be issued per cycle.  */
++
++static int
++riscv_issue_rate (void)
++{
++  return tune_info->issue_rate;
++}
++
++/* Implement TARGET_ASM_FILE_START.  */
++
++static void
++riscv_file_start (void)
++{
++  default_file_start ();
++
++  /* Instruct GAS to generate position-[in]dependent code.  */
++  fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
++}
++
++/* This structure describes a single built-in function.  */
++struct riscv_builtin_description {
++  /* The code of the main .md file instruction.  See riscv_builtin_type
++     for more information.  */
++  enum insn_code icode;
++
++  /* The name of the built-in function.  */
++  const char *name;
++
++  /* Specifies how the function should be expanded.  */
++  enum riscv_builtin_type builtin_type;
++
++  /* The function's prototype.  */
++  enum riscv_function_type function_type;
++
++  /* Whether the function is available.  */
++  unsigned int (*avail) (void);
++};
++
++static unsigned int
++riscv_builtin_avail_riscv (void)
++{
++  return 1;
++}
++
++/* Construct a riscv_builtin_description from the given arguments.
++
++   INSN is the name of the associated instruction pattern, without the
++   leading CODE_FOR_riscv_.
++
++   CODE is the floating-point condition code associated with the
++   function.  It can be 'f' if the field is not applicable.
++
++   NAME is the name of the function itself, without the leading
++   "__builtin_riscv_".
++
++   BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
++
++   AVAIL is the name of the availability predicate, without the leading
++   riscv_builtin_avail_.  */
++#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL)	\
++  { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME,				\
++    BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
++   mapped to instruction CODE_FOR_<INSN>.  FUNCTION_TYPE and AVAIL
++   are as for RISCV_BUILTIN.  */
++#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\
++  RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
++   function mapped to instruction CODE_FOR_<INSN>.  FUNCTION_TYPE
++   and AVAIL are as for RISCV_BUILTIN.  */
++#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\
++  RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET,		\
++		FUNCTION_TYPE, AVAIL)
++
++static const struct riscv_builtin_description riscv_builtins[] = {
++  DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
++};
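++
++/* For example, the entry above expands to { CODE_FOR_nop,
++   "__builtin_riscv_nop", RISCV_BUILTIN_DIRECT_NO_TARGET,
++   RISCV_VOID_FTYPE_VOID, riscv_builtin_avail_riscv }, so user code can
++   call __builtin_riscv_nop () to emit a nop instruction.  */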
++
++/* Index I is the function declaration for riscv_builtins[I], or null if the
++   function isn't defined on this target.  */
++static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
++
++
++/* Source-level argument types.  */
++#define RISCV_ATYPE_VOID void_type_node
++#define RISCV_ATYPE_INT integer_type_node
++#define RISCV_ATYPE_POINTER ptr_type_node
++#define RISCV_ATYPE_CPOINTER const_ptr_type_node
++
++/* Standard mode-based argument types.  */
++#define RISCV_ATYPE_UQI unsigned_intQI_type_node
++#define RISCV_ATYPE_SI intSI_type_node
++#define RISCV_ATYPE_USI unsigned_intSI_type_node
++#define RISCV_ATYPE_DI intDI_type_node
++#define RISCV_ATYPE_UDI unsigned_intDI_type_node
++#define RISCV_ATYPE_SF float_type_node
++#define RISCV_ATYPE_DF double_type_node
++
++/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
++   their associated RISCV_ATYPEs.  */
++#define RISCV_FTYPE_ATYPES1(A, B) \
++  RISCV_ATYPE_##A, RISCV_ATYPE_##B
++
++#define RISCV_FTYPE_ATYPES2(A, B, C) \
++  RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
++
++#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
++  RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
++
++#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
++  RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
++  RISCV_ATYPE_##E
++
++/* Return the function type associated with function prototype TYPE.  */
++
++static tree
++riscv_build_function_type (enum riscv_function_type type)
++{
++  static tree types[(int) RISCV_MAX_FTYPE_MAX];
++
++  if (types[(int) type] == NULL_TREE)
++    switch (type)
++      {
++#define DEF_RISCV_FTYPE(NUM, ARGS)					\
++  case RISCV_FTYPE_NAME##NUM ARGS:					\
++    types[(int) type]							\
++      = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS,		\
++				  NULL_TREE);				\
++    break;
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++      default:
++	gcc_unreachable ();
++      }
++
++  return types[(int) type];
++}
++
++/* Implement TARGET_INIT_BUILTINS.  */
++
++static void
++riscv_init_builtins (void)
++{
++  const struct riscv_builtin_description *d;
++  unsigned int i;
++
++  /* Iterate through all of the bdesc arrays, initializing all of the
++     builtin functions.  */
++  for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
++    {
++      d = &riscv_builtins[i];
++      if (d->avail ())
++	riscv_builtin_decls[i]
++	  = add_builtin_function (d->name,
++				  riscv_build_function_type (d->function_type),
++				  i, BUILT_IN_MD, NULL, NULL);
++    }
++}
++
++/* Implement TARGET_BUILTIN_DECL.  */
++
++static tree
++riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
++{
++  if (code >= ARRAY_SIZE (riscv_builtins))
++    return error_mark_node;
++  return riscv_builtin_decls[code];
++}
++
++/* Take argument ARGNO from EXP's argument list and convert it into a
++   form suitable for input operand OPNO of instruction ICODE.  Return the
++   value.  */
++
++static rtx
++riscv_prepare_builtin_arg (enum insn_code icode,
++			  unsigned int opno, tree exp, unsigned int argno)
++{
++  tree arg;
++  rtx value;
++  enum machine_mode mode;
++
++  arg = CALL_EXPR_ARG (exp, argno);
++  value = expand_normal (arg);
++  mode = insn_data[icode].operand[opno].mode;
++  if (!insn_data[icode].operand[opno].predicate (value, mode))
++    {
++      /* We need to get the mode from ARG for two reasons:
++
++	   - to cope with address operands, where MODE is the mode of the
++	     memory, rather than of VALUE itself.
++
++	   - to cope with special predicates like pmode_register_operand,
++	     where MODE is VOIDmode.  */
++      value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
++
++      /* Check the predicate again.  */
++      if (!insn_data[icode].operand[opno].predicate (value, mode))
++	{
++	  error ("invalid argument to built-in function");
++	  return const0_rtx;
++	}
++    }
++
++  return value;
++}
++
++/* Return an rtx suitable for output operand OP of instruction ICODE.
++   If TARGET is non-null, try to use it where possible.  */
++
++static rtx
++riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
++{
++  enum machine_mode mode;
++
++  mode = insn_data[icode].operand[op].mode;
++  if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
++    target = gen_reg_rtx (mode);
++
++  return target;
++}
++
++/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
++   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
++   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
++   suggests a good place to put the result.  */
++
++static rtx
++riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
++			    bool has_target_p)
++{
++  rtx ops[MAX_RECOG_OPERANDS];
++  int opno, argno;
++
++  /* Map any target to operand 0.  */
++  opno = 0;
++  if (has_target_p)
++    {
++      target = riscv_prepare_builtin_target (icode, opno, target);
++      ops[opno] = target;
++      opno++;
++    }
++
++  /* Map the arguments to the other operands.  The n_operands value
++     for an expander includes match_dups and match_scratches as well as
++     match_operands, so n_operands is only an upper bound on the number
++     of arguments to the expander function.  */
++  gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
++  for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
++    ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
++
++  switch (opno)
++    {
++    case 2:
++      emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
++      break;
++
++    case 3:
++      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
++      break;
++
++    case 4:
++      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++  return target;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN.  */
++
++static rtx
++riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++		     enum machine_mode mode ATTRIBUTE_UNUSED,
++		     int ignore ATTRIBUTE_UNUSED)
++{
++  tree fndecl;
++  unsigned int fcode, avail;
++  const struct riscv_builtin_description *d;
++
++  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
++  fcode = DECL_FUNCTION_CODE (fndecl);
++  gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
++  d = &riscv_builtins[fcode];
++  avail = d->avail ();
++  gcc_assert (avail != 0);
++  switch (d->builtin_type)
++    {
++    case RISCV_BUILTIN_DIRECT:
++      return riscv_expand_builtin_direct (d->icode, target, exp, true);
++
++    case RISCV_BUILTIN_DIRECT_NO_TARGET:
++      return riscv_expand_builtin_direct (d->icode, target, exp, false);
++    }
++  gcc_unreachable ();
++}
++
++/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
++   in order to avoid duplicating too much logic from elsewhere.  */
++
++static void
++riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
++		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
++		      tree function)
++{
++  rtx this_rtx, temp1, temp2, fnaddr;
++  rtx_insn *insn;
++  bool use_sibcall_p;
++
++  /* Pretend to be a post-reload pass while generating rtl.  */
++  reload_completed = 1;
++
++  /* Mark the end of the (empty) prologue.  */
++  emit_note (NOTE_INSN_PROLOGUE_END);
++
++  /* Determine if we can use a sibcall to call FUNCTION directly.  */
++  fnaddr = XEXP (DECL_RTL (function), 0);
++  use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
++
++  /* We need two temporary registers in some cases.  */
++  temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
++  temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
++
++  /* Find out which register contains the "this" pointer.  */
++  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
++    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
++  else
++    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++  /* Add DELTA to THIS_RTX.  */
++  if (delta != 0)
++    {
++      rtx offset = GEN_INT (delta);
++      if (!SMALL_OPERAND (delta))
++	{
++	  riscv_emit_move (temp1, offset);
++	  offset = temp1;
++	}
++      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
++    }
++
++  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
++  if (vcall_offset != 0)
++    {
++      rtx addr;
++
++      /* Set TEMP1 to *THIS_RTX.  */
++      riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
++
++      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
++      addr = riscv_add_offset (temp2, temp1, vcall_offset);
++
++      /* Load the offset and add it to THIS_RTX.  */
++      riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
++      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
++    }
++
++  /* Jump to the target function.  Use a sibcall if direct jumps are
++     allowed, otherwise load the address into a register first.  */
++  if (use_sibcall_p)
++    {
++      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
++      SIBLING_CALL_P (insn) = 1;
++    }
++  else
++    {
++      riscv_emit_move (temp1, fnaddr);
++      emit_jump_insn (gen_indirect_jump (temp1));
++    }
++
++  /* Run just enough of rest_of_compilation.  This sequence was
++     "borrowed" from alpha.c.  */
++  insn = get_insns ();
++  split_all_insns_noflow ();
++  shorten_branches (insn);
++  final_start_function (insn, file, 1);
++  final (insn, file, 1);
++  final_end_function ();
++
++  /* Clean up the vars set above.  Note that final_end_function resets
++     the global pointer for us.  */
++  reload_completed = 0;
++}
++
++/* Allocate a chunk of memory for per-function machine-dependent data.  */
++
++static struct machine_function *
++riscv_init_machine_status (void)
++{
++  return ggc_cleared_alloc<machine_function> ();
++}
++
++/* Implement TARGET_OPTION_OVERRIDE.  */
++
++static void
++riscv_option_override (void)
++{
++  const struct riscv_cpu_info *cpu;
++
++#ifdef SUBTARGET_OVERRIDE_OPTIONS
++  SUBTARGET_OVERRIDE_OPTIONS;
++#endif
++
++  flag_pcc_struct_return = 0;
++
++  if (flag_pic)
++    g_switch_value = 0;
++
++  /* Prefer a call to memcpy over inline code when optimizing for size,
++     though see MOVE_RATIO in riscv.h.  */
++  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
++    target_flags |= MASK_MEMCPY;
++
++  /* Handle -mtune.  */
++  cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
++			 RISCV_TUNE_STRING_DEFAULT);
++  tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
++
++  /* If the user hasn't specified a branch cost, use the processor's
++     default.  */
++  if (riscv_branch_cost == 0)
++    riscv_branch_cost = tune_info->branch_cost;
++
++  /* Function to allocate machine-dependent function status.  */
++  init_machine_status = &riscv_init_machine_status;
++
++  if (riscv_cmodel_string)
++    {
++      if (strcmp (riscv_cmodel_string, "medlow") == 0)
++	riscv_cmodel = CM_MEDLOW;
++      else if (strcmp (riscv_cmodel_string, "medany") == 0)
++	riscv_cmodel = CM_MEDANY;
++      else
++	error ("unsupported code model: %s", riscv_cmodel_string);
++    }
++
++  if (flag_pic)
++    riscv_cmodel = CM_PIC;
++
++  /* We get better code with explicit relocs for CM_MEDLOW, but
++     worse code for the others (for now).  Pick the best default.  */
++  if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
++    if (riscv_cmodel == CM_MEDLOW)
++      target_flags |= MASK_EXPLICIT_RELOCS;
++
++  /* Require that the ISA supports the requested floating-point ABI.  */
++  switch (riscv_float_abi)
++    {
++    case FLOAT_ABI_SOFT:
++      break;
++
++    case FLOAT_ABI_SINGLE:
++      if (!TARGET_HARD_FLOAT)
++	error ("-mfloat-abi=single requires -msingle-float or -mdouble-float");
++      break;
++
++    case FLOAT_ABI_DOUBLE:
++      if (!TARGET_DOUBLE_FLOAT)
++	error ("-mfloat-abi=double requires -mdouble-float");
++      break;
++    }
++}
++
++/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */
++
++static void
++riscv_conditional_register_usage (void)
++{
++  if (!TARGET_HARD_FLOAT)
++    {
++      for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++	fixed_regs[regno] = call_used_regs[regno] = 1;
++    }
++}
++
++/* Return a register priority for hard reg REGNO.  */
++static int
++riscv_register_priority (int regno)
++{
++  /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection.  */
++  if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
++		     || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
++    return 1;
++
++  return 0;
++}
++
++/* Implement TARGET_TRAMPOLINE_INIT.  */
++
++static void
++riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
++{
++  rtx addr, end_addr, mem;
++  uint32_t trampoline[4];
++  unsigned int i;
++  HOST_WIDE_INT static_chain_offset, target_function_offset;
++
++  /* Work out the offsets of the pointers from the start of the
++     trampoline code.  */
++  gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
++  static_chain_offset = TRAMPOLINE_CODE_SIZE;
++  target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
++
++  /* Get pointers to the beginning and end of the code block.  */
++  addr = force_reg (Pmode, XEXP (m_tramp, 0));
++  end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
++
++  /* auipc   t0, 0
++     l[wd]   t1, target_function_offset(t0)
++     l[wd]   t0, static_chain_offset(t0)
++     jr      t1
++  */
++  trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
++  trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
++		  | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
++		  | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
++		  | (target_function_offset << SHIFT_IMM);
++  trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
++		  | (STATIC_CHAIN_REGNUM << SHIFT_RD)
++		  | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
++		  | (static_chain_offset << SHIFT_IMM);
++  trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
++
++  /* Copy the trampoline code.  */
++  for (i = 0; i < ARRAY_SIZE (trampoline); i++)
++    {
++      mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
++      riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
++    }
++
++  /* Set up the static chain pointer field.  */
++  mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
++  riscv_emit_move (mem, chain_value);
++
++  /* Set up the target function field.  */
++  mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
++  riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
++
++  /* Flush the code part of the trampoline.  */
++  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
++  emit_insn (gen_clear_cache (addr, end_addr));
++}
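++
++/* For example, with the 16-byte code block above, static_chain_offset is
++   16 and target_function_offset is 24 on RV64 (20 on RV32), so the
++   trampoline executes roughly:
++       auipc t0, 0
++       ld    t1, 24(t0)   # lw t1, 20(t0) on RV32
++       ld    t0, 16(t0)   # lw t0, 16(t0) on RV32
++       jr    t1
++   before transferring control to the target function.  */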
++
++/* Return leaf_function_p () and cache the result.  */
++
++static bool
++riscv_leaf_function_p (void)
++{
++  if (cfun->machine->is_leaf == 0)
++    cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
++
++  return cfun->machine->is_leaf > 0;
++}
++
++/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */
++
++static bool
++riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
++			       tree exp ATTRIBUTE_UNUSED)
++{
++  /* When optimizing for size, don't use sibcalls in non-leaf routines.  */
++  if (TARGET_SAVE_RESTORE)
++    return riscv_leaf_function_p ();
++
++  return true;
++}
++
++/* Return true if INSN should not be copied.  */
++
++static bool
++riscv_cannot_copy_insn_p (rtx_insn *insn)
++{
++  return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
++}
++
++/* Initialize the GCC target structure.  */
++#undef TARGET_ASM_ALIGNED_HI_OP
++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
++#undef TARGET_ASM_ALIGNED_SI_OP
++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
++#undef TARGET_ASM_ALIGNED_DI_OP
++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++
++#undef TARGET_OPTION_OVERRIDE
++#define TARGET_OPTION_OVERRIDE riscv_option_override
++
++#undef TARGET_LEGITIMIZE_ADDRESS
++#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
++
++#undef TARGET_SCHED_ISSUE_RATE
++#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
++
++#undef TARGET_FUNCTION_OK_FOR_SIBCALL
++#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
++
++#undef TARGET_REGISTER_MOVE_COST
++#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
++#undef TARGET_MEMORY_MOVE_COST
++#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS riscv_rtx_costs
++#undef TARGET_ADDRESS_COST
++#define TARGET_ADDRESS_COST riscv_address_cost
++
++#undef  TARGET_PREFERRED_RELOAD_CLASS
++#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
++
++#undef TARGET_ASM_FILE_START
++#define TARGET_ASM_FILE_START riscv_file_start
++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
++
++#undef TARGET_EXPAND_BUILTIN_VA_START
++#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
++
++#undef  TARGET_PROMOTE_FUNCTION_MODE
++#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
++
++#undef TARGET_RETURN_IN_MEMORY
++#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
++
++#undef TARGET_ASM_OUTPUT_MI_THUNK
++#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
++
++#undef TARGET_PRINT_OPERAND
++#define TARGET_PRINT_OPERAND riscv_print_operand
++#undef TARGET_PRINT_OPERAND_ADDRESS
++#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
++
++#undef TARGET_SETUP_INCOMING_VARARGS
++#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
++#undef TARGET_STRICT_ARGUMENT_NAMING
++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
++#undef TARGET_FUNCTION_ARG
++#define TARGET_FUNCTION_ARG riscv_function_arg
++#undef TARGET_FUNCTION_ARG_ADVANCE
++#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
++#undef TARGET_FUNCTION_ARG_BOUNDARY
++#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
++
++#undef TARGET_MODE_REP_EXTENDED
++#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
++
++#undef TARGET_SCALAR_MODE_SUPPORTED_P
++#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
++
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS riscv_init_builtins
++#undef TARGET_BUILTIN_DECL
++#define TARGET_BUILTIN_DECL riscv_builtin_decl
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
++
++#undef TARGET_HAVE_TLS
++#define TARGET_HAVE_TLS HAVE_AS_TLS
++
++#undef TARGET_CANNOT_FORCE_CONST_MEM
++#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
++
++#undef TARGET_LEGITIMATE_CONSTANT_P
++#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
++
++#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
++
++#ifdef HAVE_AS_DTPRELWORD
++#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
++#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
++#endif
++
++#undef TARGET_LEGITIMATE_ADDRESS_P
++#define TARGET_LEGITIMATE_ADDRESS_P	riscv_legitimate_address_p
++
++#undef TARGET_CAN_ELIMINATE
++#define TARGET_CAN_ELIMINATE riscv_can_eliminate
++
++#undef TARGET_CONDITIONAL_REGISTER_USAGE
++#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
++
++#undef TARGET_CLASS_MAX_NREGS
++#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
++
++#undef TARGET_TRAMPOLINE_INIT
++#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
++
++#undef TARGET_IN_SMALL_DATA_P
++#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
++
++#undef TARGET_ASM_SELECT_RTX_SECTION
++#define TARGET_ASM_SELECT_RTX_SECTION  riscv_elf_select_rtx_section
++
++#undef TARGET_MIN_ANCHOR_OFFSET
++#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
++
++#undef TARGET_MAX_ANCHOR_OFFSET
++#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
++
++#undef TARGET_LRA_P
++#define TARGET_LRA_P hook_bool_void_true
++
++#undef TARGET_REGISTER_PRIORITY
++#define TARGET_REGISTER_PRIORITY riscv_register_priority
++
++#undef TARGET_CANNOT_COPY_INSN_P
++#define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p
++
++struct gcc_target targetm = TARGET_INITIALIZER;
++
++#include "gt-riscv.h"
+diff --git original-gcc/gcc/config/riscv/riscv.h gcc-6.2.0/gcc/config/riscv/riscv.h
+new file mode 100644
+index 0000000..51b4a6a
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.h
+@@ -0,0 +1,1092 @@
++/* Definition of RISC-V target for GNU compiler.
++   Copyright (C) 2011-2014 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++   Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef GCC_RISCV_H
++#define GCC_RISCV_H
++
++#include "config/riscv/riscv-opts.h"
++
++/* Target CPU builtins.  */
++#define TARGET_CPU_CPP_BUILTINS()					\
++  do									\
++    {									\
++      builtin_assert ("machine=riscv");					\
++      builtin_assert ("cpu=riscv");					\
++      builtin_define ("__riscv__");					\
++      builtin_define ("__riscv");					\
++									\
++      if (TARGET_64BIT)							\
++	builtin_define ("__riscv64");					\
++      else								\
++	builtin_define ("__riscv32");					\
++									\
++      builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE);	\
++      builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE);	\
++      builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE);	\
++									\
++      if (TARGET_RVC)							\
++	builtin_define ("__riscv_compressed");				\
++									\
++      if (TARGET_ATOMIC)						\
++	builtin_define ("__riscv_atomic");				\
++									\
++      if (TARGET_MUL)							\
++	builtin_define ("__riscv_mul");					\
++      if (TARGET_DIV)							\
++	builtin_define ("__riscv_div");					\
++      if (TARGET_DIV && TARGET_MUL)					\
++	builtin_define ("__riscv_muldiv");				\
++									\
++      builtin_define_with_int_value ("__riscv_xlen",			\
++				     UNITS_PER_WORD * 8);		\
++      if (TARGET_HARD_FLOAT)						\
++	builtin_define_with_int_value ("__riscv_flen",			\
++				       UNITS_PER_FP_REG * 8);		\
++									\
++      if (TARGET_HARD_FLOAT && TARGET_FDIV)				\
++	{								\
++	  builtin_define ("__riscv_fdiv");				\
++	  builtin_define ("__riscv_fsqrt");				\
++	}								\
++									\
++      switch (riscv_float_abi)						\
++	{								\
++	case FLOAT_ABI_SOFT:						\
++	  builtin_define ("__riscv_float_abi_soft");			\
++	  break;							\
++									\
++	case FLOAT_ABI_SINGLE:						\
++	  builtin_define ("__riscv_float_abi_single");			\
++	  break;							\
++									\
++	case FLOAT_ABI_DOUBLE:						\
++	  builtin_define ("__riscv_float_abi_double");			\
++	  break;							\
++	}								\
++									\
++      /* The base RISC-V ISA is always little-endian. */		\
++      builtin_define_std ("RISCVEL");					\
++									\
++      if (riscv_cmodel == CM_MEDANY)					\
++	builtin_define ("_RISCV_CMODEL_MEDANY");			\
++    }									\
++  while (0)
++
++/* Default target_flags if no switches are specified  */
++
++#ifndef TARGET_DEFAULT
++#define TARGET_DEFAULT 0
++#endif
++
++#ifndef RISCV_TUNE_STRING_DEFAULT
++#define RISCV_TUNE_STRING_DEFAULT "rocket"
++#endif
++
++#if TARGET_64BIT_DEFAULT
++# define MULTILIB_ARCH_DEFAULT "m64"
++# define OPT_ARCH64 "!m32"
++# define OPT_ARCH32 "m32"
++#else
++# define MULTILIB_ARCH_DEFAULT "m32"
++# define OPT_ARCH64 "m64"
++# define OPT_ARCH32 "!m64"
++#endif
++
++#ifndef MULTILIB_DEFAULTS
++#define MULTILIB_DEFAULTS \
++    { MULTILIB_ARCH_DEFAULT }
++#endif
++
++
++/* Support for a compile-time default CPU, et cetera.  The rules are:
++   --with-tune is ignored if -mtune is specified.
++   --with-float is ignored if -mfloat-abi is specified.  */
++#define OPTION_DEFAULT_SPECS \
++  {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
++  {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
++  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
++  {"float", "%{!mfloat-abi=*:%{!mno-float:-mfloat-abi=%(VALUE)}}"}, \
++
++#define DRIVER_SELF_SPECS ""
++
++#ifdef IN_LIBGCC2
++#undef TARGET_64BIT
++/* Make this compile time constant for libgcc2 */
++#ifdef __riscv64
++#define TARGET_64BIT		1
++#else
++#define TARGET_64BIT		0
++#endif
++#endif /* IN_LIBGCC2 */
++
++/* Tell collect what flags to pass to nm.  */
++#ifndef NM_FLAGS
++#define NM_FLAGS "-Bn"
++#endif
++
++#undef ASM_SPEC
++#define ASM_SPEC "\
++%(subtarget_asm_debugging_spec) \
++%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
++%{mrvc} %{mno-rvc} \
++%{fPIC|fpic|fPIE|fpie:-fpic} \
++%{march=*} \
++%{mfloat-abi=*} \
++%{mno-float:-mfloat-abi=soft} \
++%(subtarget_asm_spec)"
++
++/* Extra switches sometimes passed to the linker.  */
++
++#ifndef LINK_SPEC
++#define LINK_SPEC "\
++%{!T:-dT riscv.ld} \
++%{m64:-melf64lriscv} \
++%{m32:-melf32lriscv} \
++%{shared}"
++#endif  /* LINK_SPEC defined */
++
++/* This macro defines names of additional specifications to put in the specs
++   that can be used in various specifications like CC1_SPEC.  Its definition
++   is an initializer with a subgrouping for each command option.
++
++   Each subgrouping contains a string constant, that defines the
++   specification name, and a string constant that used by the GCC driver
++   program.
++
++   Do not define this macro if it does not need to do anything.  */
++
++#define EXTRA_SPECS							\
++  { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT },		\
++  SUBTARGET_EXTRA_SPECS
++
++#ifndef SUBTARGET_EXTRA_SPECS
++#define SUBTARGET_EXTRA_SPECS
++#endif
++
++#define TARGET_DEFAULT_CMODEL CM_MEDLOW
++
++/* By default, turn on GDB extensions.  */
++#define DEFAULT_GDB_EXTENSIONS 1
++
++#define LOCAL_LABEL_PREFIX	"."
++#define USER_LABEL_PREFIX	""
++
++#define DWARF2_DEBUGGING_INFO 1
++#define DWARF2_ASM_LINE_DEBUG_INFO 0
++
++/* The mapping from gcc register number to DWARF 2 CFA column number.  */
++#define DWARF_FRAME_REGNUM(REGNO) \
++  (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
++
++/* The DWARF 2 CFA column which tracks the return address.  */
++#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
++
++/* Don't emit .cfi_sections, as it does not work */
++#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
++#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
++
++/* Before the prologue, RA lives in r31.  */
++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
++
++/* Describe how we implement __builtin_eh_return.  */
++#define EH_RETURN_DATA_REGNO(N) \
++  ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
++
++#define EH_RETURN_STACKADJ_RTX  gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
++
++/* Target machine storage layout */
++
++#define BITS_BIG_ENDIAN 0
++#define BYTES_BIG_ENDIAN 0
++#define WORDS_BIG_ENDIAN 0
++
++#define MAX_BITS_PER_WORD 64
++
++/* Width of a word, in units (bytes).  */
++#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
++#ifndef IN_LIBGCC2
++#define MIN_UNITS_PER_WORD 4
++#endif
++
++/* The `Q' extension is not yet supported.  */
++#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4)
++
++/* The largest size of value that can be held in floating-point
++   registers and moved with a single instruction.  */
++#define UNITS_PER_FP_ARG			\
++  (riscv_float_abi == FLOAT_ABI_SOFT ? 0 :	\
++   riscv_float_abi == FLOAT_ABI_SINGLE ? 4 : 8)
++
++/* The number of bytes in a double.  */
++#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
++
++/* Set the sizes of the core types.  */
++#define SHORT_TYPE_SIZE 16
++#define INT_TYPE_SIZE 32
++#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
++#define LONG_LONG_TYPE_SIZE 64
++
++#define FLOAT_TYPE_SIZE 32
++#define DOUBLE_TYPE_SIZE 64
++#define LONG_DOUBLE_TYPE_SIZE (TARGET_64BIT ? 128 : 64)
++
++#ifdef IN_LIBGCC2
++# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
++#endif
++
++/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
++#define PARM_BOUNDARY BITS_PER_WORD
++
++/* Allocation boundary (in *bits*) for the code of a function.  */
++#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
++
++/* There is no point aligning anything to a rounder boundary than this.  */
++#define BIGGEST_ALIGNMENT 128
++
++/* All accesses must be aligned.  */
++#define STRICT_ALIGNMENT 1
++
++/* Define this if you wish to imitate the way many other C compilers
++   handle alignment of bitfields and the structures that contain
++   them.
++
++   The behavior is that the type written for a bit-field (`int',
++   `short', or other integer type) imposes an alignment for the
++   entire structure, as if the structure really did contain an
++   ordinary field of that type.  In addition, the bit-field is placed
++   within the structure so that it would fit within such a field,
++   not crossing a boundary for it.
++
++   Thus, on most machines, a bit-field whose type is written as `int'
++   would not cross a four-byte boundary, and would force four-byte
++   alignment for the whole structure.  (The alignment used may not
++   be four bytes; it is controlled by the other alignment
++   parameters.)
++
++   If the macro is defined, its definition should be a C expression;
++   a nonzero value for the expression enables this behavior.  */
++
++#define PCC_BITFIELD_TYPE_MATTERS 1
++
++/* If defined, a C expression to compute the alignment given to a
++   constant that is being placed in memory.  CONSTANT is the constant
++   and ALIGN is the alignment that the object would ordinarily have.
++   The value of this macro is used instead of that alignment to align
++   the object.
++
++   If this macro is not defined, then ALIGN is used.
++
++   The typical use of this macro is to increase alignment for string
++   constants to be word aligned so that `strcpy' calls that copy
++   constants can be done inline.  */
++
++#define CONSTANT_ALIGNMENT(EXP, ALIGN)					\
++  ((TREE_CODE (EXP) == STRING_CST  || TREE_CODE (EXP) == CONSTRUCTOR)	\
++   && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
++
++/* If defined, a C expression to compute the alignment for a static
++   variable.  TYPE is the data type, and ALIGN is the alignment that
++   the object would ordinarily have.  The value of this macro is used
++   instead of that alignment to align the object.
++
++   If this macro is not defined, then ALIGN is used.
++
++   One use of this macro is to increase alignment of medium-size
++   data to make it all fit in fewer cache lines.  Another is to
++   cause character arrays to be word-aligned so that `strcpy' calls
++   that copy constants to character arrays can be done inline.  */
++
++#undef DATA_ALIGNMENT
++#define DATA_ALIGNMENT(TYPE, ALIGN)					\
++  ((((ALIGN) < BITS_PER_WORD)						\
++    && (TREE_CODE (TYPE) == ARRAY_TYPE					\
++	|| TREE_CODE (TYPE) == UNION_TYPE				\
++	|| TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
++
++/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
++   character arrays to be word-aligned so that `strcpy' calls that copy
++   constants to character arrays can be done inline, and 'strcmp' can be
++   optimised to use word loads. */
++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
++  DATA_ALIGNMENT (TYPE, ALIGN)
++
++/* Define if operations between registers always perform the operation
++   on the full register even if a narrower mode is specified.  */
++#define WORD_REGISTER_OPERATIONS 1
++
++/* When in 64-bit mode, move insns will sign extend SImode and CCmode
++   moves.  All other references are zero extended.  */
++#define LOAD_EXTEND_OP(MODE) \
++  (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
++   ? SIGN_EXTEND : ZERO_EXTEND)
++
++/* Define this macro if it is advisable to hold scalars in registers
++   in a wider mode than that declared by the program.  In such cases,
++   the value is constrained to be within the bounds of the declared
++   type, but kept valid in the wider mode.  The signedness of the
++   extension may differ from that of the type.  */
++
++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)	\
++  if (GET_MODE_CLASS (MODE) == MODE_INT		\
++      && GET_MODE_SIZE (MODE) < 4)		\
++    {						\
++      (MODE) = Pmode;				\
++    }
++
++/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
++   Extensions of pointers to word_mode must be signed.  */
++#define POINTERS_EXTEND_UNSIGNED false
++
++/* When floating-point registers are wider than integer ones, moves between
++   them must go through memory.  */
++#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE)	\
++  (GET_MODE_SIZE (MODE) > UNITS_PER_WORD		\
++   && ((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS))
++
++/* Define if loading short immediate values into registers sign extends.  */
++#define SHORT_IMMEDIATES_SIGN_EXTEND 1
++
++/* Standard register usage.  */
++
++/* Number of hardware registers.  We have:
++
++   - 32 integer registers
++   - 32 floating point registers
++   - 2 fake registers:
++	- ARG_POINTER_REGNUM
++	- FRAME_POINTER_REGNUM */
++
++#define FIRST_PSEUDO_REGISTER 66
++
++/* x0, sp, gp, and tp are fixed. */
++
++#define FIXED_REGISTERS							\
++{ /* General registers.  */                                             \
++  1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
++  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
++  /* Floating-point registers.  */                                      \
++  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
++  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\
++  /* Others.  */                                                        \
++  1, 1 \
++}
++
++
++/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
++   The call RTLs themselves clobber ra.  */
++
++#define CALL_USED_REGISTERS						\
++{ /* General registers.  */                                             \
++  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
++  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
++  /* Floating-point registers.  */                                      \
++  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
++  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
++  /* Others.  */                                                        \
++  1, 1 \
++}
++
++#define CALL_REALLY_USED_REGISTERS                                      \
++{ /* General registers.  */                                             \
++  1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
++  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
++  /* Floating-point registers.  */                                      \
++  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,			\
++  1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,			\
++  /* Others.  */                                                        \
++  1, 1 \
++}
++
++/* Internal macros to classify an ISA register's type. */
++
++#define GP_REG_FIRST 0
++#define GP_REG_LAST  31
++#define GP_REG_NUM   (GP_REG_LAST - GP_REG_FIRST + 1)
++
++#define FP_REG_FIRST 32
++#define FP_REG_LAST  63
++#define FP_REG_NUM   (FP_REG_LAST - FP_REG_FIRST + 1)
++
++/* The DWARF 2 CFA column which tracks the return address from a
++   signal handler context.  This means that to maintain backwards
++   compatibility, no hard register can be assigned this column if it
++   would need to be handled by the DWARF unwinder.  */
++#define DWARF_ALT_FRAME_RETURN_COLUMN 64
++
++#define GP_REG_P(REGNO)	\
++  ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
++#define FP_REG_P(REGNO)  \
++  ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
++
++#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
++
++/* Return coprocessor number from register number.  */
++
++#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) 				\
++  (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2'			\
++   : COP3_REG_P (REGNO) ? '3' : '?')
++
++
++#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
++
++#define HARD_REGNO_MODE_OK(REGNO, MODE)					\
++  riscv_hard_regno_mode_ok_p (REGNO, MODE)
++
++#define MODES_TIEABLE_P(MODE1, MODE2)					\
++  ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT		\
++			  && GET_MODE_CLASS (MODE2) == MODE_INT))
++
++/* Use s0 as the frame pointer if it is so requested. */
++#define HARD_FRAME_POINTER_REGNUM 8
++#define STACK_POINTER_REGNUM 2
++#define THREAD_POINTER_REGNUM 4
++
++/* These two registers don't really exist: they get eliminated to either
++   the stack or hard frame pointer.  */
++#define ARG_POINTER_REGNUM 64
++#define FRAME_POINTER_REGNUM 65
++
++#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
++#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
++
++/* Register in which static-chain is passed to a function.  */
++#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
++
++/* Registers used as temporaries in prologue/epilogue code.
++
++   The prologue registers mustn't conflict with any
++   incoming arguments, the static chain pointer, or the frame pointer.
++   The epilogue temporary mustn't conflict with the return registers,
++   the frame pointer, the EH stack adjustment, or the EH data registers. */
++
++#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
++#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
++
++#define MCOUNT_NAME "_mcount"
++
++#define NO_PROFILE_COUNTERS 1
++
++/* Emit rtl for profiling.  Output assembler code to FILE
++   to call "_mcount" for profiling a function entry.  */
++#define PROFILE_HOOK(LABEL)						\
++  {									\
++    rtx fun, ra;							\
++    ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);		\
++    fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME);			\
++    emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, ra, Pmode);	\
++  }
++
++/* All the work done in PROFILE_HOOK, but still required.  */
++#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
++
++/* Define this macro if it is as good or better to call a constant
++   function address than to call an address kept in a register.  */
++#define NO_FUNCTION_CSE 1
++
++/* Define the classes of registers for register constraints in the
++   machine description.  Also define ranges of constants.
++
++   One of the classes must always be named ALL_REGS and include all hard regs.
++   If there is more than one class, another class must be named NO_REGS
++   and contain no registers.
++
++   The name GENERAL_REGS must be the name of a class (or an alias for
++   another name such as ALL_REGS).  This is the class of registers
++   that is allowed by "g" or "r" in a register constraint.
++   Also, registers outside this class are allocated only when
++   instructions express preferences for them.
++
++   The classes must be numbered in nondecreasing order; that is,
++   a larger-numbered class must never be contained completely
++   in a smaller-numbered class.
++
++   For any two classes, it is very desirable that there be another
++   class that represents their union.  */
++
++enum reg_class
++{
++  NO_REGS,			/* no registers in set */
++  T_REGS,			/* registers used by indirect sibcalls */
++  JALR_REGS,			/* registers used by indirect calls */
++  GR_REGS,			/* integer registers */
++  FP_REGS,			/* floating point registers */
++  FRAME_REGS,			/* $arg and $frame */
++  ALL_REGS,			/* all registers */
++  LIM_REG_CLASSES		/* max value + 1 */
++};
++
++#define N_REG_CLASSES (int) LIM_REG_CLASSES
++
++#define GENERAL_REGS GR_REGS
++
++/* An initializer containing the names of the register classes as C
++   string constants.  These names are used in writing some of the
++   debugging dumps.  */
++
++#define REG_CLASS_NAMES							\
++{									\
++  "NO_REGS",								\
++  "T_REGS",								\
++  "JALR_REGS",								\
++  "GR_REGS",								\
++  "FP_REGS",								\
++  "FRAME_REGS",								\
++  "ALL_REGS"								\
++}
++
++/* An initializer containing the contents of the register classes,
++   as integers which are bit masks.  The Nth integer specifies the
++   contents of class N.  The way the integer MASK is interpreted is
++   that register R is in the class if `MASK & (1 << R)' is 1.
++
++   When the machine has more than 32 registers, an integer does not
++   suffice.  Then the integers are replaced by sub-initializers,
++   braced groupings containing several integers.  Each
++   sub-initializer must be suitable as an initializer for the type
++   `HARD_REG_SET' which is defined in `hard-reg-set.h'.  */
++
++#define REG_CLASS_CONTENTS									\
++{												\
++  { 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\
++  { 0xf0000040, 0x00000000, 0x00000000 },	/* T_REGS */		\
++  { 0xffffff40, 0x00000000, 0x00000000 },	/* JALR_REGS */		\
++  { 0xffffffff, 0x00000000, 0x00000000 },	/* GR_REGS */		\
++  { 0x00000000, 0xffffffff, 0x00000000 },	/* FP_REGS */		\
++  { 0x00000000, 0x00000000, 0x00000003 },	/* FRAME_REGS */	\
++  { 0xffffffff, 0xffffffff, 0x00000003 }	/* ALL_REGS */		\
++}
++
++/* A C expression whose value is a register class containing hard
++   register REGNO.  In general there is more than one such class;
++   choose a class which is "minimal", meaning that no smaller class
++   also contains the register.  */
++
++#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
++
++/* A macro whose definition is the name of the class to which a
++   valid base register must belong.  A base register is one used in
++   an address which is the register value plus a displacement.  */
++
++#define BASE_REG_CLASS GR_REGS
++
++/* A macro whose definition is the name of the class to which a
++   valid index register must belong.  An index register is one used
++   in an address where its value is either multiplied by a scale
++   factor or added to another register (as well as added to a
++   displacement).  */
++
++#define INDEX_REG_CLASS NO_REGS
++
++/* We generally want to put call-clobbered registers ahead of
++   call-saved ones.  (IRA expects this.)  */
++
++#define REG_ALLOC_ORDER							\
++{ \
++  /* Call-clobbered GPRs.  */						\
++  15, 14, 13, 12, 11, 10, 16, 17, 6, 28, 29, 30, 31, 5, 7, 1,		\
++  /* Call-saved GPRs.  */						\
++  8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,	       			\
++  /* GPRs that can never be exposed to the register allocator.  */	\
++  0, 2, 3, 4,								\
++  /* Call-clobbered FPRs.  */						\
++  47, 46, 45, 44, 43, 42, 32, 33, 34, 35, 36, 37, 38, 39, 48, 49,	\
++  60, 61, 62, 63,							\
++  /* Call-saved FPRs.  */						\
++  40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,			\
++  /* None of the remaining classes have defined call-saved		\
++     registers.  */							\
++  64, 65								\
++}
++
++/* True if VALUE is a signed 12-bit number.  */
++
++#define SMALL_OPERAND(VALUE) \
++  ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
++
++/* True if VALUE can be loaded into a register using LUI.  */
++
++#define LUI_OPERAND(VALUE)						\
++  (((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH)	\
++   || ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
++
++#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
++  reg_classes_intersect_p (FP_REGS, CLASS)
++
++/* Stack layout; function entry, exit and calling.  */
++
++#define STACK_GROWS_DOWNWARD 1
++
++#define FRAME_GROWS_DOWNWARD 1
++
++#define STARTING_FRAME_OFFSET 0
++
++#define RETURN_ADDR_RTX riscv_return_addr
++
++#define ELIMINABLE_REGS							\
++{{ ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM},				\
++ { ARG_POINTER_REGNUM,   HARD_FRAME_POINTER_REGNUM},			\
++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM},				\
++ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}				\
++
++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
++  (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
++
++/* Allocate stack space for arguments at the beginning of each function.  */
++#define ACCUMULATE_OUTGOING_ARGS 1
++
++/* The argument pointer always points to the first argument.  */
++#define FIRST_PARM_OFFSET(FNDECL) 0
++
++#define REG_PARM_STACK_SPACE(FNDECL) 0
++
++/* Define this if it is the responsibility of the caller to
++   allocate the area reserved for arguments passed in registers.
++   If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
++   of this macro is to determine whether the space is included in
++   `crtl->outgoing_args_size'.  */
++#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
++
++#define STACK_BOUNDARY 128
++
++
++/* Symbolic macros for the registers used to return integer and floating
++   point values.  */
++
++#define GP_RETURN GP_ARG_FIRST
++#define FP_RETURN \
++  (riscv_float_abi == FLOAT_ABI_SOFT ? GP_RETURN : FP_ARG_FIRST)
++
++#define MAX_ARGS_IN_REGISTERS 8
++
++/* Symbolic macros for the first/last argument registers.  */
++
++#define GP_ARG_FIRST (GP_REG_FIRST + 10)
++#define GP_ARG_LAST  (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
++#define FP_ARG_FIRST (FP_REG_FIRST + 10)
++#define FP_ARG_LAST  (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++
++#define CALLEE_SAVED_REG_NUMBER(REGNO)			\
++  ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 :		\
++   (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
++
++#define LIBCALL_VALUE(MODE) \
++  riscv_function_value (NULL_TREE, NULL_TREE, MODE)
++
++#define FUNCTION_VALUE(VALTYPE, FUNC) \
++  riscv_function_value (VALTYPE, FUNC, VOIDmode)
++
++#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
++
++/* 1 if N is a possible register number for function argument passing.
++   We have no FP argument registers when soft-float.  When FP registers
++   are 32 bits, we can't directly reference the odd numbered ones.  */
++
++/* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI.  */
++#define FUNCTION_ARG_REGNO_P(N)					\
++  (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST)			\
++   || (riscv_float_abi != FLOAT_ABI_SOFT			\
++       && IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST)))
++
++/* The ABI views the arguments as a structure, of which the first 8
++   words go in registers and the rest go on the stack.  If I < 8, the Ith
++   word might go in the Ith integer argument register or the Ith
++   floating-point argument register. */
++
++typedef struct {
++  /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
++  unsigned int num_gprs;
++
++  /* Number of words passed on the stack.  */
++  unsigned int stack_words;
++} CUMULATIVE_ARGS;
++
++/* Initialize a variable CUM of type CUMULATIVE_ARGS
++   for a call to a function whose data type is FNTYPE.
++   For a library call, FNTYPE is 0.  */
++
++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
++  memset (&(CUM), 0, sizeof (CUM))
++
++#define EPILOGUE_USES(REGNO)	((REGNO) == RETURN_ADDR_REGNUM)
++
++/* ABI requires 16-byte alignment, even on RV32. */
++#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
++
++/* Define this macro if the code for function profiling should come
++   before the function prologue.  Normally, the profiling code comes
++   after.  */
++
++/* #define PROFILE_BEFORE_PROLOGUE */
++
++/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
++   the stack pointer does not matter.  The value is tested only in
++   functions that have frame pointers.
++   No definition is equivalent to always zero.  */
++
++#define EXIT_IGNORE_STACK 1
++
++
++/* Trampolines are a block of code followed by two pointers.  */
++
++#define TRAMPOLINE_CODE_SIZE 16
++#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
++#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
++
++/* Addressing modes, and classification of registers for them.  */
++
++#define REGNO_OK_FOR_INDEX_P(REGNO) 0
++#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
++  riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
++
++/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
++   and check its validity for a certain class.
++   We have two alternate definitions for each of them.
++   The usual definition accepts all pseudo regs; the other rejects them all.
++   The symbol REG_OK_STRICT causes the latter definition to be used.
++
++   Most source files want to accept pseudo regs in the hope that
++   they will get allocated to the class that the insn wants them to be in.
++   Some source files that are used after register allocation
++   need to be strict.  */
++
++#ifndef REG_OK_STRICT
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++  riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
++#else
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++  riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
++#endif
++
++#define REG_OK_FOR_INDEX_P(X) 0
++
++
++
++/* Maximum number of registers that can appear in a valid memory address.  */
++
++#define MAX_REGS_PER_ADDRESS 1
++
++#define CONSTANT_ADDRESS_P(X) \
++  (CONSTANT_P (X) && memory_address_p (SImode, X))
++
++/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
++   'the start of the function that this code is output in'.  */
++
++#define ASM_OUTPUT_LABELREF(FILE,NAME)  \
++  if (strcmp (NAME, "..CURRENT_FUNCTION") == 0)				\
++    asm_fprintf ((FILE), "%U%s",					\
++		 XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));	\
++  else									\
++    asm_fprintf ((FILE), "%U%s", (NAME))
++
++/* This flag marks functions that cannot be lazily bound.  */
++#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
++#define SYMBOL_REF_BIND_NOW_P(RTX) \
++  ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
++
++#define JUMP_TABLES_IN_TEXT_SECTION 0
++#define CASE_VECTOR_MODE SImode
++#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
++
++/* The load-address macro is used for PC-relative addressing of symbols
++   that bind locally.  Don't use it for symbols that should be addressed
++   via the GOT.  Also, avoid it for CM_MEDLOW, where LUI addressing
++   currently results in more opportunities for linker relaxation.  */
++#define USE_LOAD_ADDRESS_MACRO(sym)					\
++  (!TARGET_EXPLICIT_RELOCS &&						\
++   ((flag_pic								\
++     && ((SYMBOL_REF_P (sym) && SYMBOL_REF_LOCAL_P (sym))		\
++	 || ((GET_CODE (sym) == CONST)					\
++	     && SYMBOL_REF_P (XEXP (XEXP (sym, 0),0))			\
++	     && SYMBOL_REF_LOCAL_P (XEXP (XEXP (sym, 0),0)))))		\
++     || riscv_cmodel == CM_MEDANY))
++
++/* Define this as 1 if `char' should by default be signed; else as 0.  */
++#define DEFAULT_SIGNED_CHAR 0
++
++/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
++#define MOVE_MAX UNITS_PER_WORD
++#define MAX_MOVE_MAX 8
++
++#define SLOW_BYTE_ACCESS 0
++
++#define SHIFT_COUNT_TRUNCATED 1
++
++/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
++   is done just by pretending it is already truncated.  */
++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
++  (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
++
++/* Specify the machine mode that pointers have.
++   After generation of rtl, the compiler makes no further distinction
++   between pointers and any other objects of this machine mode.  */
++
++#ifndef Pmode
++#define Pmode (TARGET_64BIT ? DImode : SImode)
++#endif
++
++/* Give call MEMs SImode since it is the "most permissive" mode
++   for both 32-bit and 64-bit targets.  */
++
++#define FUNCTION_MODE SImode
++
++/* A C expression for the cost of a branch instruction.  A value of 2
++   seems to minimize code size.  */
++
++#define BRANCH_COST(speed_p, predictable_p) \
++  ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
++
++#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
++
++/* Control the assembler format that we output.  */
++
++/* Output to assembler file text saying following lines
++   may contain character constants, extra white space, comments, etc.  */
++
++#ifndef ASM_APP_ON
++#define ASM_APP_ON " #APP\n"
++#endif
++
++/* Output to assembler file text saying following lines
++   no longer contain unusual constructs.  */
++
++#ifndef ASM_APP_OFF
++#define ASM_APP_OFF " #NO_APP\n"
++#endif
++
++#define REGISTER_NAMES						\
++{ "zero","ra",  "sp",  "gp",  "tp",  "t0",  "t1",  "t2",	\
++  "s0",  "s1",  "a0",  "a1",  "a2",  "a3",  "a4",  "a5",	\
++  "a6",  "a7",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",	\
++  "s8",  "s9",  "s10", "s11", "t3",  "t4",  "t5",  "t6",	\
++  "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",	\
++  "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",	\
++  "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",	\
++  "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11",	\
++  "arg", "frame", }
++
++#define ADDITIONAL_REGISTER_NAMES					\
++{									\
++  { "x0",	 0 + GP_REG_FIRST },					\
++  { "x1",	 1 + GP_REG_FIRST },					\
++  { "x2",	 2 + GP_REG_FIRST },					\
++  { "x3",	 3 + GP_REG_FIRST },					\
++  { "x4",	 4 + GP_REG_FIRST },					\
++  { "x5",	 5 + GP_REG_FIRST },					\
++  { "x6",	 6 + GP_REG_FIRST },					\
++  { "x7",	 7 + GP_REG_FIRST },					\
++  { "x8",	 8 + GP_REG_FIRST },					\
++  { "x9",	 9 + GP_REG_FIRST },					\
++  { "x10",	10 + GP_REG_FIRST },					\
++  { "x11",	11 + GP_REG_FIRST },					\
++  { "x12",	12 + GP_REG_FIRST },					\
++  { "x13",	13 + GP_REG_FIRST },					\
++  { "x14",	14 + GP_REG_FIRST },					\
++  { "x15",	15 + GP_REG_FIRST },					\
++  { "x16",	16 + GP_REG_FIRST },					\
++  { "x17",	17 + GP_REG_FIRST },					\
++  { "x18",	18 + GP_REG_FIRST },					\
++  { "x19",	19 + GP_REG_FIRST },					\
++  { "x20",	20 + GP_REG_FIRST },					\
++  { "x21",	21 + GP_REG_FIRST },					\
++  { "x22",	22 + GP_REG_FIRST },					\
++  { "x23",	23 + GP_REG_FIRST },					\
++  { "x24",	24 + GP_REG_FIRST },					\
++  { "x25",	25 + GP_REG_FIRST },					\
++  { "x26",	26 + GP_REG_FIRST },					\
++  { "x27",	27 + GP_REG_FIRST },					\
++  { "x28",	28 + GP_REG_FIRST },					\
++  { "x29",	29 + GP_REG_FIRST },					\
++  { "x30",	30 + GP_REG_FIRST },					\
++  { "x31",	31 + GP_REG_FIRST },					\
++  { "f0",	 0 + FP_REG_FIRST },					\
++  { "f1",	 1 + FP_REG_FIRST },					\
++  { "f2",	 2 + FP_REG_FIRST },					\
++  { "f3",	 3 + FP_REG_FIRST },					\
++  { "f4",	 4 + FP_REG_FIRST },					\
++  { "f5",	 5 + FP_REG_FIRST },					\
++  { "f6",	 6 + FP_REG_FIRST },					\
++  { "f7",	 7 + FP_REG_FIRST },					\
++  { "f8",	 8 + FP_REG_FIRST },					\
++  { "f9",	 9 + FP_REG_FIRST },					\
++  { "f10",	10 + FP_REG_FIRST },					\
++  { "f11",	11 + FP_REG_FIRST },					\
++  { "f12",	12 + FP_REG_FIRST },					\
++  { "f13",	13 + FP_REG_FIRST },					\
++  { "f14",	14 + FP_REG_FIRST },					\
++  { "f15",	15 + FP_REG_FIRST },					\
++  { "f16",	16 + FP_REG_FIRST },					\
++  { "f17",	17 + FP_REG_FIRST },					\
++  { "f18",	18 + FP_REG_FIRST },					\
++  { "f19",	19 + FP_REG_FIRST },					\
++  { "f20",	20 + FP_REG_FIRST },					\
++  { "f21",	21 + FP_REG_FIRST },					\
++  { "f22",	22 + FP_REG_FIRST },					\
++  { "f23",	23 + FP_REG_FIRST },					\
++  { "f24",	24 + FP_REG_FIRST },					\
++  { "f25",	25 + FP_REG_FIRST },					\
++  { "f26",	26 + FP_REG_FIRST },					\
++  { "f27",	27 + FP_REG_FIRST },					\
++  { "f28",	28 + FP_REG_FIRST },					\
++  { "f29",	29 + FP_REG_FIRST },					\
++  { "f30",	30 + FP_REG_FIRST },					\
++  { "f31",	31 + FP_REG_FIRST },					\
++}
++
++/* Globalizing directive for a label.  */
++#define GLOBAL_ASM_OP "\t.globl\t"
++
++/* This is how to store into the string LABEL
++   the symbol_ref name of an internal numbered label where
++   PREFIX is the class of label and NUM is the number within the class.
++   This is suitable for output with `assemble_name'.  */
++
++#undef ASM_GENERATE_INTERNAL_LABEL
++#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM)			\
++  sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
++
++/* This is how to output an element of a case-vector that is absolute.  */
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE)				\
++  fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
++
++/* This is how to output an element of a PIC case-vector. */
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)		\
++  fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n",				\
++	   LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
++
++/* This is how to output an assembler line
++   that says to advance the location counter
++   to a multiple of 2**LOG bytes.  */
++
++#define ASM_OUTPUT_ALIGN(STREAM,LOG)					\
++  fprintf (STREAM, "\t.align\t%d\n", (LOG))
++
++/* Define the strings to put out for each section in the object file.  */
++#define TEXT_SECTION_ASM_OP	"\t.text"	/* instructions */
++#define DATA_SECTION_ASM_OP	"\t.data"	/* large data */
++#define READONLY_DATA_SECTION_ASM_OP	"\t.section\t.rodata"
++#define BSS_SECTION_ASM_OP	"\t.bss"
++#define SBSS_SECTION_ASM_OP	"\t.section\t.sbss,\"aw\",@nobits"
++#define SDATA_SECTION_ASM_OP	"\t.section\t.sdata,\"aw\",@progbits"
++
++#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO)				\
++do									\
++  {									\
++    fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n",		\
++	     reg_names[STACK_POINTER_REGNUM],				\
++	     reg_names[STACK_POINTER_REGNUM],				\
++	     TARGET_64BIT ? "sd" : "sw",				\
++	     reg_names[REGNO],						\
++	     reg_names[STACK_POINTER_REGNUM]);				\
++  }									\
++while (0)
++
++#define ASM_OUTPUT_REG_POP(STREAM,REGNO)				\
++do									\
++  {									\
++    fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n",		\
++	     TARGET_64BIT ? "ld" : "lw",				\
++	     reg_names[REGNO],						\
++	     reg_names[STACK_POINTER_REGNUM],				\
++	     reg_names[STACK_POINTER_REGNUM],				\
++	     reg_names[STACK_POINTER_REGNUM]);				\
++  }									\
++while (0)
++
++#define ASM_COMMENT_START "#"
++
++#undef SIZE_TYPE
++#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
++
++#undef PTRDIFF_TYPE
++#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
++
++/* The maximum number of bytes that can be copied by one iteration of
++   a movmemsi loop; see riscv_block_move_loop.  */
++#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER 32
++
++/* The maximum number of bytes that can be copied by a straight-line
++   implementation of movmemsi; see riscv_block_move_straight.  We want
++   to make sure that any loop-based implementation will iterate at
++   least twice.  */
++#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
++
++/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
++
++#define RISCV_CALL_RATIO 6
++
++/* Any loop-based implementation of movmemsi will have at least
++   RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
++   moves, so allow individual copies of fewer elements.
++
++   When movmemsi is not available, use a value approximating
++   the length of a memcpy call sequence, so that move_by_pieces
++   will generate inline code if it is shorter than a function call.
++   Since move_by_pieces_ninsns counts memory-to-memory moves, but
++   we'll have to generate a load/store pair for each, halve the
++   value of RISCV_CALL_RATIO to take that into account.  */
++
++#define MOVE_RATIO(speed)				\
++  (HAVE_movmemsi					\
++   ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX		\
++   : RISCV_CALL_RATIO / 2)
++
++/* For CLEAR_RATIO, when optimizing for size, give a better estimate
++   of the length of a memset call, but use the default otherwise.  */
++
++#define CLEAR_RATIO(speed)\
++  ((speed) ? 15 : RISCV_CALL_RATIO)
++
++/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
++   optimizing for size adjust the ratio to account for the overhead of
++   loading the constant and replicating it across the word.  */
++
++#define SET_RATIO(speed) \
++  ((speed) ? 15 : RISCV_CALL_RATIO - 2)
++
++#ifndef HAVE_AS_TLS
++#define HAVE_AS_TLS 0
++#endif
++
++#ifndef USED_FOR_TARGET
++
++extern const enum reg_class riscv_regno_to_class[];
++extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
++#endif
++
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
++  (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
++
++/* ISA constants needed for code generation.  */
++#define OPCODE_LW    0x2003
++#define OPCODE_LD    0x3003
++#define OPCODE_AUIPC 0x17
++#define OPCODE_JALR  0x67
++#define SHIFT_RD  7
++#define SHIFT_RS1 15
++#define SHIFT_IMM 20
++#define IMM_BITS 12
++
++#define IMM_REACH (1LL << IMM_BITS)
++#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
++#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
++
++#endif /* ! GCC_RISCV_H */
+diff --git original-gcc/gcc/config/riscv/riscv.md gcc-6.2.0/gcc/config/riscv/riscv.md
+new file mode 100644
+index 0000000..9661bb3
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.md
+@@ -0,0 +1,2377 @@
++;; Machine description for RISC-V for GNU compiler.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++  ;; Floating-point moves.
++  UNSPEC_LOAD_LOW
++  UNSPEC_LOAD_HIGH
++  UNSPEC_STORE_WORD
++
++  ;; GP manipulation.
++  UNSPEC_EH_RETURN
++
++  ;; Symbolic accesses.
++  UNSPEC_ADDRESS_FIRST
++  UNSPEC_PCREL
++  UNSPEC_LOAD_GOT
++  UNSPEC_TLS
++  UNSPEC_TLS_LE
++  UNSPEC_TLS_IE
++  UNSPEC_TLS_GD
++
++  UNSPEC_AUIPC
++
++  ;; Register save and restore.
++  UNSPEC_GPR_SAVE
++  UNSPEC_GPR_RESTORE
++
++  ;; Blockage and synchronisation.
++  UNSPEC_BLOCKAGE
++  UNSPEC_FENCE
++  UNSPEC_FENCE_I
++])
++
++(define_constants
++  [(RETURN_ADDR_REGNUM		1)
++   (T0_REGNUM			5)
++   (T1_REGNUM			6)
++])
++
++(include "predicates.md")
++(include "constraints.md")
++
++;; ....................
++;;
++;;	Attributes
++;;
++;; ....................
++
++(define_attr "got" "unset,xgot_high,load"
++  (const_string "unset"))
++
++;; Classification of moves, extensions and truncations.  Most values
++;; are as for "type" (see below) but there are also the following
++;; move-specific values:
++;;
++;; andi		a single ANDI instruction
++;; shift_shift	a shift left followed by a shift right
++;;
++;; This attribute is used to determine the instruction's length and
++;; scheduling type.  For doubleword moves, the attribute always describes
++;; the split instructions; in some cases, it is more appropriate for the
++;; scheduling type to be "multi" instead.
++(define_attr "move_type"
++  "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
++   const,logical,arith,andi,shift_shift"
++  (const_string "unknown"))
++
++;; Main data type used by the insn
++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
++  (const_string "unknown"))
++
++;; True if the main data type is twice the size of a word.
++(define_attr "dword_mode" "no,yes"
++  (cond [(and (eq_attr "mode" "DI,DF")
++	      (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
++	 (const_string "yes")
++
++	 (and (eq_attr "mode" "TI,TF")
++	      (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
++	 (const_string "yes")]
++	(const_string "no")))
++
++;; Classification of each insn.
++;; branch	conditional branch
++;; jump		unconditional jump
++;; call		unconditional call
++;; load		load instruction(s)
++;; fpload	floating point load
++;; store	store instruction(s)
++;; fpstore	floating point store
++;; mtc		transfer to coprocessor
++;; mfc		transfer from coprocessor
++;; const	load constant
++;; arith	integer arithmetic instructions
++;; logical      integer logical instructions
++;; shift	integer shift instructions
++;; slt		set less than instructions
++;; imul		integer multiply 
++;; idiv		integer divide
++;; move		integer register move (addi rd, rs1, 0)
++;; fmove	floating point register move
++;; fadd		floating point add/subtract
++;; fmul		floating point multiply
++;; fmadd	floating point multiply-add
++;; fdiv		floating point divide
++;; fcmp		floating point compare
++;; fcvt		floating point convert
++;; fsqrt	floating point square root
++;; multi	multiword sequence (or user asm statements)
++;; nop		no operation
++;; ghost	an instruction that produces no real code
++(define_attr "type"
++  "unknown,branch,jump,call,load,fpload,store,fpstore,
++   mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
++   fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
++  (cond [(eq_attr "got" "load") (const_string "load")
++
++	 ;; If a doubleword move uses these expensive instructions,
++	 ;; it is usually better to schedule them in the same way
++	 ;; as the singleword form, rather than as "multi".
++	 (eq_attr "move_type" "load") (const_string "load")
++	 (eq_attr "move_type" "fpload") (const_string "fpload")
++	 (eq_attr "move_type" "store") (const_string "store")
++	 (eq_attr "move_type" "fpstore") (const_string "fpstore")
++	 (eq_attr "move_type" "mtc") (const_string "mtc")
++	 (eq_attr "move_type" "mfc") (const_string "mfc")
++
++	 ;; These types of move are always single insns.
++	 (eq_attr "move_type" "fmove") (const_string "fmove")
++	 (eq_attr "move_type" "arith") (const_string "arith")
++	 (eq_attr "move_type" "logical") (const_string "logical")
++	 (eq_attr "move_type" "andi") (const_string "logical")
++
++	 ;; These types of move are always split.
++	 (eq_attr "move_type" "shift_shift")
++	   (const_string "multi")
++
++	 ;; These types of move are split for doubleword modes only.
++	 (and (eq_attr "move_type" "move,const")
++	      (eq_attr "dword_mode" "yes"))
++	   (const_string "multi")
++	 (eq_attr "move_type" "move") (const_string "move")
++	 (eq_attr "move_type" "const") (const_string "const")]
++	(const_string "unknown")))
++
++;; Mode for conversion types (fcvt)
++;; I2S          integer to float single (SI/DI to SF)
++;; I2D          integer to float double (SI/DI to DF)
++;; S2I          float to integer (SF to SI/DI)
++;; D2I          float to integer (DF to SI/DI)
++;; D2S          double to float single
++;; S2D          float single to double
++
++(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" 
++  (const_string "unknown"))
++
++;; Length of instruction in bytes.
++(define_attr "length" ""
++   (cond [
++	  ;; Direct branch instructions have a range of [-0x1000,0xffc],
++	  ;; relative to the address of the delay slot.  If a branch is
++	  ;; outside this range, convert a branch like:
++	  ;;
++	  ;;	bne	r1,r2,target
++	  ;;
++	  ;; to:
++	  ;;
++	  ;;	beq	r1,r2,1f
++	  ;;  j target
++	  ;; 1:
++	  ;;
++	  (eq_attr "type" "branch")
++	  (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
++				  (le (minus (pc) (match_dup 0)) (const_int 4092)))
++	  (const_int 4)
++	  (const_int 8))
++
++	  ;; Conservatively assume calls take two instructions (AUIPC + JALR).
++	  ;; The linker will opportunistically relax the sequence to JAL.
++	  (eq_attr "type" "call") (const_int 8)
++
++	  ;; "Ghost" instructions occupy no space.
++	  (eq_attr "type" "ghost") (const_int 0)
++
++	  (eq_attr "got" "load") (const_int 8)
++
++	  (eq_attr "type" "fcmp") (const_int 8)
++
++	  ;; SHIFT_SHIFTs are decomposed into two separate instructions.
++	  (eq_attr "move_type" "shift_shift")
++		(const_int 8)
++
++	  ;; Check for doubleword moves that are decomposed into two
++	  ;; instructions.
++	  (and (eq_attr "move_type" "mtc,mfc,move")
++	       (eq_attr "dword_mode" "yes"))
++	  (const_int 8)
++
++	  ;; Doubleword CONST{,N} moves are split into two word
++	  ;; CONST{,N} moves.
++	  (and (eq_attr "move_type" "const")
++	       (eq_attr "dword_mode" "yes"))
++	  (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
++
++	  ;; Otherwise, constants, loads and stores are handled by external
++	  ;; routines.
++	  (eq_attr "move_type" "load,fpload")
++	  (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
++	  (eq_attr "move_type" "store,fpstore")
++	  (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
++	  ] (const_int 4)))
++
++;; Is copying of this instruction disallowed?
++(define_attr "cannot_copy" "no,yes" (const_string "no"))
++
++;; Describe a user's asm statement.
++(define_asm_attributes
++  [(set_attr "type" "multi")])
++
++;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
++;; from the same template.
++(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
++(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
++
++;; A copy of GPR that can be used when a pattern has two independent
++;; modes.
++(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :P to be used for patterns that operate on
++;; pointer-sized quantities.  Exactly one of the two alternatives will match.
++(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
++
++;; 32-bit integer moves for which we provide move patterns.
++(define_mode_iterator IMOVE32 [SI])
++
++;; 64-bit modes for which we provide move patterns.
++(define_mode_iterator MOVE64 [DI DF])
++
++;; This mode iterator allows the QI and HI extension patterns to be
++;; defined from the same template.
++(define_mode_iterator SHORT [QI HI])
++
++;; Likewise the 64-bit truncate-and-shift patterns.
++(define_mode_iterator SUBDI [QI HI SI])
++(define_mode_iterator HISI [HI SI])
++(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :ANYF to be used where SF or DF is allowed.
++(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
++			    (DF "TARGET_DOUBLE_FLOAT")])
++(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
++			     (SF "TARGET_HARD_FLOAT")
++			     (DF "TARGET_DOUBLE_FLOAT")])
++
++;; A floating-point mode for which moves involving FPRs may need to be split.
++(define_mode_iterator SPLITF
++  [(DF "!TARGET_64BIT")
++   (DI "!TARGET_64BIT")
++   (TF "TARGET_64BIT")])
++
++;; This attribute gives the length suffix for a sign- or zero-extension
++;; instruction.
++(define_mode_attr size [(QI "b") (HI "h")])
++
++;; Mode attributes for loads.
++(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
++
++;; Instruction names for stores.
++(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
++
++;; This attribute gives the best constraint to use for registers of
++;; a given mode.
++(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
++
++;; This attribute gives the format suffix for floating-point operations.
++(define_mode_attr fmt [(SF "s") (DF "d")])
++
++;; This attribute gives the format suffix for atomic memory operations.
++(define_mode_attr amo [(SI "w") (DI "d")])
++
++;; This attribute gives the upper-case mode name for one unit of a
++;; floating-point mode.
++(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
++
++;; This attribute gives the integer mode that has half the size of
++;; the controlling mode.
++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
++
++;; This code iterator allows signed and unsigned widening multiplications
++;; to use the same template.
++(define_code_iterator any_extend [sign_extend zero_extend])
++
++;; This code iterator allows the two right shift instructions to be
++;; generated from the same template.
++(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
++
++;; This code iterator allows the three shift instructions to be generated
++;; from the same template.
++(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
++
++;; This code iterator allows unsigned and signed division to be generated
++;; from the same template.
++(define_code_iterator any_div [div udiv])
++
++;; This code iterator allows unsigned and signed modulus to be generated
++;; from the same template.
++(define_code_iterator any_mod [mod umod])
++
++;; These code iterators allow the signed and unsigned scc operations to use
++;; the same template.
++(define_code_iterator any_gt [gt gtu])
++(define_code_iterator any_ge [ge geu])
++(define_code_iterator any_lt [lt ltu])
++(define_code_iterator any_le [le leu])
++
++;; <u> expands to an empty string when doing a signed operation and
++;; "u" when doing an unsigned operation.
++(define_code_attr u [(sign_extend "") (zero_extend "u")
++		     (div "") (udiv "u")
++		     (mod "") (umod "u")
++		     (gt "") (gtu "u")
++		     (ge "") (geu "u")
++		     (lt "") (ltu "u")
++		     (le "") (leu "u")])
++
++;; <su> is like <u>, but the signed form expands to "s" rather than "".
++(define_code_attr su [(sign_extend "s") (zero_extend "u")])
++
++;; <optab> expands to the name of the optab for a particular code.
++(define_code_attr optab [(ashift "ashl")
++			 (ashiftrt "ashr")
++			 (lshiftrt "lshr")
++			 (ior "ior")
++			 (xor "xor")
++			 (and "and")
++			 (plus "add")
++			 (minus "sub")])
++
++;; <insn> expands to the name of the insn that implements a particular code.
++(define_code_attr insn [(ashift "sll")
++			(ashiftrt "sra")
++			(lshiftrt "srl")
++			(ior "or")
++			(xor "xor")
++			(and "and")
++			(plus "add")
++			(minus "sub")])
++
++;; Ghost instructions produce no real code and introduce no hazards.
++;; They exist purely to express an effect on dataflow.
++(define_insn_reservation "ghost" 0
++  (eq_attr "type" "ghost")
++  "nothing")
++
++;;
++;;  ....................
++;;
++;;	ADDITION
++;;
++;;  ....................
++;;
++
++(define_insn "add<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		   (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fadd.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "add<mode>3"
++  [(set (match_operand:GPR 0 "register_operand")
++	(plus:GPR (match_operand:GPR 1 "register_operand")
++		  (match_operand:GPR 2 "arith_operand")))]
++  "")
++
++(define_insn "*addsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r,r")
++	(plus:SI (match_operand:GPR 1 "register_operand" "r,r")
++		  (match_operand:GPR2 2 "arith_operand" "r,Q")))]
++  ""
++  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*adddi3"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++	(plus:DI (match_operand:DI 1 "register_operand" "r,r")
++		  (match_operand:DI 2 "arith_operand" "r,Q")))]
++  "TARGET_64BIT"
++  "add\t%0,%1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "DI")])
++
++(define_insn "*addsi3_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++	(sign_extend:DI
++	     (plus:SI (match_operand:SI 1 "register_operand" "r,r")
++		      (match_operand:SI 2 "arith_operand" "r,Q"))))]
++  "TARGET_64BIT"
++  "addw\t%0,%1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*adddisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r,r")
++	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
++		      (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
++  "TARGET_64BIT"
++  "addw\t%0,%1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*adddisisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r,r")
++	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
++		      (match_operand:SI 2 "arith_operand" "r,Q")))]
++  "TARGET_64BIT"
++  "addw\t%0,%1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*adddi3_truncsi"
++  [(set (match_operand:SI 0 "register_operand" "=r,r")
++          (truncate:SI
++	     (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++		      (match_operand:DI 2 "arith_operand" "r,Q"))))]
++  "TARGET_64BIT"
++  "addw\t%0,%1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++;;
++;;  ....................
++;;
++;;	SUBTRACTION
++;;
++;;  ....................
++;;
++
++(define_insn "sub<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		    (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fsub.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "sub<mode>3"
++  [(set (match_operand:GPR 0 "register_operand")
++	(minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
++		   (match_operand:GPR 2 "register_operand")))]
++  "")
++
++(define_insn "*subdi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
++		   (match_operand:DI 2 "register_operand" "r")))]
++  "TARGET_64BIT"
++  "sub\t%0,%z1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "DI")])
++
++(define_insn "*subsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
++		   (match_operand:GPR2 2 "register_operand" "r")))]
++  ""
++  { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*subsi3_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(sign_extend:DI
++	    (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++		      (match_operand:SI 2 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "subw\t%0,%z1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "DI")])
++
++(define_insn "*subdisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	     (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
++		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "subw\t%0,%z1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*subdisisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	     (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
++		      (match_operand:SI 2 "register_operand" "r")))]
++  "TARGET_64BIT"
++  "subw\t%0,%z1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*subsidisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	     (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "subw\t%0,%z1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++(define_insn "*subdi3_truncsi"
++  [(set (match_operand:SI 0 "register_operand" "=r,r")
++          (truncate:SI
++	     (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
++		      (match_operand:DI 2 "arith_operand" "r,Q"))))]
++  "TARGET_64BIT"
++  "subw\t%0,%z1,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
++;;
++;;  ....................
++;;
++;;	MULTIPLICATION
++;;
++;;  ....................
++;;
++
++(define_insn "mul<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		      (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fmul.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fmul")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "mul<mode>3"
++  [(set (match_operand:GPR 0 "register_operand")
++	(mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
++		   (match_operand:GPR 2 "register_operand")))]
++  "TARGET_MUL")
++
++(define_insn "*mulsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(mult:SI (match_operand:GPR 1 "register_operand" "r")
++		  (match_operand:GPR2 2 "register_operand" "r")))]
++  "TARGET_MUL"
++  { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
++  [(set_attr "type" "imul")
++   (set_attr "mode" "SI")])
++
++(define_insn "*muldisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	     (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
++		      (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++  "TARGET_MUL && TARGET_64BIT"
++  "mulw\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "SI")])
++
++(define_insn "*muldi3_truncsi"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++          (truncate:SI
++	     (mult:DI (match_operand:DI 1 "register_operand" "r")
++		      (match_operand:DI 2 "register_operand" "r"))))]
++  "TARGET_MUL && TARGET_64BIT"
++  "mulw\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "SI")])
++
++(define_insn "*muldi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(mult:DI (match_operand:DI 1 "register_operand" "r")
++		  (match_operand:DI 2 "register_operand" "r")))]
++  "TARGET_MUL && TARGET_64BIT"
++  "mul\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "DI")])
++
++;;
++;;  ........................
++;;
++;;	MULTIPLICATION HIGH-PART
++;;
++;;  ........................
++;;
++
++
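++;; A full 64x64->128-bit product is synthesized from two instructions:
++;; MUL yields the low 64 bits and MULH/MULHU the high 64 bits.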
++(define_expand "<u>mulditi3"
++  [(set (match_operand:TI 0 "register_operand")
++        (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
++                 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
++  "TARGET_MUL && TARGET_64BIT"
++{
++  rtx low = gen_reg_rtx (DImode);
++  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++  rtx high = gen_reg_rtx (DImode);
++  emit_insn (gen_<u>muldi3_highpart (high, operands[1], operands[2]));
++
++  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++  emit_move_insn (gen_highpart (DImode, operands[0]), high);
++  DONE;
++})
++
++(define_insn "<u>muldi3_highpart"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(truncate:DI
++	  (lshiftrt:TI
++	    (mult:TI (any_extend:TI
++		       (match_operand:DI 1 "register_operand" "r"))
++		     (any_extend:TI
++		       (match_operand:DI 2 "register_operand" "r")))
++	    (const_int 64))))]
++  "TARGET_MUL && TARGET_64BIT"
++  "mulh<u>\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "DI")])
++
++(define_expand "usmulditi3"
++  [(set (match_operand:TI 0 "register_operand")
++        (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
++                 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
++  "TARGET_MUL && TARGET_64BIT"
++{
++  rtx low = gen_reg_rtx (DImode);
++  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++  rtx high = gen_reg_rtx (DImode);
++  emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
++
++  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++  emit_move_insn (gen_highpart (DImode, operands[0]), high);
++  DONE;
++})
++
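++;; MULHSU multiplies a signed first source by an unsigned second source,
++;; so the template below swaps the operands: %2 is the sign-extended
++;; input and %1 the zero-extended one.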
++(define_insn "usmuldi3_highpart"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(truncate:DI
++	  (lshiftrt:TI
++	    (mult:TI (zero_extend:TI
++		       (match_operand:DI 1 "register_operand" "r"))
++		     (sign_extend:TI
++		       (match_operand:DI 2 "register_operand" "r")))
++	    (const_int 64))))]
++  "TARGET_MUL && TARGET_64BIT"
++  "mulhsu\t%0,%2,%1"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "DI")])
++
++(define_expand "<u>mulsidi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(mult:DI (any_extend:DI
++		   (match_operand:SI 1 "register_operand" "r"))
++		 (any_extend:DI
++		   (match_operand:SI 2 "register_operand" "r"))))]
++  "TARGET_MUL && !TARGET_64BIT"
++{
++  rtx temp = gen_reg_rtx (SImode);
++  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++  emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
++				     operands[1], operands[2]));
++  emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++  DONE;
++}
++  )
++
++(define_insn "<u>mulsi3_highpart"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(truncate:SI
++	  (lshiftrt:DI
++	    (mult:DI (any_extend:DI
++		       (match_operand:SI 1 "register_operand" "r"))
++		     (any_extend:DI
++		       (match_operand:SI 2 "register_operand" "r")))
++	    (const_int 32))))]
++  "TARGET_MUL && !TARGET_64BIT"
++  "mulh<u>\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "SI")])
++
++
++(define_expand "usmulsidi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(mult:DI (zero_extend:DI
++		   (match_operand:SI 1 "register_operand" "r"))
++		 (sign_extend:DI
++		   (match_operand:SI 2 "register_operand" "r"))))]
++  "TARGET_MUL && !TARGET_64BIT"
++{
++  rtx temp = gen_reg_rtx (SImode);
++  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++  emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
++				     operands[1], operands[2]));
++  emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++  DONE;
++}
++  )
++
++(define_insn "usmulsi3_highpart"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(truncate:SI
++	  (lshiftrt:DI
++	    (mult:DI (zero_extend:DI
++		       (match_operand:SI 1 "register_operand" "r"))
++		     (sign_extend:DI
++		       (match_operand:SI 2 "register_operand" "r")))
++	    (const_int 32))))]
++  "TARGET_MUL && !TARGET_64BIT"
++  "mulhsu\t%0,%2,%1"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "SI")])
++
++;;
++;;  ....................
++;;
++;;	DIVISION and REMAINDER
++;;
++;;  ....................
++;;
++
++(define_insn "<u>divsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(any_div:SI (match_operand:SI 1 "register_operand" "r")
++		  (match_operand:SI 2 "register_operand" "r")))]
++  "TARGET_DIV"
++  { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
++  [(set_attr "type" "idiv")
++   (set_attr "mode" "SI")])
++
++(define_insn "<u>divdi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(any_div:DI (match_operand:DI 1 "register_operand" "r")
++		  (match_operand:DI 2 "register_operand" "r")))]
++  "TARGET_DIV && TARGET_64BIT"
++  "div<u>\t%0,%1,%2"
++  [(set_attr "type" "idiv")
++   (set_attr "mode" "DI")])
++
++(define_insn "<u>modsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(any_mod:SI (match_operand:SI 1 "register_operand" "r")
++		  (match_operand:SI 2 "register_operand" "r")))]
++  "TARGET_DIV"
++  { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
++  [(set_attr "type" "idiv")
++   (set_attr "mode" "SI")])
++
++(define_insn "<u>moddi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(any_mod:DI (match_operand:DI 1 "register_operand" "r")
++		  (match_operand:DI 2 "register_operand" "r")))]
++  "TARGET_DIV && TARGET_64BIT"
++  "rem<u>\t%0,%1,%2"
++  [(set_attr "type" "idiv")
++   (set_attr "mode" "DI")])
++
++(define_insn "div<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(div:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		  (match_operand:ANYF 2 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT && TARGET_FDIV"
++  "fdiv.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fdiv")
++   (set_attr "mode" "<UNITMODE>")])
++
++;;
++;;  ....................
++;;
++;;	SQUARE ROOT
++;;
++;;  ....................
++
++(define_insn "sqrt<mode>2"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT && TARGET_FDIV"
++{
++    return "fsqrt.<fmt>\t%0,%1";
++}
++  [(set_attr "type" "fsqrt")
++   (set_attr "mode" "<UNITMODE>")])
++
++;; Floating point multiply accumulate instructions.
++
++(define_insn "fma<mode>4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (fma:ANYF
++      (match_operand:ANYF 1 "register_operand" "f")
++      (match_operand:ANYF 2 "register_operand" "f")
++      (match_operand:ANYF 3 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fmadd.<fmt>\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "fms<mode>4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (fma:ANYF
++      (match_operand:ANYF 1 "register_operand" "f")
++      (match_operand:ANYF 2 "register_operand" "f")
++      (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++  "TARGET_HARD_FLOAT"
++  "fmsub.<fmt>\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "<UNITMODE>")])
++
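++;; FNMADD.fmt computes -(a*b)-c and FNMSUB.fmt computes -(a*b)+c, which
++;; match the negated fma forms below.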
++(define_insn "nfma<mode>4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (neg:ANYF
++      (fma:ANYF
++        (match_operand:ANYF 1 "register_operand" "f")
++        (match_operand:ANYF 2 "register_operand" "f")
++        (match_operand:ANYF 3 "register_operand" "f"))))]
++  "TARGET_HARD_FLOAT"
++  "fnmadd.<fmt>\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "nfms<mode>4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (neg:ANYF
++      (fma:ANYF
++        (match_operand:ANYF 1 "register_operand" "f")
++        (match_operand:ANYF 2 "register_operand" "f")
++        (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
++  "TARGET_HARD_FLOAT"
++  "fnmsub.<fmt>\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++;; modulo signed zeros, -(a*b+c) == -c-a*b
++(define_insn "*nfma<mode>4_fastmath"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (minus:ANYF
++      (match_operand:ANYF 3 "register_operand" "f")
++      (mult:ANYF
++        (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++        (match_operand:ANYF 2 "register_operand" "f"))))]
++  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++  "fnmadd.<fmt>\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++;; modulo signed zeros, -(a*b-c) == c-a*b
++(define_insn "*nfms<mode>4_fastmath"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (minus:ANYF
++      (match_operand:ANYF 3 "register_operand" "f")
++      (mult:ANYF
++        (match_operand:ANYF 1 "register_operand" "f")
++        (match_operand:ANYF 2 "register_operand" "f"))))]
++  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++  "fnmsub.<fmt>\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++;;
++;;  ....................
++;;
++;;	ABSOLUTE VALUE
++;;
++;;  ....................
++
++(define_insn "abs<mode>2"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fabs.<fmt>\t%0,%1"
++  [(set_attr "type" "fmove")
++   (set_attr "mode" "<UNITMODE>")])
++
++
++;;
++;;  ....................
++;;
++;;	MIN/MAX
++;;
++;;  ....................
++
++(define_insn "smin<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++		   (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
++			    (match_operand:ANYF 2 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fmin.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fmove")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "smax<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++		   (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
++			    (match_operand:ANYF 2 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fmax.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fmove")
++   (set_attr "mode" "<UNITMODE>")])
++
++
++;;
++;;  ....................
++;;
++;;	NEGATION and ONE'S COMPLEMENT '
++;;
++;;  ....................
++
++(define_insn "neg<mode>2"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fneg.<fmt>\t%0,%1"
++  [(set_attr "type" "fmove")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "one_cmpl<mode>2"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(not:GPR (match_operand:GPR 1 "register_operand" "r")))]
++  ""
++  "not\t%0,%1"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "<MODE>")])
++
++;;
++;;  ....................
++;;
++;;	LOGICAL
++;;
++;;  ....................
++;;
++
++(define_insn "and<mode>3"
++  [(set (match_operand:GPR 0 "register_operand" "=r,r")
++	(and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
++  ""
++  "and\t%0,%1,%2"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "ior<mode>3"
++  [(set (match_operand:GPR 0 "register_operand" "=r,r")
++	(ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
++  ""
++  "or\t%0,%1,%2"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "xor<mode>3"
++  [(set (match_operand:GPR 0 "register_operand" "=r,r")
++	(xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++		 (match_operand:GPR 2 "arith_operand" "r,Q")))]
++  ""
++  "xor\t%0,%1,%2"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "<MODE>")])
++
++;;
++;;  ....................
++;;
++;;	TRUNCATION
++;;
++;;  ....................
++
++(define_insn "truncdfsf2"
++  [(set (match_operand:SF 0 "register_operand" "=f")
++	(float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
++  "TARGET_DOUBLE_FLOAT"
++  "fcvt.s.d\t%0,%1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "cnv_mode"	"D2S")   
++   (set_attr "mode"	"SF")])
++
++;; Integer truncation patterns.  Truncating to HImode/QImode is a no-op.
++;; Truncating from DImode to SImode is not, because we always keep SImode
++;; values sign-extended in a register so we can safely use DImode branches
++;; and comparisons on SImode values.
++
++(define_insn "truncdisi2"
++  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
++        (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
++  "TARGET_64BIT"
++  "@
++    sext.w\t%0,%1
++    sw\t%1,%0"
++  [(set_attr "move_type" "arith,store")
++   (set_attr "mode" "SI")])
++
++;; Combiner patterns to optimize shift/truncate combinations.
++
++(define_insn "*ashr_trunc<mode>"
++  [(set (match_operand:SUBDI 0 "register_operand" "=r")
++        (truncate:SUBDI
++	  (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
++		       (match_operand:DI 2 "const_arith_operand" ""))))]
++  "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
++  "sra\t%0,%1,%2"
++  [(set_attr "type" "shift")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "*lshr32_trunc<mode>"
++  [(set (match_operand:SUBDI 0 "register_operand" "=r")
++        (truncate:SUBDI
++	  (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
++		       (const_int 32))))]
++  "TARGET_64BIT"
++  "sra\t%0,%1,32"
++  [(set_attr "type" "shift")
++   (set_attr "mode" "<MODE>")])
++
++;;
++;;  ....................
++;;
++;;	ZERO EXTENSION
++;;
++;;  ....................
++
++;; Extension insns.
++
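++;; RV64I has no single register-to-register zero-extension from SImode,
++;; so the register alternative is split after reload into a shift pair
++;; (slli rd,rs,32; srli rd,rd,32); the memory alternative uses LWU.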
++(define_insn_and_split "zero_extendsidi2"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
++  "TARGET_64BIT"
++  "@
++   #
++   lwu\t%0,%1"
++  "&& reload_completed && REG_P (operands[1])"
++  [(set (match_dup 0)
++        (ashift:DI (match_dup 1) (const_int 32)))
++   (set (match_dup 0)
++        (lshiftrt:DI (match_dup 0) (const_int 32)))]
++  { operands[1] = gen_lowpart (DImode, operands[1]); }
++  [(set_attr "move_type" "shift_shift,load")
++   (set_attr "mode" "DI")])
++
++;; Combine is not allowed to convert this insn into a zero_extendsidi2
++;; because of TRULY_NOOP_TRUNCATION.
++
++(define_insn_and_split "*clear_upper32"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++        (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
++		(const_int 4294967295)))]
++  "TARGET_64BIT"
++{
++  if (which_alternative == 0)
++    return "#";
++
++  operands[1] = gen_lowpart (SImode, operands[1]);
++  return "lwu\t%0,%1";
++}
++  "&& reload_completed && REG_P (operands[1])"
++  [(set (match_dup 0)
++        (ashift:DI (match_dup 1) (const_int 32)))
++   (set (match_dup 0)
++        (lshiftrt:DI (match_dup 0) (const_int 32)))]
++  ""
++  [(set_attr "move_type" "shift_shift,load")
++   (set_attr "mode" "DI")])
++
++(define_insn_and_split "zero_extendhi<GPR:mode>2"
++  [(set (match_operand:GPR 0 "register_operand" "=r,r")
++        (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
++  ""
++  "@
++   #
++   lhu\t%0,%1"
++  "&& reload_completed && REG_P (operands[1])"
++  [(set (match_dup 0)
++        (ashift:GPR (match_dup 1) (match_dup 2)))
++   (set (match_dup 0)
++        (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
++  {
++    operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
++    operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
++  }
++  [(set_attr "move_type" "shift_shift,load")
++   (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "zero_extendqi<SUPERQI:mode>2"
++  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++        (zero_extend:SUPERQI
++	     (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
++  ""
++  "@
++   and\t%0,%1,0xff
++   lbu\t%0,%1"
++  [(set_attr "move_type" "andi,load")
++   (set_attr "mode" "<SUPERQI:MODE>")])
++
++;;
++;;  ....................
++;;
++;;	SIGN EXTENSION
++;;
++;;  ....................
++
++;; Extension insns.
++;; Those for integer source operand are ordered widest source type first.
++
++;; When TARGET_64BIT, all SImode integer registers should already be in
++;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2).  We can
++;; therefore get rid of register->register instructions if we constrain
++;; the source to be in the same register as the destination.
++;;
++;; The register alternative has type "arith" so that the pre-reload
++;; scheduler will treat it as a move.  This reflects what happens if
++;; the register alternative needs a reload.
++(define_insn_and_split "extendsidi2"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++        (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
++  "TARGET_64BIT"
++  "@
++   #
++   lw\t%0,%1"
++  "&& reload_completed && register_operand (operands[1], VOIDmode)"
++  [(set (match_dup 0) (match_dup 1))]
++{
++  if (REGNO (operands[0]) == REGNO (operands[1]))
++    {
++      emit_note (NOTE_INSN_DELETED);
++      DONE;
++    }
++  operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
++}
++  [(set_attr "move_type" "move,load")
++   (set_attr "mode" "DI")])
++
++(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
++  [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++        (sign_extend:SUPERQI
++	     (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
++  ""
++  "@
++   #
++   l<SHORT:size>\t%0,%1"
++  "&& reload_completed && REG_P (operands[1])"
++  [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
++   (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
++{
++  operands[0] = gen_lowpart (SImode, operands[0]);
++  operands[1] = gen_lowpart (SImode, operands[1]);
++  operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
++			 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
++}
++  [(set_attr "move_type" "shift_shift,load")
++   (set_attr "mode" "SI")])
++
++(define_insn "extendsfdf2"
++  [(set (match_operand:DF 0 "register_operand" "=f")
++	(float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
++  "TARGET_DOUBLE_FLOAT"
++  "fcvt.d.s\t%0,%1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "cnv_mode"	"S2D")   
++   (set_attr "mode"	"DF")])
++
++;;
++;;  ....................
++;;
++;;	CONVERSIONS
++;;
++;;  ....................
++
++(define_insn "fix_truncdfsi2"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(fix:SI (match_operand:DF 1 "register_operand" "f")))]
++  "TARGET_DOUBLE_FLOAT"
++  "fcvt.w.d %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"D2I")])
++
++
++(define_insn "fix_truncsfsi2"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(fix:SI (match_operand:SF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fcvt.w.s %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"S2I")])
++
++
++(define_insn "fix_truncdfdi2"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(fix:DI (match_operand:DF 1 "register_operand" "f")))]
++  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++  "fcvt.l.d %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"D2I")])
++
++
++(define_insn "fix_truncsfdi2"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(fix:DI (match_operand:SF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT && TARGET_64BIT"
++  "fcvt.l.s %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"S2I")])
++
++
++(define_insn "floatsidf2"
++  [(set (match_operand:DF 0 "register_operand" "=f")
++	(float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_DOUBLE_FLOAT"
++  "fcvt.d.w\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"I2D")])
++
++
++(define_insn "floatdidf2"
++  [(set (match_operand:DF 0 "register_operand" "=f")
++	(float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++  "fcvt.d.l\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"I2D")])
++
++
++(define_insn "floatsisf2"
++  [(set (match_operand:SF 0 "register_operand" "=f")
++	(float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_HARD_FLOAT"
++  "fcvt.s.w\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"I2S")])
++
++
++(define_insn "floatdisf2"
++  [(set (match_operand:SF 0 "register_operand" "=f")
++	(float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_HARD_FLOAT && TARGET_64BIT"
++  "fcvt.s.l\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"I2S")])
++
++
++(define_insn "floatunssidf2"
++  [(set (match_operand:DF 0 "register_operand" "=f")
++	(unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_DOUBLE_FLOAT"
++  "fcvt.d.wu\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"I2D")])
++
++
++(define_insn "floatunsdidf2"
++  [(set (match_operand:DF 0 "register_operand" "=f")
++	(unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++  "fcvt.d.lu\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"I2D")])
++
++
++(define_insn "floatunssisf2"
++  [(set (match_operand:SF 0 "register_operand" "=f")
++	(unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_HARD_FLOAT"
++  "fcvt.s.wu\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"I2S")])
++
++
++(define_insn "floatunsdisf2"
++  [(set (match_operand:SF 0 "register_operand" "=f")
++	(unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++  "TARGET_HARD_FLOAT && TARGET_64BIT"
++  "fcvt.s.lu\t%0,%z1"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"I2S")])
++
++
++(define_insn "fixuns_truncdfsi2"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
++  "TARGET_DOUBLE_FLOAT"
++  "fcvt.wu.d %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"D2I")])
++
++
++(define_insn "fixuns_truncsfsi2"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fcvt.wu.s %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"S2I")])
++
++
++(define_insn "fixuns_truncdfdi2"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
++  "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++  "fcvt.lu.d %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"DF")
++   (set_attr "cnv_mode"	"D2I")])
++
++
++(define_insn "fixuns_truncsfdi2"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT && TARGET_64BIT"
++  "fcvt.lu.s %0,%1,rtz"
++  [(set_attr "type"	"fcvt")
++   (set_attr "mode"	"SF")
++   (set_attr "cnv_mode"	"S2I")])
++
++;;
++;;  ....................
++;;
++;;	DATA MOVEMENT
++;;
++;;  ....................
++
++;; Lower-level instructions for loading an address from the GOT.
++;; We could use MEMs, but an unspec gives more optimization
++;; opportunities.
++
++(define_insn "got_load<mode>"
++   [(set (match_operand:P 0 "register_operand" "=r")
++       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++		 UNSPEC_LOAD_GOT))]
++  "flag_pic"
++  "la\t%0,%1"
++   [(set_attr "got" "load")
++    (set_attr "mode" "<MODE>")])
++
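++;; The %tprel_add annotation marks this add with a TLS relocation so the
++;; linker can relax the local-exec thread-pointer-relative sequence.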
++(define_insn "tls_add_tp_le<mode>"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(unspec:P [(match_operand:P 1 "register_operand" "r")
++		   (match_operand:P 2 "register_operand" "r")
++		   (match_operand:P 3 "symbolic_operand" "")]
++		  UNSPEC_TLS_LE))]
++  "!flag_pic || flag_pie"
++  "add\t%0,%1,%2,%%tprel_add(%3)"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_gd<mode>"
++  [(set (match_operand:P 0 "register_operand" "=r")
++       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++                 UNSPEC_TLS_GD))]
++  "flag_pic"
++  "la.tls.gd\t%0,%1"
++  [(set_attr "got" "load")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_ie<mode>"
++  [(set (match_operand:P 0 "register_operand" "=r")
++       (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++                 UNSPEC_TLS_IE))]
++  "flag_pic"
++  "la.tls.ie\t%0,%1"
++  [(set_attr "got" "load")
++   (set_attr "mode" "<MODE>")])
++
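++;; The template defines a local label (.LA<n>) at the AUIPC so that a
++;; later instruction can refer back to it with a matching %pcrel_lo;
++;; cannot_copy keeps that label unique.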
++(define_insn "auipc<mode>"
++  [(set (match_operand:P 0 "register_operand" "=r")
++       (unspec:P [(match_operand:P 1 "symbolic_operand" "")
++                  (match_operand:P 2 "const_int_operand")
++                  (pc)]
++                 UNSPEC_AUIPC))]
++  ""
++  ".LA%2: auipc\t%0,%h1"
++  [(set_attr "type" "arith")
++   (set_attr "cannot_copy" "yes")])
++
++;; Instructions for adding the low 12 bits of an address to a register.
++;; Operand 2 is the address: riscv_print_operand works out which relocation
++;; should be applied.
++
++(define_insn "*low<mode>"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(lo_sum:P (match_operand:P 1 "register_operand" "r")
++		  (match_operand:P 2 "symbolic_operand" "")))]
++  ""
++  "add\t%0,%1,%R2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "<MODE>")])
++
++;; Allow combine to split complex const_int load sequences, using operand 2
++;; to store the intermediate results.  See move_operand for details.
++(define_split
++  [(set (match_operand:GPR 0 "register_operand")
++	(match_operand:GPR 1 "splittable_const_int_operand"))
++   (clobber (match_operand:GPR 2 "register_operand"))]
++  ""
++  [(const_int 0)]
++{
++  riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
++  DONE;
++})
++
++;; Likewise, for symbolic operands.
++(define_split
++  [(set (match_operand:P 0 "register_operand")
++	(match_operand:P 1))
++   (clobber (match_operand:P 2 "register_operand"))]
++  "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
++  [(set (match_dup 0) (match_dup 3))]
++{
++  riscv_split_symbol (operands[2], operands[1],
++		     MAX_MACHINE_MODE, &operands[3]);
++})
++
++;; 64-bit integer moves
++
++;; Unlike most other insns, the move insns can't be split with '
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++
++(define_expand "movdi"
++  [(set (match_operand:DI 0 "")
++	(match_operand:DI 1 ""))]
++  ""
++{
++  if (riscv_legitimize_move (DImode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_insn "*movdi_32bit"
++  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,  *f,*f,*r,*f,*m")
++	(match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f"))]
++  "!TARGET_64BIT
++   && (register_operand (operands[0], DImode)
++       || reg_or_0_operand (operands[1], DImode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
++   (set_attr "mode" "DI")])
++
++(define_insn "*movdi_64bit"
++  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m,*f,*f,*r,*f,*m")
++	(match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f"))]
++  "TARGET_64BIT
++   && (register_operand (operands[0], DImode)
++       || reg_or_0_operand (operands[1], DImode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
++   (set_attr "mode" "DI")])
++
++;; 32-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++
++(define_expand "mov<mode>"
++  [(set (match_operand:IMOVE32 0 "")
++	(match_operand:IMOVE32 1 ""))]
++  ""
++{
++  if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_insn "*mov<mode>_internal"
++  [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
++	(match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
++  "(register_operand (operands[0], <MODE>mode)
++    || reg_or_0_operand (operands[1], <MODE>mode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
++   (set_attr "mode" "SI")])
++
++;; 16-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
++
++(define_expand "movhi"
++  [(set (match_operand:HI 0 "")
++	(match_operand:HI 1 ""))]
++  ""
++{
++  if (riscv_legitimize_move (HImode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_insn "*movhi_internal"
++  [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++	(match_operand:HI 1 "move_operand"         "r,T,m,rJ,*r*J,*f"))]
++  "(register_operand (operands[0], HImode)
++    || reg_or_0_operand (operands[1], HImode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++   (set_attr "mode" "HI")])
++
++;; HImode constant generation; see riscv_move_integer for details.
++;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
++
++(define_insn "add<mode>hi3"
++  [(set (match_operand:HI 0 "register_operand" "=r,r")
++	(plus:HI (match_operand:HISI 1 "register_operand" "r,r")
++		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
++  ""
++  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++  [(set_attr "type" "arith")
++   (set_attr "mode" "HI")])
++
++(define_insn "xor<mode>hi3"
++  [(set (match_operand:HI 0 "register_operand" "=r,r")
++	(xor:HI (match_operand:HISI 1 "register_operand" "r,r")
++		  (match_operand:HISI 2 "arith_operand" "r,Q")))]
++  ""
++  "xor\t%0,%1,%2"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "HI")])
++
++;; 8-bit Integer moves
++
++(define_expand "movqi"
++  [(set (match_operand:QI 0 "")
++	(match_operand:QI 1 ""))]
++  ""
++{
++  if (riscv_legitimize_move (QImode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_insn "*movqi_internal"
++  [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++	(match_operand:QI 1 "move_operand"         "r,I,m,rJ,*r*J,*f"))]
++  "(register_operand (operands[0], QImode)
++    || reg_or_0_operand (operands[1], QImode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++   (set_attr "mode" "QI")])
++
++;; 32-bit floating point moves
++
++(define_expand "movsf"
++  [(set (match_operand:SF 0 "")
++	(match_operand:SF 1 ""))]
++  ""
++{
++  if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_insn "*movsf_hardfloat"
++  [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++	(match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
++  "TARGET_HARD_FLOAT
++   && (register_operand (operands[0], SFmode)
++       || reg_or_0_operand (operands[1], SFmode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++   (set_attr "mode" "SF")])
++
++(define_insn "*movsf_softfloat"
++  [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
++	(match_operand:SF 1 "move_operand" "Gr,m,r"))]
++  "!TARGET_HARD_FLOAT
++   && (register_operand (operands[0], SFmode)
++       || reg_or_0_operand (operands[1], SFmode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,load,store")
++   (set_attr "mode" "SF")])
++
++;; 64-bit floating point moves
++
++(define_expand "movdf"
++  [(set (match_operand:DF 0 "")
++	(match_operand:DF 1 ""))]
++  ""
++{
++  if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
++    DONE;
++})
++
++;; In RV32, we lack mtf.d/mff.d.  Go through memory instead.
++;; (except for moving a constant 0 to an FPR; for that we use fcvt.d.w.)
++(define_insn "*movdf_hardfloat_rv32"
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
++	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
++  "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
++   && (register_operand (operands[0], DFmode)
++       || reg_or_0_operand (operands[1], DFmode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
++   (set_attr "mode" "DF")])
++
++(define_insn "*movdf_hardfloat_rv64"
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
++  "TARGET_64BIT && TARGET_DOUBLE_FLOAT
++   && (register_operand (operands[0], DFmode)
++       || reg_or_0_operand (operands[1], DFmode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++   (set_attr "mode" "DF")])
++
++(define_insn "*movdf_softfloat"
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
++	(match_operand:DF 1 "move_operand" "rG,m,rG"))]
++  "!TARGET_DOUBLE_FLOAT
++   && (register_operand (operands[0], DFmode)
++       || reg_or_0_operand (operands[1], DFmode))"
++  { return riscv_output_move (operands[0], operands[1]); }
++  [(set_attr "move_type" "move,load,store")
++   (set_attr "mode" "DF")])
++
++(define_split
++  [(set (match_operand:MOVE64 0 "nonimmediate_operand")
++	(match_operand:MOVE64 1 "move_operand"))]
++  "reload_completed && !TARGET_64BIT
++   && riscv_split_64bit_move_p (operands[0], operands[1])"
++  [(const_int 0)]
++{
++  riscv_split_doubleword_move (operands[0], operands[1]);
++  DONE;
++})
++
++;; Floating-point moves that are split into word-sized halves.
++
++;; Load the low word of operand 0 with operand 1.
++(define_insn "load_low<mode>"
++  [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
++	(unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
++		       UNSPEC_LOAD_LOW))]
++  "TARGET_HARD_FLOAT"
++{
++  operands[0] = riscv_subword (operands[0], 0);
++  return riscv_output_move (operands[0], operands[1]);
++}
++  [(set_attr "move_type" "mtc,fpload")
++   (set_attr "mode" "<HALFMODE>")])
++
++;; Load the high word of operand 0 from operand 1, preserving the value
++;; in the low word.
++(define_insn "load_high<mode>"
++  [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
++	(unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
++			(match_operand:SPLITF 2 "register_operand" "0,0")]
++		       UNSPEC_LOAD_HIGH))]
++  "TARGET_HARD_FLOAT"
++{
++  operands[0] = riscv_subword (operands[0], 1);
++  return riscv_output_move (operands[0], operands[1]);
++}
++  [(set_attr "move_type" "mtc,fpload")
++   (set_attr "mode" "<HALFMODE>")])
++
++;; Store one word of operand 1 in operand 0.  Operand 2 is 1 to store the
++;; high word and 0 to store the low word.
++(define_insn "store_word<mode>"
++  [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
++	(unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
++			    (match_operand 2 "const_int_operand")]
++			   UNSPEC_STORE_WORD))]
++  "TARGET_HARD_FLOAT"
++{
++  operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
++  return riscv_output_move (operands[0], operands[1]);
++}
++  [(set_attr "move_type" "mfc,fpstore")
++   (set_attr "mode" "<HALFMODE>")])
++
++;; Expand in-line code to clear the instruction cache between operand[0] and
++;; operand[1].
++(define_expand "clear_cache"
++  [(match_operand 0 "pmode_register_operand")
++   (match_operand 1 "pmode_register_operand")]
++  ""
++  "
++{
++  emit_insn(gen_fence_i());
++  DONE;
++}")
++
++(define_insn "fence"
++  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
++  ""
++  "%|fence%-")
++
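++;; FENCE.I synchronizes the instruction fetch stream with prior data
++;; writes on the executing hart; clear_cache above relies on it.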
++(define_insn "fence_i"
++  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
++  ""
++  "fence.i")
++
++;; Block moves, see riscv.c for more details.
++;; Argument 0 is the destination
++;; Argument 1 is the source
++;; Argument 2 is the length
++;; Argument 3 is the alignment
++
++(define_expand "movmemsi"
++  [(parallel [(set (match_operand:BLK 0 "general_operand")
++		   (match_operand:BLK 1 "general_operand"))
++	      (use (match_operand:SI 2 ""))
++	      (use (match_operand:SI 3 "const_int_operand"))])]
++  "!TARGET_MEMCPY"
++{
++  if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
++    DONE;
++  else
++    FAIL;
++})
++
++;;
++;;  ....................
++;;
++;;	SHIFTS
++;;
++;;  ....................
++
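++;; Constant shift amounts are masked to the operand width, matching the
++;; hardware, which uses only the low 5 (32-bit) or 6 (64-bit) bits of the
++;; shift amount.  SImode shifts on RV64 use the W forms so the result
++;; stays sign-extended.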
++(define_insn "<optab>si3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(any_shift:SI (match_operand:SI 1 "register_operand" "r")
++		       (match_operand:SI 2 "arith_operand" "rI")))]
++  ""
++{
++  if (GET_CODE (operands[2]) == CONST_INT)
++    operands[2] = GEN_INT (INTVAL (operands[2])
++			   & (GET_MODE_BITSIZE (SImode) - 1));
++
++  return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
++}
++  [(set_attr "type" "shift")
++   (set_attr "mode" "SI")])
++
++(define_insn "*<optab>disi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	     (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
++		      (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
++  "TARGET_64BIT"
++  "<insn>w\t%0,%1,%2"
++  [(set_attr "type" "shift")
++   (set_attr "mode" "SI")])
++
++(define_insn "*ashldi3_truncsi"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++          (truncate:SI
++	     (ashift:DI (match_operand:DI 1 "register_operand" "r")
++		      (match_operand:DI 2 "const_arith_operand" "I"))))]
++  "TARGET_64BIT && INTVAL (operands[2]) < 32"
++  "sllw\t%0,%1,%2"
++  [(set_attr "type" "shift")
++   (set_attr "mode" "SI")])
++
++(define_insn "*ashldisi3"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	  (ashift:SI (match_operand:GPR 1 "register_operand" "r")
++		      (match_operand:GPR2 2 "arith_operand" "rI")))]
++  "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
++  "sllw\t%0,%1,%2"
++  [(set_attr "type" "shift")
++   (set_attr "mode" "SI")])
++
++(define_insn "<optab>di3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(any_shift:DI (match_operand:DI 1 "register_operand" "r")
++		       (match_operand:DI 2 "arith_operand" "rI")))]
++  "TARGET_64BIT"
++{
++  if (GET_CODE (operands[2]) == CONST_INT)
++    operands[2] = GEN_INT (INTVAL (operands[2])
++			   & (GET_MODE_BITSIZE (DImode) - 1));
++
++  return "<insn>\t%0,%1,%2";
++}
++  [(set_attr "type" "shift")
++   (set_attr "mode" "DI")])
++
++(define_insn "<optab>si3_extend"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(sign_extend:DI
++	   (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++			 (match_operand:SI 2 "arith_operand" "rI"))))]
++  "TARGET_64BIT"
++{
++  if (GET_CODE (operands[2]) == CONST_INT)
++    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
++
++  return "<insn>w\t%0,%1,%2";
++}
++  [(set_attr "type" "shift")
++   (set_attr "mode" "SI")])
++
++;;
++;;  ....................
++;;
++;;	CONDITIONAL BRANCHES
++;;
++;;  ....................
++
++;; Conditional branches
++
++(define_insn "*branch_order<mode>"
++  [(set (pc)
++	(if_then_else
++	 (match_operator 1 "order_operator"
++			 [(match_operand:GPR 2 "register_operand" "r")
++			  (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
++	 (label_ref (match_operand 0 "" ""))
++	 (pc)))]
++  ""
++{
++  if (GET_CODE (operands[3]) == CONST_INT)
++    return "b%C1z\t%2,%0";
++  return "b%C1\t%2,%3,%0";
++}
++  [(set_attr "type" "branch")
++   (set_attr "mode" "none")])
++
++;; Used to implement built-in functions.
++(define_expand "condjump"
++  [(set (pc)
++	(if_then_else (match_operand 0)
++		      (label_ref (match_operand 1))
++		      (pc)))])
++
++(define_expand "cbranch<mode>4"
++  [(set (pc)
++	(if_then_else (match_operator 0 "comparison_operator"
++		       [(match_operand:GPR 1 "register_operand")
++		        (match_operand:GPR 2 "nonmemory_operand")])
++		      (label_ref (match_operand 3 ""))
++		      (pc)))]
++  ""
++{
++  riscv_expand_conditional_branch (operands);
++  DONE;
++})
++
++(define_expand "cbranch<mode>4"
++  [(set (pc)
++	(if_then_else (match_operator 0 "comparison_operator"
++		       [(match_operand:ANYF 1 "register_operand")
++		        (match_operand:ANYF 2 "register_operand")])
++		      (label_ref (match_operand 3 ""))
++		      (pc)))]
++  ""
++{
++  riscv_expand_conditional_branch (operands);
++  DONE;
++})
++
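++;; A branch on a single bit is implemented by shifting the tested bit
++;; into the sign position of a scratch register and branching on its
++;; sign: EQ becomes a bgez, NE becomes a bltz.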
++(define_insn_and_split "*branch_on_bit<GPR:mode>"
++  [(set (pc)
++	(if_then_else
++	 (match_operator 0 "equality_operator"
++	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
++		 (const_int 1)
++		 (match_operand 3 "branch_on_bit_operand"))
++		 (const_int 0)])
++	 (label_ref (match_operand 1))
++	 (pc)))
++   (clobber (match_scratch:GPR 4 "=&r"))]
++  ""
++  "#"
++  "reload_completed"
++  [(set (match_dup 4)
++        (ashift:GPR (match_dup 2) (match_dup 3)))
++   (set (pc)
++	(if_then_else
++	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
++	 (label_ref (match_operand 1))
++	 (pc)))]
++{
++  int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
++  operands[3] = GEN_INT (shift);
++
++  if (GET_CODE (operands[0]) == EQ)
++    operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
++  else
++    operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
++})
++
++(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
++  [(set (pc)
++	(if_then_else
++	 (match_operator 0 "equality_operator"
++	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
++		 (match_operand 3 "branch_on_bit_operand")
++		 (const_int 0))
++		 (const_int 0)])
++	 (label_ref (match_operand 1))
++	 (pc)))
++   (clobber (match_scratch:GPR 4 "=&r"))]
++  ""
++  "#"
++  "reload_completed"
++  [(set (match_dup 4)
++        (ashift:GPR (match_dup 2) (match_dup 3)))
++   (set (pc)
++	(if_then_else
++	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
++	 (label_ref (match_operand 1))
++	 (pc)))]
++{
++  operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
++})
++
++;;
++;;  ....................
++;;
++;;	SETTING A REGISTER FROM A COMPARISON
++;;
++;;  ....................
++
++;; Destination is always set in SI mode.
++
++(define_expand "cstore<mode>4"
++  [(set (match_operand:SI 0 "register_operand")
++	(match_operator:SI 1 "order_operator"
++	 [(match_operand:GPR 2 "register_operand")
++	  (match_operand:GPR 3 "nonmemory_operand")]))]
++  ""
++{
++  riscv_expand_scc (operands);
++  DONE;
++})
++
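++;; There is no floating-point "not equal" instruction; x != y is
++;; computed as FEQ followed by SEQZ on the result.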
++(define_insn "cstore<mode>4"
++   [(set (match_operand:SI 0 "register_operand" "=r")
++        (match_operator:SI 1 "fp_order_operator"
++	      [(match_operand:ANYF 2 "register_operand" "f")
++	       (match_operand:ANYF 3 "register_operand" "f")]))]
++  "TARGET_HARD_FLOAT"
++{
++  if (GET_CODE (operands[1]) == NE)
++    return "feq.<fmt>\t%0,%2,%3; seqz %0, %0";
++  return "f%C1.<fmt>\t%0,%2,%3";
++}
++  [(set_attr "type" "fcmp")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
++  [(set (match_operand:GPR2 0 "register_operand" "=r")
++	(eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
++		 (const_int 0)))]
++  ""
++  "seqz\t%0,%1"
++  [(set_attr "type" "slt")
++   (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
++  [(set (match_operand:GPR2 0 "register_operand" "=r")
++	(ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
++		 (const_int 0)))]
++  ""
++  "snez\t%0,%1"
++  [(set_attr "type" "slt")
++   (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
++  [(set (match_operand:GPR2 0 "register_operand" "=r")
++	(any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
++  ""
++  "slt<u>\t%0,%z2,%1"
++  [(set_attr "type" "slt")
++   (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
++  [(set (match_operand:GPR2 0 "register_operand" "=r")
++	(any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
++		     (const_int 1)))]
++  ""
++  "slt<u>\t%0,zero,%1"
++  [(set_attr "type" "slt")
++   (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
++  [(set (match_operand:GPR2 0 "register_operand" "=r")
++	(any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:GPR 2 "arith_operand" "rI")))]
++  ""
++  "slt<u>\t%0,%1,%2"
++  [(set_attr "type" "slt")
++   (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
++  [(set (match_operand:GPR2 0 "register_operand" "=r")
++	(any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:GPR 2 "sle_operand" "")))]
++  ""
++{
++  operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
++  return "slt<u>\t%0,%1,%2";
++}
++  [(set_attr "type" "slt")
++   (set_attr "mode" "<GPR:MODE>")])
++
++;;
++;;  ....................
++;;
++;;	UNCONDITIONAL BRANCHES
++;;
++;;  ....................
++
++;; Unconditional branches.
++
++(define_insn "jump"
++  [(set (pc)
++	(label_ref (match_operand 0 "" "")))]
++  ""
++  "j\t%l0"
++  [(set_attr "type"	"jump")
++   (set_attr "mode"	"none")])
++
++(define_expand "indirect_jump"
++  [(set (pc) (match_operand 0 "register_operand"))]
++  ""
++{
++  operands[0] = force_reg (Pmode, operands[0]);
++  if (Pmode == SImode)
++    emit_jump_insn (gen_indirect_jumpsi (operands[0]));
++  else
++    emit_jump_insn (gen_indirect_jumpdi (operands[0]));
++  DONE;
++})
++
++(define_insn "indirect_jump<mode>"
++  [(set (pc) (match_operand:P 0 "register_operand" "l"))]
++  ""
++  "jr\t%0"
++  [(set_attr "type" "jump")
++   (set_attr "mode" "none")])
++
++(define_expand "tablejump"
++  [(set (pc) (match_operand 0 "register_operand" ""))
++	      (use (label_ref (match_operand 1 "" "")))]
++  ""
++{
++  if (CASE_VECTOR_PC_RELATIVE)
++      operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
++					 gen_rtx_LABEL_REF (Pmode, operands[1]),
++					 NULL_RTX, 0, OPTAB_DIRECT);
++
++  if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
++    emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
++  else
++    emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
++  DONE;
++})
++
++(define_insn "tablejump<mode>"
++  [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
++   (use (label_ref (match_operand 1 "" "")))]
++  ""
++  "jr\t%0"
++  [(set_attr "type" "jump")
++   (set_attr "mode" "none")])
++
++;;
++;;  ....................
++;;
++;;	Function prologue/epilogue
++;;
++;;  ....................
++;;
++
++(define_expand "prologue"
++  [(const_int 1)]
++  ""
++{
++  riscv_expand_prologue ();
++  DONE;
++})
++
++;; Block any insns from being moved before this point, since the
++;; profiling call to mcount can use various registers that aren't
++;; saved or used to pass arguments.
++
++(define_insn "blockage"
++  [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
++  ""
++  ""
++  [(set_attr "type" "ghost")
++   (set_attr "mode" "none")])
++
++(define_expand "epilogue"
++  [(const_int 2)]
++  ""
++{
++  riscv_expand_epilogue (false);
++  DONE;
++})
++
++(define_expand "sibcall_epilogue"
++  [(const_int 2)]
++  ""
++{
++  riscv_expand_epilogue (true);
++  DONE;
++})
++
++;; Trivial return.  Make it look like a normal return insn as that
++;; allows jump optimizations to work better.
++
++(define_expand "return"
++  [(simple_return)]
++  "riscv_can_use_return_insn ()"
++  "")
++
++(define_insn "simple_return"
++  [(simple_return)]
++  ""
++  "ret"
++  [(set_attr "type"	"jump")
++   (set_attr "mode"	"none")])
++
++;; Normal return.
++
++(define_insn "simple_return_internal"
++  [(simple_return)
++   (use (match_operand 0 "pmode_register_operand" ""))]
++  ""
++  "jr\t%0"
++  [(set_attr "type"	"jump")
++   (set_attr "mode"	"none")])
++
++;; This is used in compiling the unwind routines.
++(define_expand "eh_return"
++  [(use (match_operand 0 "general_operand"))]
++  ""
++{
++  if (GET_MODE (operands[0]) != word_mode)
++    operands[0] = convert_to_mode (word_mode, operands[0], 0);
++  if (TARGET_64BIT)
++    emit_insn (gen_eh_set_lr_di (operands[0]));
++  else
++    emit_insn (gen_eh_set_lr_si (operands[0]));
++  DONE;
++})
++
++;; Clobber the return address on the stack.  We can't expand this
++;; until we know where it will be put in the stack frame.
++
++(define_insn "eh_set_lr_si"
++  [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++   (clobber (match_scratch:SI 1 "=&r"))]
++  "! TARGET_64BIT"
++  "#")
++
++(define_insn "eh_set_lr_di"
++  [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++   (clobber (match_scratch:DI 1 "=&r"))]
++  "TARGET_64BIT"
++  "#")
++
++(define_split
++  [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
++   (clobber (match_scratch 1))]
++  "reload_completed"
++  [(const_int 0)]
++{
++  riscv_set_return_address (operands[0], operands[1]);
++  DONE;
++})
++
++;;
++;;  ....................
++;;
++;;	FUNCTION CALLS
++;;
++;;  ....................
++
++;; Sibling calls.  All these patterns use jump instructions.
++
++;; call_insn_operand will only accept constant
++;; addresses if a direct jump is acceptable.  Since the 'S' constraint
++;; is defined in terms of call_insn_operand, the same is true of the
++;; constraints.
++
++;; When we use an indirect jump, we need a register that will be
++;; preserved by the epilogue (constraint j).
++
++(define_expand "sibcall"
++  [(parallel [(call (match_operand 0 "")
++		    (match_operand 1 ""))
++	      (use (match_operand 2 ""))	;; next_arg_reg
++	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx
++  ""
++{
++  riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
++  DONE;
++})
++
++(define_insn "sibcall_internal"
++  [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
++	 (match_operand 1 "" ""))]
++  "SIBLING_CALL_P (insn)"
++  { return REG_P (operands[0]) ? "jr\t%0"
++	   : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
++	   : "tail\t%0@plt"; }
++  [(set_attr "type" "call")])
++
++(define_expand "sibcall_value"
++  [(parallel [(set (match_operand 0 "")
++		   (call (match_operand 1 "")
++			 (match_operand 2 "")))
++	      (use (match_operand 3 ""))])]		;; next_arg_reg
++  ""
++{
++  riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
++  DONE;
++})
++
++(define_insn "sibcall_value_internal"
++  [(set (match_operand 0 "register_operand" "")
++        (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
++              (match_operand 2 "" "")))]
++  "SIBLING_CALL_P (insn)"
++  { return REG_P (operands[1]) ? "jr\t%1"
++	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
++	   : "tail\t%1@plt"; }
++  [(set_attr "type" "call")])
++
++(define_insn "sibcall_value_multiple_internal"
++  [(set (match_operand 0 "register_operand" "")
++        (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
++              (match_operand 2 "" "")))
++   (set (match_operand 3 "register_operand" "")
++	(call (mem:SI (match_dup 1))
++	      (match_dup 2)))]
++  "SIBLING_CALL_P (insn)"
++  { return REG_P (operands[1]) ? "jr\t%1"
++	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
++	   : "tail\t%1@plt"; }
++  [(set_attr "type" "call")])
++
++(define_expand "call"
++  [(parallel [(call (match_operand 0 "")
++		    (match_operand 1 ""))
++	      (use (match_operand 2 ""))	;; next_arg_reg
++	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx
++  ""
++{
++  riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
++  DONE;
++})
++
++(define_insn "call_internal"
++  [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S"))
++	 (match_operand 1 "" ""))
++   (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  ""
++  { return REG_P (operands[0]) ? "jalr\t%0"
++	   : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
++	   : "call\t%0@plt"; }
++  [(set_attr "type" "call")])
++
++(define_expand "call_value"
++  [(parallel [(set (match_operand 0 "")
++		   (call (match_operand 1 "")
++			 (match_operand 2 "")))
++	      (use (match_operand 3 ""))])]		;; next_arg_reg
++  ""
++{
++  riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
++  DONE;
++})
++
++;; See comment for call_internal.
++(define_insn "call_value_internal"
++  [(set (match_operand 0 "register_operand" "")
++        (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
++              (match_operand 2 "" "")))
++   (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  ""
++  { return REG_P (operands[1]) ? "jalr\t%1"
++	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
++	   : "call\t%1@plt"; }
++  [(set_attr "type" "call")])
++
++;; See comment for call_internal.
++(define_insn "call_value_multiple_internal"
++  [(set (match_operand 0 "register_operand" "")
++        (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
++              (match_operand 2 "" "")))
++   (set (match_operand 3 "register_operand" "")
++	(call (mem:SI (match_dup 1))
++	      (match_dup 2)))
++   (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  ""
++  { return REG_P (operands[1]) ? "jalr\t%1"
++	   : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
++	   : "call\t%1@plt"; }
++  [(set_attr "type" "call")])
++
++;; Call subroutine returning any type.
++
++(define_expand "untyped_call"
++  [(parallel [(call (match_operand 0 "")
++		    (const_int 0))
++	      (match_operand 1 "")
++	      (match_operand 2 "")])]
++  ""
++{
++  int i;
++
++  emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
++
++  for (i = 0; i < XVECLEN (operands[2], 0); i++)
++    {
++      rtx set = XVECEXP (operands[2], 0, i);
++      riscv_emit_move (SET_DEST (set), SET_SRC (set));
++    }
++
++  emit_insn (gen_blockage ());
++  DONE;
++})
++
++(define_insn "nop"
++  [(const_int 0)]
++  ""
++  "nop"
++  [(set_attr "type"	"nop")
++   (set_attr "mode"	"none")])
++
++(define_insn "trap"
++  [(trap_if (const_int 1) (const_int 0))]
++  ""
++  "sbreak")
++
++(define_insn "gpr_save"
++  [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_SAVE)
++   (clobber (reg:SI T0_REGNUM))
++   (clobber (reg:SI T1_REGNUM))]
++  ""
++  { return riscv_output_gpr_save (INTVAL (operands[0])); })
++
++(define_insn "gpr_restore"
++  [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_RESTORE)]
++  ""
++  "tail\t__riscv_restore_%0")
++
++(define_insn "gpr_restore_return"
++  [(return)
++   (use (match_operand 0 "pmode_register_operand" ""))
++   (const_int 0)]
++  ""
++  "")
++
++(include "sync.md")
++(include "peephole.md")
++(include "generic.md")
+diff --git original-gcc/gcc/config/riscv/riscv.opt gcc-6.2.0/gcc/config/riscv/riscv.opt
+new file mode 100644
+index 0000000..1d048be
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.opt
+@@ -0,0 +1,123 @@
++; Options for the RISC-V port of the compiler
++;
++; Copyright (C) 2011-2016 Free Software Foundation, Inc.
++;
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 3, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT
++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++; License for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING3.  If not see
++; <http://www.gnu.org/licenses/>.
++
++HeaderInclude
++config/riscv/riscv-opts.h
++
++m32
++Target RejectNegative Mask(32BIT)
++Generate RV32 code.
++
++m64
++Target RejectNegative InverseMask(32BIT, 64BIT)
++Generate RV64 code.
++
++mbranch-cost=
++Target RejectNegative Joined UInteger Var(riscv_branch_cost)
++-mbranch-cost=N	Set the cost of branches to roughly N instructions.
++
++mmemcpy
++Target Report Mask(MEMCPY)
++Don't optimize block moves.
++
++mplt
++Target Report Var(TARGET_PLT) Init(1)
++When generating -fpic code, allow the use of PLTs. Ignored for fno-pic.
++
++mfloat-abi=
++Target Report RejectNegative Joined Enum(float_abi_type) Var(riscv_float_abi) Init(FLOAT_ABI_SOFT)
++Specify floating-point calling convention.
++
++Enum
++Name(float_abi_type) Type(enum riscv_float_abi_type)
++Known floating-point ABIs (for use with the -mfloat-abi= option):
++
++EnumValue
++Enum(float_abi_type) String(soft) Value(FLOAT_ABI_SOFT)
++
++EnumValue
++Enum(float_abi_type) String(single) Value(FLOAT_ABI_SINGLE)
++
++EnumValue
++Enum(float_abi_type) String(double) Value(FLOAT_ABI_DOUBLE)
++
++mno-fdiv
++Target Report RejectNegative Undocumented Mask(NO_FDIV)
++Don't use hardware floating-point divide and square root instructions.
++
++mfdiv
++Target Report RejectNegative InverseMask(NO_FDIV, FDIV)
++Use hardware floating-point divide and square root instructions.
++
++march=
++Target Report RejectNegative Joined
++-march=	Generate code for given RISC-V ISA (e.g. RV64IM).
++
++mtune=
++Target RejectNegative Joined Var(riscv_tune_string)
++-mtune=PROCESSOR	Optimize the output for PROCESSOR.
++
++msmall-data-limit=
++Target Joined Separate UInteger Var(g_switch_value) Init(8)
++-msmall-data-limit=N	Put global and static data smaller than <number> bytes into a special section (on some targets).
++
++matomic
++Target Report Mask(ATOMIC)
++Use hardware atomic memory instructions.
++
++mmuldiv
++Target Report
++Use hardware instructions for integer multiplication and division.
++
++mmul
++Target Report Mask(MUL)
++Use hardware instructions for integer multiplication.
++
++mdiv
++Target Report Mask(DIV)
++Use hardware instructions for integer division.
++
++mrvc
++Target Report Mask(RVC)
++Use compressed instruction encoding.
++
++msave-restore
++Target Report Mask(SAVE_RESTORE)
++Use smaller but slower prologue and epilogue code.
++
++mcmodel=
++Target RejectNegative Joined Var(riscv_cmodel_string)
++Use given RISC-V code model (medlow or medany).
++
++mexplicit-relocs
++Target Report Mask(EXPLICIT_RELOCS)
++Use %reloc() operators, rather than assembly macros, to load addresses.
++
++mno-float
++Target Report RejectNegative
++Disable hardware floating-point.  Implies -mfloat-abi=soft.
++
++msingle-float
++Target Report RejectNegative Mask(HARD_FLOAT)
++Enable only single-precision floating-point.
++
++mdouble-float
++Target Report RejectNegative Mask(DOUBLE_FLOAT)
++Enable single- and double-precision floating-point.
+diff --git original-gcc/gcc/config/riscv/sync.md gcc-6.2.0/gcc/config/riscv/sync.md
+new file mode 100644
+index 0000000..4f7f4f3
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/sync.md
+@@ -0,0 +1,204 @@
++;; Machine description for RISC-V atomic operations.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman at cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++  UNSPEC_COMPARE_AND_SWAP
++  UNSPEC_SYNC_OLD_OP
++  UNSPEC_SYNC_EXCHANGE
++  UNSPEC_ATOMIC_STORE
++  UNSPEC_MEMORY_BARRIER
++])
++
++(define_code_iterator any_atomic [plus ior xor and])
++(define_code_attr atomic_optab
++  [(plus "add") (ior "or") (xor "xor") (and "and")])
++
++;; Memory barriers.
++
++(define_expand "mem_thread_fence"
++  [(match_operand:SI 0 "const_int_operand" "")] ;; model
++  ""
++{
++  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
++    {
++      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
++      MEM_VOLATILE_P (mem) = 1;
++      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
++    }
++  DONE;
++})
++
++(define_insn "mem_thread_fence_1"
++  [(set (match_operand:BLK 0 "" "")
++	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
++   (match_operand:SI 1 "const_int_operand" "")] ;; model
++  ""
++{
++  long model = INTVAL (operands[1]);
++
++  switch (model)
++    {
++    case MEMMODEL_SEQ_CST:
++    case MEMMODEL_SYNC_SEQ_CST:
++    case MEMMODEL_ACQ_REL:
++      return "fence rw,rw";
++    case MEMMODEL_ACQUIRE:
++    case MEMMODEL_SYNC_ACQUIRE:
++    case MEMMODEL_CONSUME:
++      return "fence r,rw";
++    case MEMMODEL_RELEASE:
++    case MEMMODEL_SYNC_RELEASE:
++      return "fence rw,w";
++    default:
++      fprintf(stderr, "mem_thread_fence_1(%ld)\n", model);
++      gcc_unreachable();
++    }
++})
++
++;; Atomic memory operations.
++
++;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
++(define_insn "atomic_store<mode>"
++  [(set (match_operand:GPR 0 "memory_operand" "=A")
++    (unspec_volatile:GPR
++      [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
++       (match_operand:SI 2 "const_int_operand")]      ;; model
++      UNSPEC_ATOMIC_STORE))]
++  "TARGET_ATOMIC"
++  "amoswap.<amo>%A2 zero,%z1,%0")
++
++(define_insn "atomic_<atomic_optab><mode>"
++  [(set (match_operand:GPR 0 "memory_operand" "+A")
++	(unspec_volatile:GPR
++	  [(any_atomic:GPR (match_dup 0)
++		     (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
++	   (match_operand:SI 2 "const_int_operand")] ;; model
++	 UNSPEC_SYNC_OLD_OP))]
++  "TARGET_ATOMIC"
++  "amo<insn>.<amo>%A2 zero,%z1,%0")
++
++(define_insn "atomic_fetch_<atomic_optab><mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++	(match_operand:GPR 1 "memory_operand" "+A"))
++   (set (match_dup 1)
++	(unspec_volatile:GPR
++	  [(any_atomic:GPR (match_dup 1)
++		     (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
++	   (match_operand:SI 3 "const_int_operand")] ;; model
++	 UNSPEC_SYNC_OLD_OP))]
++  "TARGET_ATOMIC"
++  "amo<insn>.<amo>%A3 %0,%z2,%1")
++
++(define_insn "atomic_exchange<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++	(unspec_volatile:GPR
++	  [(match_operand:GPR 1 "memory_operand" "+A")
++	   (match_operand:SI 3 "const_int_operand")] ;; model
++	  UNSPEC_SYNC_EXCHANGE))
++   (set (match_dup 1)
++        (match_operand:GPR 2 "register_operand" "0"))]
++  "TARGET_ATOMIC"
++  "amoswap.<amo>%A3 %0,%z2,%1")
++
++(define_insn "atomic_cas_value_strong<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++	(match_operand:GPR 1 "memory_operand" "+A"))
++   (set (match_dup 1)
++	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
++			      (match_operand:GPR 3 "reg_or_0_operand" "rJ")
++			      (match_operand:SI 4 "const_int_operand")  ;; mod_s
++			      (match_operand:SI 5 "const_int_operand")] ;; mod_f
++	 UNSPEC_COMPARE_AND_SWAP))
++   (clobber (match_scratch:GPR 6 "=&r"))]
++  "TARGET_ATOMIC"
++  "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
++  [(set (attr "length") (const_int 16))])
++
++(define_expand "atomic_compare_and_swap<mode>"
++  [(match_operand:SI 0 "register_operand" "")   ;; bool output
++   (match_operand:GPR 1 "register_operand" "")  ;; val output
++   (match_operand:GPR 2 "memory_operand" "")    ;; memory
++   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
++   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
++   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
++   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
++   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
++  "TARGET_ATOMIC"
++{
++  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
++						operands[3], operands[4],
++						operands[6], operands[7]));
++
++  rtx compare = operands[1];
++  if (operands[3] != const0_rtx)
++    {
++      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
++      compare = gen_reg_rtx (<MODE>mode);
++      emit_insn (gen_rtx_SET (compare, difference));
++    }
++
++  rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
++  rtx result = gen_reg_rtx (<MODE>mode);
++  emit_insn (gen_rtx_SET (result, eq));
++  emit_insn (gen_rtx_SET (operands[0], gen_lowpart (SImode, result)));
++  DONE;
++})
++
++(define_expand "atomic_test_and_set"
++  [(match_operand:QI 0 "register_operand" "")     ;; bool output
++   (match_operand:QI 1 "memory_operand" "+A")    ;; memory
++   (match_operand:SI 2 "const_int_operand" "")]   ;; model
++  "TARGET_ATOMIC"
++{
++  /* We have no QImode atomics, so use the address LSBs to form a mask,
++     then use an aligned SImode atomic. */
++  rtx result = operands[0];
++  rtx mem = operands[1];
++  rtx model = operands[2];
++  rtx addr = force_reg (Pmode, XEXP (mem, 0));
++
++  rtx aligned_addr = gen_reg_rtx (Pmode);
++  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
++
++  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
++  set_mem_alias_set (aligned_mem, 0);
++
++  rtx offset = gen_reg_rtx (SImode);
++  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
++				       GEN_INT (3)));
++
++  rtx tmp = gen_reg_rtx (SImode);
++  emit_move_insn (tmp, GEN_INT (1));
++
++  rtx shmt = gen_reg_rtx (SImode);
++  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
++
++  rtx word = gen_reg_rtx (SImode);
++  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
++
++  tmp = gen_reg_rtx (SImode);
++  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
++
++  emit_move_insn (gen_lowpart (SImode, result),
++		  gen_rtx_LSHIFTRT (SImode, tmp,
++				    gen_lowpart (SImode, shmt)));
++  DONE;
++})
+diff --git original-gcc/gcc/config/riscv/t-elf gcc-6.2.0/gcc/config/riscv/t-elf
+new file mode 100644
+index 0000000..ebb6e92
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/t-elf
+@@ -0,0 +1,9 @@
++# Build the libraries for both hard and soft floating point
++
++MULTILIB_OPTIONS = m64/m32
++MULTILIB_DIRNAMES = 64 32
++
++ifneq ($(with_float), soft)
++MULTILIB_OPTIONS += mno-float
++MULTILIB_DIRNAMES += soft-float
++endif
+diff --git original-gcc/gcc/config/riscv/t-linux gcc-6.2.0/gcc/config/riscv/t-linux
+new file mode 100644
+index 0000000..8747ecd
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/t-linux
+@@ -0,0 +1,11 @@
++# Build the libraries for both hard and soft floating point
++
++MULTILIB_OPTIONS = m64/m32
++MULTILIB_DIRNAMES = 64 32
++MULTILIB_OSDIRNAMES = ../lib ../lib32
++
++ifneq ($(with_float), soft)
++MULTILIB_OPTIONS += mno-float
++MULTILIB_DIRNAMES += soft-float
++MULTILIB_OSDIRNAMES += soft-float
++endif
+diff --git original-gcc/gcc/configure gcc-6.2.0/gcc/configure
+index fc83cc8..ebf12f9 100755
+--- original-gcc/gcc/configure
++++ gcc-6.2.0/gcc/configure
+@@ -24134,6 +24134,25 @@ x3:	.space 4
+ 	tls_first_minor=14
+ 	tls_as_opt="-a32 --fatal-warnings"
+ 	;;
++  riscv*-*-*)
++    conftest_s='
++	.section .tdata,"awT",@progbits
++x:
++	.word 2
++	.text
++	la.tls.gd a0,x
++	la.tls.ie a1,x
++	lui a0,%tls_ie_pcrel_hi(x)
++	lw a0,%pcrel_lo(x)(a0)
++	add a0,a0,tp
++	lw a0,0(a0)
++	lui a0,%tprel_hi(x)
++	add a0,a0,tp,%tprel_add(x)
++	lw a0,%tprel_lo(x)(a0)'
++	tls_first_major=2
++	tls_first_minor=21
++	tls_as_opt='-m32 --fatal-warnings'
++	;;
+   s390-*-*)
+     conftest_s='
+ 	.section ".tdata","awT",@progbits
+diff --git original-gcc/gcc/configure.ac gcc-6.2.0/gcc/configure.ac
+index dc22d3c..2591e5e 100644
+--- original-gcc/gcc/configure.ac
++++ gcc-6.2.0/gcc/configure.ac
+@@ -3367,6 +3367,25 @@ x3:	.space 4
+ 	tls_first_minor=14
+ 	tls_as_opt="-a32 --fatal-warnings"
+ 	;;
++  riscv*-*-*)
++    conftest_s='
++	.section .tdata,"awT",@progbits
++x:
++	.word 2
++	.text
++	la.tls.gd a0,x
++	la.tls.ie a1,x
++	lui a0,%tls_ie_pcrel_hi(x)
++	lw a0,%pcrel_lo(x)(a0)
++	add a0,a0,tp
++	lw a0,0(a0)
++	lui a0,%tprel_hi(x)
++	add a0,a0,tp,%tprel_add(x)
++	lw a0,%tprel_lo(x)(a0)'
++	tls_first_major=2
++	tls_first_minor=21
++	tls_as_opt='-m32 --fatal-warnings'
++	;;
+   s390-*-*)
+     conftest_s='
+ 	.section ".tdata","awT",@progbits
+diff --git original-gcc/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C gcc-6.2.0/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C
+index 80a571a..2e0ef68 100644
+--- original-gcc/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C
++++ gcc-6.2.0/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C
+@@ -2,7 +2,7 @@
+ // { dg-do compile { target c++11 } }
+ // { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } }
+ // { dg-final { scan-assembler "\\.rdata" { target mips*-*-* } } }
+-// { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! mips*-*-* } } } } }
++// { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! { mips*-*-* riscv*-*-* } } } } } }
+ 
+ struct Data
+ {
+diff --git original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c gcc-6.2.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+index 744763f..2b06bd6 100644
+--- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
++++ gcc-6.2.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+@@ -6,6 +6,9 @@
+ #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
+   /* On PPC division by zero does not trap.  */
+ # define DO_TEST 0
++#elif defined (__riscv__)
++  /* On RISC-V division by zero does not trap.  */
++# define DO_TEST 0
+ #elif defined (__SPU__)
+   /* On SPU division by zero does not trap.  */
+ # define DO_TEST 0
+diff --git original-gcc/gcc/testsuite/gcc.dg/20020312-2.c gcc-6.2.0/gcc/testsuite/gcc.dg/20020312-2.c
+index 5fce50d..f77862c 100644
+--- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/20020312-2.c
+@@ -67,6 +67,8 @@ extern void abort (void);
+ # else
+ #  define PIC_REG  "30"
+ # endif
++#elif defined(__riscv__)
++/* No pic register.  */
+ #elif defined(__RX__)
+ /* No pic register.  */
+ #elif defined(__s390__)
+diff --git original-gcc/gcc/testsuite/gcc.dg/ifcvt-4.c gcc-6.2.0/gcc/testsuite/gcc.dg/ifcvt-4.c
+index 319b583..2a86344 100644
+--- original-gcc/gcc/testsuite/gcc.dg/ifcvt-4.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/ifcvt-4.c
+@@ -1,6 +1,6 @@
+ /* { dg-options "-fdump-rtl-ce1 -O2 --param max-rtl-if-conversion-insns=3" } */
+ /* { dg-additional-options "-misel" { target { powerpc*-*-* } } } */
+-/* { dg-skip-if "Multiple set if-conversion not guaranteed on all subtargets" { "arm*-*-* hppa*64*-*-* visium-*-*" } }  */
++/* { dg-skip-if "Multiple set if-conversion not guaranteed on all subtargets" { "arm*-*-* hppa*64*-*-* visium-*-*" riscv*-*-* } }  */
+ 
+ typedef int word __attribute__((mode(word)));
+ 
+diff --git original-gcc/gcc/testsuite/gcc.dg/loop-8.c gcc-6.2.0/gcc/testsuite/gcc.dg/loop-8.c
+index 463c5d0..a760072 100644
+--- original-gcc/gcc/testsuite/gcc.dg/loop-8.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/loop-8.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-options "-O1 -fdump-rtl-loop2_invariant" } */
+-/* { dg-skip-if "unexpected IV" { "hppa*-*-* visium-*-*" } { "*" } { "" } } */
++/* { dg-skip-if "unexpected IV" { "hppa*-*-* visium-*-* riscv*-*-*" } { "*" } { "" } } */
+ 
+ void
+ f (int *a, int *b)
+diff --git original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c gcc-6.2.0/gcc/testsuite/gcc.dg/stack-usage-1.c
+index 7864c6a..12f91a8 100644
+--- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/stack-usage-1.c
+@@ -63,6 +63,8 @@
+ #  else
+ #    define SIZE 240
+ #  endif
++#elif defined (__riscv__)
++#  define SIZE 240
+ #elif defined (__AVR__)
+ #  define SIZE 254
+ #elif defined (__s390x__)
+diff --git original-gcc/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c
+index f7b5dfa..a1237cf 100644
+--- original-gcc/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c
+@@ -33,4 +33,4 @@ void test55 (int x, int y)
+    that the && should be emitted (based on BRANCH_COST).  Fix this
+    by teaching dom to look through && and register all components
+    as true.  */
+-/* { dg-final { scan-tree-dump-times "link_error" 0 "optimized" { xfail { ! "alpha*-*-* arm*-*-* aarch64*-*-* powerpc*-*-* cris-*-* crisv32-*-* hppa*-*-* i?86-*-* mmix-*-* mips*-*-* m68k*-*-* moxie-*-* nds32*-*-* s390*-*-* sh*-*-* sparc*-*-* spu-*-* visium-*-* x86_64-*-*" } } } } */
++/* { dg-final { scan-tree-dump-times "link_error" 0 "optimized" { xfail { ! "alpha*-*-* arm*-*-* aarch64*-*-* powerpc*-*-* cris-*-* crisv32-*-* hppa*-*-* i?86-*-* mmix-*-* mips*-*-* m68k*-*-* moxie-*-* nds32*-*-* s390*-*-* sh*-*-* sparc*-*-* spu-*-* visium-*-* x86_64-*-* riscv*-*-*" } } } } */
+diff --git original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
+index 1a4bfe6..665ac23 100644
+--- original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
+@@ -25,4 +25,4 @@ foo ()
+    but the loop reads only one element at a time, and DOM cannot resolve these.
+    The same happens on powerpc depending on the SIMD support available.  */
+ 
+-/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* powerpc64*-*-* } || { sparc*-*-* && lp64 } } } } } */
++/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* powerpc64*-*-* riscv*64*-*-* } || { sparc*-*-* && lp64 } } } } } */
+diff --git original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c
+index a287dad..0aecfed 100644
+--- original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c
+@@ -5,7 +5,7 @@
+ 
+    When the condition is true, we distribute "(int) (a + b)" as
+    "(int) a + (int) b", otherwise we keep the original.  */
+-/* { dg-do compile { target { { ! mips64 } && { ! spu-*-* } } } } */
++/* { dg-do compile { target { { ! mips64 } && { { ! spu-*-* } && { ! riscv*64*-*-* } } } } } */
+ /* { dg-options "-O -fno-tree-forwprop -fno-tree-ccp -fwrapv -fdump-tree-fre1-details" } */
+ 
+ /* From PR14844.  */
+diff --git original-gcc/gcc/testsuite/lib/target-supports.exp gcc-6.2.0/gcc/testsuite/lib/target-supports.exp
+index 6d9b488..1c96306 100644
+--- original-gcc/gcc/testsuite/lib/target-supports.exp
++++ gcc-6.2.0/gcc/testsuite/lib/target-supports.exp
+@@ -6902,6 +6902,7 @@ proc check_effective_target_logical_op_short_circuit {} {
+ 	 || [istarget s390*-*-*]
+ 	 || [istarget powerpc*-*-*]
+ 	 || [istarget nios2*-*-*]
++	 || [istarget riscv*-*-*]
+ 	 || [istarget visium-*-*]
+ 	 || [check_effective_target_arm_cortex_m] } {
+ 	return 1
+diff --git original-gcc/libatomic/configure.tgt gcc-6.2.0/libatomic/configure.tgt
+index eab2765..1343e37 100644
+--- original-gcc/libatomic/configure.tgt
++++ gcc-6.2.0/libatomic/configure.tgt
+@@ -37,6 +37,7 @@ case "${target_cpu}" in
+ 	ARCH=alpha
+ 	;;
+   rs6000 | powerpc*)	ARCH=powerpc ;;
++  riscv*)		ARCH=riscv ;;
+   sh*)			ARCH=sh ;;
+ 
+   arm*)
+diff --git original-gcc/libgcc/config.host gcc-6.2.0/libgcc/config.host
+index 16a45c8..0545bbc 100644
+--- original-gcc/libgcc/config.host
++++ gcc-6.2.0/libgcc/config.host
+@@ -169,6 +169,9 @@ powerpc*-*-*)
+ 	;;
+ rs6000*-*-*)
+ 	;;
++riscv*)
++	cpu_type=riscv
++	;;
+ sparc64*-*-*)
+ 	cpu_type=sparc
+ 	;;
+@@ -1088,6 +1091,14 @@ powerpcle-*-eabi*)
+ 	tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ 	extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
+ 	;;
++riscv*-*-linux*)
++	tmake_file="${tmake_file} t-softfp-sfdf riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
++	extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
++	;;
++riscv*-*-*)
++	tmake_file="${tmake_file} t-softfp-sfdf riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
++	extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
++	;;
+ rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
+ 	md_unwind_header=rs6000/aix-unwind.h
+ 	tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
+diff --git original-gcc/libgcc/config/riscv/atomic.c gcc-6.2.0/libgcc/config/riscv/atomic.c
+new file mode 100644
+index 0000000..00e8111
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/atomic.c
+@@ -0,0 +1,111 @@
++/* Legacy sub-word atomics for RISC-V.
++ 
++   Copyright (C) 2016 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++<http://www.gnu.org/licenses/>.  */
++
++#ifdef __riscv_atomic
++
++#include <stdbool.h>
++
++#define INVERT		"not %[tmp1], %[tmp1]\n\t"
++#define DONT_INVERT	""
++
++#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop)	\
++  type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v)	\
++  {									\
++    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
++    int shift = (((unsigned long) p) & 3) * 8;				\
++    unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift;		\
++    unsigned old, tmp1, tmp2;						\
++									\
++    asm volatile ("1:\n\t"						\
++		  "lr.w.aq %[old], %[mem]\n\t"				\
++		  #insn " %[tmp1], %[old], %[value]\n\t"		\
++		  invert						\
++		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
++		  "and %[tmp2], %[old], %[not_mask]\n\t"		\
++		  "or %[tmp2], %[tmp2], %[tmp1]\n\t"			\
++		  "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"		\
++		  "bnez %[tmp1], 1b"					\
++		  : [old] "=&r" (old),					\
++		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
++		    [tmp1] "=&r" (tmp1),				\
++		    [tmp2] "=&r" (tmp2)					\
++		  : [value] "r" (((unsigned) v) << shift),		\
++		    [mask] "r" (mask),					\
++		    [not_mask] "r" (~mask));				\
++									\
++    return (type) (old >> shift);					\
++  }									\
++									\
++  type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v)	\
++  {									\
++    type o = __sync_fetch_and_ ## opname ## _ ## size (p, v);		\
++    return cop;								\
++  }
++
++#define GENERATE_COMPARE_AND_SWAP(type, size)				\
++  type __sync_val_compare_and_swap_ ## size (type *p, type o, type n)	\
++  {									\
++    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
++    int shift = (((unsigned long) p) & 3) * 8;				\
++    unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift;		\
++    unsigned old, tmp1;							\
++									\
++    asm volatile ("1:\n\t"						\
++		  "lr.w.aq %[old], %[mem]\n\t"				\
++		  "and %[tmp1], %[old], %[mask]\n\t"			\
++		  "bne %[tmp1], %[o], 1f\n\t"				\
++		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
++		  "or %[tmp1], %[tmp1], %[n]\n\t"			\
++		  "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"		\
++		  "bnez %[tmp1], 1b\n\t"				\
++		  "1:"							\
++		  : [old] "=&r" (old),					\
++		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
++		    [tmp1] "=&r" (tmp1)					\
++		  : [o] "r" ((((unsigned) o) << shift) & mask),		\
++		    [n] "r" ((((unsigned) n) << shift) & mask),		\
++		    [mask] "r" (mask),					\
++		    [not_mask] "r" (~mask));				\
++									\
++    return (type) (old >> shift);					\
++  }									\
++  bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n)	\
++  {									\
++    return __sync_val_compare_and_swap(p, o, n) == o;			\
++  }
++
++#define GENERATE_ALL(type, size)					\
++  GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v)	\
++  GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v)	\
++  GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v)	\
++  GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v)	\
++  GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v)		\
++  GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v))	\
++  GENERATE_COMPARE_AND_SWAP(type, size)
++
++GENERATE_ALL(unsigned char, 1)
++GENERATE_ALL(unsigned short, 2)
++
++#endif
+diff --git original-gcc/libgcc/config/riscv/crti.S gcc-6.2.0/libgcc/config/riscv/crti.S
+new file mode 100644
+index 0000000..89bac70
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/crti.S
+@@ -0,0 +1 @@
++/* crti.S is empty because .init_array/.fini_array are used exclusively. */
+diff --git original-gcc/libgcc/config/riscv/crtn.S gcc-6.2.0/libgcc/config/riscv/crtn.S
+new file mode 100644
+index 0000000..ca6ee7b
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/crtn.S
+@@ -0,0 +1 @@
++/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
+diff --git original-gcc/libgcc/config/riscv/div.S gcc-6.2.0/libgcc/config/riscv/div.S
+new file mode 100644
+index 0000000..385634a
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/div.S
+@@ -0,0 +1,121 @@
++  .text
++  .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
++# define __udivdi3 __udivsi3
++# define __umoddi3 __umodsi3
++# define __divdi3 __divsi3
++# define __moddi3 __modsi3
++#else
++  .globl __udivsi3
++__udivsi3:
++  /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t.  */
++  sll    a0, a0, 32
++  sll    a1, a1, 32
++  move   t0, ra
++  jal    __udivdi3
++  sext.w a0, a0
++  jr     t0
++
++  .globl __umodsi3
++__umodsi3:
++  /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t.  */
++  sll    a0, a0, 32
++  sll    a1, a1, 32
++  srl    a0, a0, 32
++  srl    a1, a1, 32
++  move   t0, ra
++  jal    __udivdi3
++  sext.w a0, a1
++  jr     t0
++
++  .globl __modsi3
++  __modsi3 = __moddi3
++
++  .globl __divsi3
++__divsi3:
++  /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3.  */
++  li    t0, -1
++  beq   a1, t0, .L20
++#endif
++
++  .globl __divdi3
++__divdi3:
++  bltz  a0, .L10
++  bltz  a1, .L11
++  /* Since the quotient is positive, fall into __udivdi3.  */
++
++  .globl __udivdi3
++__udivdi3:
++  mv    a2, a1
++  mv    a1, a0
++  li    a0, -1
++  beqz  a2, .L5
++  li    a3, 1
++  bgeu  a2, a1, .L2
++.L1:
++  blez  a2, .L2
++  slli  a2, a2, 1
++  slli  a3, a3, 1
++  bgtu  a1, a2, .L1
++.L2:
++  li    a0, 0
++.L3:
++  bltu  a1, a2, .L4
++  sub   a1, a1, a2
++  or    a0, a0, a3
++.L4:
++  srli  a3, a3, 1
++  srli  a2, a2, 1
++  bnez  a3, .L3
++.L5:
++  ret
++
++  .globl __umoddi3
++__umoddi3:
++  /* Call __udivdi3(a0, a1), then return the remainder, which is in a1.  */
++  move  t0, ra
++  jal   __udivdi3
++  move  a0, a1
++  jr    t0
++
++  /* Handle negative arguments to __divdi3.  */
++.L10:
++  neg   a0, a0 
++  bgez  a1, .L12      /* Compute __udivdi3(-a0, a1), then negate the result.  */
++  neg   a1, a1
++  j     __divdi3      /* Compute __udivdi3(-a0, -a1).  */
++.L11:                 /* Compute __udivdi3(a0, -a1), then negate the result.  */
++  neg   a1, a1
++.L12:
++  move  t0, ra
++  jal   __divdi3
++  neg   a0, a0
++  jr    t0
++
++  .globl __moddi3
++__moddi3:
++  move   t0, ra
++  bltz   a1, .L31
++  bltz   a0, .L32
++.L30:
++  jal    __udivdi3    /* The dividend is not negative.  */
++  move   a0, a1
++  jr     t0
++.L31:
++  neg    a1, a1
++  bgez   a0, .L30
++.L32:
++  neg    a0, a0
++  jal    __udivdi3    /* The dividend is hella negative.  */
++  neg    a0, a1
++  jr     t0
++
++#ifdef __riscv64
++  /* continuation of __divsi3 */
++.L20:
++  sll   t0, t0, 31
++  bne   a0, t0, __divdi3
++  ret
++#endif
+diff --git original-gcc/libgcc/config/riscv/muldi3.S gcc-6.2.0/libgcc/config/riscv/muldi3.S
+new file mode 100644
+index 0000000..f5061b9
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/muldi3.S
+@@ -0,0 +1,21 @@
++  .text
++  .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine.  */
++# define __muldi3 __mulsi3
++#endif
++
++  .globl __muldi3
++__muldi3:
++  mv     a2, a0
++  li     a0, 0
++.L1:
++  andi   a3, a1, 1
++  beqz   a3, .L2
++  add    a0, a0, a2
++.L2:
++  srli   a1, a1, 1
++  slli   a2, a2, 1
++  bnez   a1, .L1
++  ret
+diff --git original-gcc/libgcc/config/riscv/multi3.S gcc-6.2.0/libgcc/config/riscv/multi3.S
+new file mode 100644
+index 0000000..849951e
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/multi3.S
+@@ -0,0 +1,56 @@
++  .text
++  .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
++# define __multi3 __muldi3
++#endif
++
++  .globl __multi3
++__multi3:
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines.  */
++# define __muldi3 __mulsi3
++#endif
++
++/* We rely on the fact that __muldi3 doesn't clobber the t-registers.  */
++
++  mv  t0, ra
++  mv  t5, a0
++  mv  a0, a1
++  mv  t6, a3
++  mv  a1, t5
++  mv  a4, a2
++  li  a5, 0
++  li  t2, 0
++  li  t4, 0
++.L1:
++  add  a6, t2, a1
++  andi t3, a4, 1
++  slli a7, a5, 1
++  slti t1, a1, 0
++  srli a4, a4, 1
++  add  a5, t4, a5
++  beqz t3, .L2
++  sltu t3, a6, t2
++  mv   t2, a6
++  add  t4, t3, a5
++.L2:
++  slli a1, a1, 1
++  or   a5, t1, a7
++  bnez a4, .L1
++  beqz a0, .L3
++  mv   a1, a2
++  call __muldi3
++  add  t4, t4, a0
++.L3:
++  beqz t6, .L4
++  mv   a1, t6
++  mv   a0, t5
++  call  __muldi3
++  add  t4, t4, a0
++.L4:
++  mv  a0, t2
++  mv  a1, t4
++  jr  t0
+diff --git original-gcc/libgcc/config/riscv/save-restore.S gcc-6.2.0/libgcc/config/riscv/save-restore.S
+new file mode 100644
+index 0000000..bbf0e33
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/save-restore.S
+@@ -0,0 +1,220 @@
++  .text
++
++  .globl __riscv_save_12
++  .globl __riscv_save_11
++  .globl __riscv_save_10
++  .globl __riscv_save_9
++  .globl __riscv_save_8
++  .globl __riscv_save_7
++  .globl __riscv_save_6
++  .globl __riscv_save_5
++  .globl __riscv_save_4
++  .globl __riscv_save_3
++  .globl __riscv_save_2
++  .globl __riscv_save_1
++  .globl __riscv_save_0
++
++  .globl __riscv_restore_12
++  .globl __riscv_restore_11
++  .globl __riscv_restore_10
++  .globl __riscv_restore_9
++  .globl __riscv_restore_8
++  .globl __riscv_restore_7
++  .globl __riscv_restore_6
++  .globl __riscv_restore_5
++  .globl __riscv_restore_4
++  .globl __riscv_restore_3
++  .globl __riscv_restore_2
++  .globl __riscv_restore_1
++  .globl __riscv_restore_0
++
++#ifdef __riscv64
++
++__riscv_save_12:
++  addi sp, sp, -112
++  li t1, 0
++  sd s11, 8(sp)
++  j .Ls10
++
++__riscv_save_11:
++__riscv_save_10:
++  addi sp, sp, -112
++  li t1, -16
++.Ls10:
++  sd s10, 16(sp)
++  sd s9, 24(sp)
++  j .Ls8
++
++__riscv_save_9:
++__riscv_save_8:
++  addi sp, sp, -112
++  li t1, -32
++.Ls8:
++  sd s8, 32(sp)
++  sd s7, 40(sp)
++  j .Ls6
++
++__riscv_save_7:
++__riscv_save_6:
++  addi sp, sp, -112
++  li t1, -48
++.Ls6:
++  sd s6, 48(sp)
++  sd s5, 56(sp)
++  j .Ls4
++
++__riscv_save_5:
++__riscv_save_4:
++  addi sp, sp, -112
++  li t1, -64
++.Ls4:
++  sd s4, 64(sp)
++  sd s3, 72(sp)
++  j .Ls2
++
++__riscv_save_3:
++__riscv_save_2:
++  addi sp, sp, -112
++  li t1, -80
++.Ls2:
++  sd s2, 80(sp)
++  sd s1, 88(sp)
++  sd s0, 96(sp)
++  sd ra, 104(sp)
++  sub sp, sp, t1
++  jr t0
++
++__riscv_save_1:
++__riscv_save_0:
++  addi sp, sp, -16
++  sd s0, 0(sp)
++  sd ra, 8(sp)
++  jr t0
++
++__riscv_restore_12:
++  ld s11, 8(sp)
++  addi sp, sp, 16
++
++__riscv_restore_11:
++__riscv_restore_10:
++  ld s10, 0(sp)
++  ld s9, 8(sp)
++  addi sp, sp, 16
++
++__riscv_restore_9:
++__riscv_restore_8:
++  ld s8, 0(sp)
++  ld s7, 8(sp)
++  addi sp, sp, 16
++
++__riscv_restore_7:
++__riscv_restore_6:
++  ld s6, 0(sp)
++  ld s5, 8(sp)
++  addi sp, sp, 16
++
++__riscv_restore_5:
++__riscv_restore_4:
++  ld s4, 0(sp)
++  ld s3, 8(sp)
++  addi sp, sp, 16
++
++__riscv_restore_3:
++__riscv_restore_2:
++  ld s2, 0(sp)
++  ld s1, 8(sp)
++  addi sp, sp, 16
++
++__riscv_restore_1:
++__riscv_restore_0:
++  ld s0, 0(sp)
++  ld ra, 8(sp)
++  addi sp, sp, 16
++  ret
++
++#else
++
++__riscv_save_12:
++  addi sp, sp, -64
++  li t1, 0
++  sw s11, 12(sp)
++  j .Ls10
++
++__riscv_save_11:
++__riscv_save_10:
++__riscv_save_9:
++__riscv_save_8:
++  addi sp, sp, -64
++  li t1, -16
++.Ls10:
++  sw s10, 16(sp)
++  sw s9, 20(sp)
++  sw s8, 24(sp)
++  sw s7, 28(sp)
++  j .Ls6
++
++__riscv_save_7:
++__riscv_save_6:
++__riscv_save_5:
++__riscv_save_4:
++  addi sp, sp, -64
++  li t1, -32
++.Ls6:
++  sw s6, 32(sp)
++  sw s5, 36(sp)
++  sw s4, 40(sp)
++  sw s3, 44(sp)
++  sw s2, 48(sp)
++  sw s1, 52(sp)
++  sw s0, 56(sp)
++  sw ra, 60(sp)
++  sub sp, sp, t1
++  jr t0
++
++__riscv_save_3:
++__riscv_save_2:
++__riscv_save_1:
++__riscv_save_0:
++  addi sp, sp, -16
++  sw s2, 0(sp)
++  sw s1, 4(sp)
++  sw s0, 8(sp)
++  sw ra, 12(sp)
++  jr t0
++
++__riscv_restore_12:
++  lw s11, 12(sp)
++  addi sp, sp, 16
++
++__riscv_restore_11:
++__riscv_restore_10:
++__riscv_restore_9:
++__riscv_restore_8:
++  lw s10, 0(sp)
++  lw s9, 4(sp)
++  lw s8, 8(sp)
++  lw s7, 12(sp)
++  addi sp, sp, 16
++
++__riscv_restore_7:
++__riscv_restore_6:
++__riscv_restore_5:
++__riscv_restore_4:
++  lw s6, 0(sp)
++  lw s5, 4(sp)
++  lw s4, 8(sp)
++  lw s3, 12(sp)
++  addi sp, sp, 16
++
++__riscv_restore_3:
++__riscv_restore_2:
++__riscv_restore_1:
++__riscv_restore_0:
++  lw s2, 0(sp)
++  lw s1, 4(sp)
++  lw s0, 8(sp)
++  lw ra, 12(sp)
++  addi sp, sp, 16
++  ret
++
++#endif
+diff --git original-gcc/libgcc/config/riscv/sfp-machine.h gcc-6.2.0/libgcc/config/riscv/sfp-machine.h
+new file mode 100644
+index 0000000..c1f90c4
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/sfp-machine.h
+@@ -0,0 +1,100 @@
++
++#ifdef __riscv32
++
++#define _FP_W_TYPE_SIZE		32
++#define _FP_W_TYPE		unsigned long
++#define _FP_WS_TYPE		signed long
++#define _FP_I_TYPE		long
++
++#define _FP_MUL_MEAT_S(R,X,Y)				\
++  _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_D(R,X,Y)				\
++  _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_Q(R,X,Y)				\
++  _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
++
++#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
++#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv(D,R,X,Y)
++#define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_4_udiv(Q,R,X,Y)
++
++#define _FP_NANFRAC_S		((_FP_QNANBIT_S << 1) - 1)
++#define _FP_NANFRAC_D		((_FP_QNANBIT_D << 1) - 1), -1
++#define _FP_NANFRAC_Q		((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
++
++#else
++
++#define _FP_W_TYPE_SIZE		64
++#define _FP_W_TYPE		unsigned long long
++#define _FP_WS_TYPE		signed long long
++#define _FP_I_TYPE		long long
++
++#define _FP_MUL_MEAT_S(R,X,Y)					\
++  _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
++#define _FP_MUL_MEAT_D(R,X,Y)					\
++  _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_Q(R,X,Y)					\
++  _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
++
++#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
++#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
++#define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_2_udiv(Q,R,X,Y)
++
++#define _FP_NANFRAC_S		((_FP_QNANBIT_S << 1) - 1)
++#define _FP_NANFRAC_D		((_FP_QNANBIT_D << 1) - 1)
++#define _FP_NANFRAC_Q		((_FP_QNANBIT_Q << 1) - 1), -1
++
++#endif
++
++#ifdef __riscv64
++typedef int TItype __attribute__ ((mode (TI)));
++typedef unsigned int UTItype __attribute__ ((mode (TI)));
++#define TI_BITS (__CHAR_BIT__ * (int)sizeof(TItype))
++#endif
++
++/* The type of the result of a floating point comparison.  This must
++   match __libgcc_cmp_return__ in GCC for the target.  */
++typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
++#define CMPtype __gcc_CMPtype
++
++#define _FP_NANSIGN_S		0
++#define _FP_NANSIGN_D		0
++#define _FP_NANSIGN_Q		0
++
++#define _FP_KEEPNANFRACP 1
++#define _FP_QNANNEGATEDP 0
++
++
++/* From my experiments it seems X is chosen unless one of the
++   NaNs is sNaN,  in which case the result is NANSIGN/NANFRAC.  */
++#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
++  do {								\
++    if ((_FP_FRAC_HIGH_RAW_##fs(X) |				\
++	 _FP_FRAC_HIGH_RAW_##fs(Y)) & _FP_QNANBIT_##fs)		\
++      {								\
++	R##_s = _FP_NANSIGN_##fs;				\
++        _FP_FRAC_SET_##wc(R,_FP_NANFRAC_##fs);			\
++      }								\
++    else							\
++      {								\
++	R##_s = X##_s;						\
++        _FP_FRAC_COPY_##wc(R,X);				\
++      }								\
++    R##_c = FP_CLS_NAN;						\
++  } while (0)
++
++#define _FP_TININESS_AFTER_ROUNDING 0
++
++#define	__LITTLE_ENDIAN	1234
++#define	__BIG_ENDIAN	4321
++
++#if defined __big_endian__
++# define __BYTE_ORDER __BIG_ENDIAN
++#else
++# define __BYTE_ORDER __LITTLE_ENDIAN
++#endif
++
++
++/* Define ALIASNAME as a strong alias for NAME.  */
++# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
++# define _strong_alias(name, aliasname) \
++  extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+diff --git original-gcc/libgcc/config/riscv/t-elf gcc-6.2.0/libgcc/config/riscv/t-elf
+new file mode 100644
+index 0000000..01d5eba
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-elf
+@@ -0,0 +1,6 @@
++LIB2ADD += $(srcdir)/config/riscv/save-restore.S \
++	   $(srcdir)/config/riscv/muldi3.S \
++	   $(srcdir)/config/riscv/multi3.S \
++	   $(srcdir)/config/riscv/div.S \
++	   $(srcdir)/config/riscv/atomic.c \
++
+diff --git original-gcc/libgcc/config/riscv/t-elf32 gcc-6.2.0/libgcc/config/riscv/t-elf32
+new file mode 100644
+index 0000000..83363ce
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-elf32
+@@ -0,0 +1,4 @@
++LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
++
++HOST_LIBGCC2_CFLAGS += -m32
++CRTSTUFF_CFLAGS += -m32
+diff --git original-gcc/libgcc/config/riscv/t-elf64 gcc-6.2.0/libgcc/config/riscv/t-elf64
+new file mode 100644
+index 0000000..f375123
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-elf64
+@@ -0,0 +1 @@
++LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
+diff --git original-gcc/libgcc/config/riscv/t-softfp32 gcc-6.2.0/libgcc/config/riscv/t-softfp32
+new file mode 100644
+index 0000000..e69de29
+diff --git original-gcc/libgcc/config/riscv/t-softfp64 gcc-6.2.0/libgcc/config/riscv/t-softfp64
+new file mode 100644
+index 0000000..61a8bff
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-softfp64
+@@ -0,0 +1,4 @@
++softfp_float_modes += tf
++softfp_int_modes += ti
++softfp_extensions += sftf dftf
++softfp_truncations += tfsf tfdf
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_linux.cc
+index 2cefa20..76dd411 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_linux.cc
+@@ -1136,6 +1136,11 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+   *pc = ucontext->uc_mcontext.pc;
+   *bp = ucontext->uc_mcontext.gregs[30];
+   *sp = ucontext->uc_mcontext.gregs[29];
++# elif defined(__riscv__)
++  ucontext_t *ucontext = (ucontext_t*)context;
++  *pc = ucontext->uc_mcontext.gregs[REG_PC];
++  *bp = ucontext->uc_mcontext.gregs[REG_S0];
++  *sp = ucontext->uc_mcontext.gregs[REG_SP];
+ #else
+ # error "Unsupported arch"
+ #endif
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_platform.h gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform.h
+index 7d0ff28..cdd62d9 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform.h
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform.h
+@@ -113,9 +113,9 @@
+ 
+ // The AArch64 linux port uses the canonical syscall set as mandated by
+ // the upstream linux community for all new ports. Other ports may still
+-// use legacy syscalls.
++// use legacy syscalls.  The RISC-V port also does this.
+ #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+-# if defined(__aarch64__) && SANITIZER_LINUX
++# if (defined(__aarch64__) || defined(__riscv__)) && SANITIZER_LINUX
+ # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+ # else
+ # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+index a1f0432..6c25901 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+@@ -61,7 +61,8 @@ namespace __sanitizer {
+ }  // namespace __sanitizer
+ 
+ #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+-                            && !defined(__mips__) && !defined(__sparc__)
++                            && !defined(__mips__) && !defined(__sparc__)\
++                            && !defined(__riscv__)
+ COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
+ #endif
+ 
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+index b6f90eb..3aa9338 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+@@ -81,6 +81,9 @@ namespace __sanitizer {
+   const unsigned struct_kernel_stat_sz = 144;
+   #endif
+   const unsigned struct_kernel_stat64_sz = 104;
++#elif defined(__riscv__)
++  const unsigned struct_kernel_stat_sz = 128;
++  const unsigned struct_kernel_stat64_sz = 128;
+ #elif defined(__sparc__) && defined(__arch64__)
+   const unsigned struct___old_kernel_stat_sz = 0;
+   const unsigned struct_kernel_stat_sz = 104;
+@@ -109,7 +112,7 @@ namespace __sanitizer {
+ 
+ #if SANITIZER_LINUX || SANITIZER_FREEBSD
+ 
+-#if defined(__powerpc64__)
++#if defined(__powerpc64__) || defined(__riscv__)
+   const unsigned struct___old_kernel_stat_sz = 0;
+ #elif !defined(__sparc__)
+   const unsigned struct___old_kernel_stat_sz = 32;
+@@ -532,7 +535,7 @@ namespace __sanitizer {
+   typedef long __sanitizer___kernel_off_t;
+ #endif
+ 
+-#if defined(__powerpc__) || defined(__mips__)
++#if defined(__powerpc__) || defined(__mips__) || defined(__riscv__)
+   typedef unsigned int __sanitizer___kernel_old_uid_t;
+   typedef unsigned int __sanitizer___kernel_old_gid_t;
+ #else
diff --git a/util/crossgcc/patches/gdb-7.11_amd64.patch b/util/crossgcc/patches/gdb-7.11_amd64.patch
deleted file mode 100644
index ef6b260..0000000
--- a/util/crossgcc/patches/gdb-7.11_amd64.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-diff -urN gdb-7.11.orig/gdb/configure.tgt gdb-7.11/gdb/configure.tgt
---- gdb-7.11.orig/gdb/configure.tgt	2016-02-09 19:19:39.000000000 -0800
-+++ gdb-7.11/gdb/configure.tgt	2016-04-21 17:42:32.628433139 -0700
-@@ -681,6 +681,11 @@
- 			i387-tdep.o i386bsd-tdep.o i386obsd-tdep.o \
- 			obsd-tdep.o bsd-uthread.o solib-svr4.o"
- 	;;
-+x86_64-*-*)
-+	# Target: amd64
-+	gdb_target_obs="amd64-tdep.o i386-tdep.o i387-tdep.o"
-+	;;
-+
- xtensa*-*-linux*)	gdb_target=linux
- 	# Target: GNU/Linux Xtensa
- 	gdb_target_obs="xtensa-tdep.o xtensa-config.o xtensa-linux-tdep.o \
diff --git a/util/crossgcc/patches/gdb-7.11_no-doc.patch b/util/crossgcc/patches/gdb-7.11_no-doc.patch
deleted file mode 100644
index 6cd5e49..0000000
--- a/util/crossgcc/patches/gdb-7.11_no-doc.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -urN gdb-7.11.orig/gdb/Makefile.in gdb-7.11/gdb/Makefile.in
---- gdb-7.11.orig/gdb/Makefile.in	2016-02-24 01:55:15.000000000 -0800
-+++ gdb-7.11/gdb/Makefile.in	2016-04-21 17:44:32.721472633 -0700
-@@ -1092,7 +1092,7 @@
- 
- TSOBS = inflow.o
- 
--SUBDIRS = doc @subdirs@ data-directory $(GNULIB_BUILDDIR)
-+SUBDIRS = @subdirs@ data-directory $(GNULIB_BUILDDIR)
- CLEANDIRS = $(SUBDIRS)
- 
- # List of subdirectories in the build tree that must exist.
diff --git a/util/crossgcc/patches/gdb-7.11_pythonhome.patch b/util/crossgcc/patches/gdb-7.11_pythonhome.patch
deleted file mode 100644
index 9bf88be..0000000
--- a/util/crossgcc/patches/gdb-7.11_pythonhome.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-diff -urN gdb-7.11.orig/gdb/python/python.c gdb-7.11/gdb/python/python.c
---- gdb-7.11.orig/gdb/python/python.c	2016-02-09 19:19:39.000000000 -0800
-+++ gdb-7.11/gdb/python/python.c	2016-04-21 17:45:39.119833428 -0700
-@@ -1748,6 +1748,15 @@
- #endif
- #endif
- 
-+  char readlinkbuffer[BUFSIZ];
-+  int readlinks = readlink("/proc/self/exe", readlinkbuffer, BUFSIZ - 1);
-+  readlinkbuffer[readlinks] = 0;
-+  char *executeablepath = dirname(readlinkbuffer);
-+  char *pythonhome = malloc(strlen(executeablepath) + strlen("/../") + 2);
-+  strcpy(pythonhome, executeablepath);
-+  strcat(pythonhome, "/../");
-+  setenv("PYTHONHOME", pythonhome, 1);
-+
-   Py_Initialize ();
-   PyEval_InitThreads ();
- 
diff --git a/util/crossgcc/patches/gdb-7.12_amd64.patch b/util/crossgcc/patches/gdb-7.12_amd64.patch
new file mode 100644
index 0000000..39e6fab
--- /dev/null
+++ b/util/crossgcc/patches/gdb-7.12_amd64.patch
@@ -0,0 +1,15 @@
+diff -urN gdb-7.12.orig/gdb/configure.tgt gdb-7.12/gdb/configure.tgt
+--- gdb-7.12.orig/gdb/configure.tgt	2016-02-09 19:19:39.000000000 -0800
++++ gdb-7.12/gdb/configure.tgt	2016-04-21 17:42:32.628433139 -0700
+@@ -681,6 +681,11 @@
+ 			i387-tdep.o i386bsd-tdep.o i386obsd-tdep.o \
+ 			obsd-tdep.o bsd-uthread.o solib-svr4.o"
+ 	;;
++x86_64-*-*)
++	# Target: amd64
++	gdb_target_obs="amd64-tdep.o i386-tdep.o i387-tdep.o"
++	;;
++
+ xtensa*-*-linux*)	gdb_target=linux
+ 	# Target: GNU/Linux Xtensa
+ 	gdb_target_obs="xtensa-tdep.o xtensa-config.o xtensa-linux-tdep.o \
diff --git a/util/crossgcc/patches/gdb-7.12_no-doc.patch b/util/crossgcc/patches/gdb-7.12_no-doc.patch
new file mode 100644
index 0000000..2c5e571
--- /dev/null
+++ b/util/crossgcc/patches/gdb-7.12_no-doc.patch
@@ -0,0 +1,12 @@
+diff -urN gdb-7.12.orig/gdb/Makefile.in gdb-7.12/gdb/Makefile.in
+--- gdb-7.12.orig/gdb/Makefile.in	2016-02-24 01:55:15.000000000 -0800
++++ gdb-7.12/gdb/Makefile.in	2016-04-21 17:44:32.721472633 -0700
+@@ -1092,7 +1092,7 @@
+ 
+ TSOBS = inflow.o
+ 
+-SUBDIRS = doc @subdirs@ data-directory $(GNULIB_BUILDDIR)
++SUBDIRS = @subdirs@ data-directory $(GNULIB_BUILDDIR)
+ CLEANDIRS = $(SUBDIRS)
+ 
+ # List of subdirectories in the build tree that must exist.
diff --git a/util/crossgcc/patches/gdb-7.12_pythonhome.patch b/util/crossgcc/patches/gdb-7.12_pythonhome.patch
new file mode 100644
index 0000000..aea8c02
--- /dev/null
+++ b/util/crossgcc/patches/gdb-7.12_pythonhome.patch
@@ -0,0 +1,19 @@
+diff -urN gdb-7.12.orig/gdb/python/python.c gdb-7.12/gdb/python/python.c
+--- gdb-7.12.orig/gdb/python/python.c	2016-02-09 19:19:39.000000000 -0800
++++ gdb-7.12/gdb/python/python.c	2016-04-21 17:45:39.119833428 -0700
+@@ -1748,6 +1748,15 @@
+ #endif
+ #endif
+ 
++  char readlinkbuffer[BUFSIZ];
++  int readlinks = readlink("/proc/self/exe", readlinkbuffer, BUFSIZ - 1);
++  readlinkbuffer[readlinks] = 0;
++  char *executeablepath = dirname(readlinkbuffer);
++  char *pythonhome = malloc(strlen(executeablepath) + strlen("/../") + 2);
++  strcpy(pythonhome, executeablepath);
++  strcat(pythonhome, "/../");
++  setenv("PYTHONHOME", pythonhome, 1);
++
+   Py_Initialize ();
+   PyEval_InitThreads ();
+ 
diff --git a/util/crossgcc/sum/acpica-unix2-20160831.tar.gz.cksum b/util/crossgcc/sum/acpica-unix2-20160831.tar.gz.cksum
deleted file mode 100644
index d7e538b..0000000
--- a/util/crossgcc/sum/acpica-unix2-20160831.tar.gz.cksum
+++ /dev/null
@@ -1 +0,0 @@
-7e7449f15a195fefd72b65b1671df18e4dccf665  tarballs/acpica-unix2-20160831.tar.gz
diff --git a/util/crossgcc/sum/acpica-unix2-20161222.tar.gz.cksum b/util/crossgcc/sum/acpica-unix2-20161222.tar.gz.cksum
new file mode 100644
index 0000000..d857678
--- /dev/null
+++ b/util/crossgcc/sum/acpica-unix2-20161222.tar.gz.cksum
@@ -0,0 +1 @@
+73e57d4d558c9bc831165c71adbff577b526f256  tarballs/acpica-unix2-20161222.tar.gz
diff --git a/util/crossgcc/sum/binutils-2.26.1.tar.bz2.cksum b/util/crossgcc/sum/binutils-2.26.1.tar.bz2.cksum
deleted file mode 100644
index 4bd0bce..0000000
--- a/util/crossgcc/sum/binutils-2.26.1.tar.bz2.cksum
+++ /dev/null
@@ -1 +0,0 @@
-624cd377e3a8eef3db83a56ce289a60f556b3ec2  tarballs/binutils-2.26.1.tar.bz2
diff --git a/util/crossgcc/sum/binutils-2.27.tar.bz2.cksum b/util/crossgcc/sum/binutils-2.27.tar.bz2.cksum
new file mode 100644
index 0000000..a06cecd
--- /dev/null
+++ b/util/crossgcc/sum/binutils-2.27.tar.bz2.cksum
@@ -0,0 +1 @@
+6e472ddae565a2b1447e6f2393809bb8799982cf  tarballs/binutils-2.27.tar.bz2
diff --git a/util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum b/util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum
deleted file mode 100644
index bb05e39..0000000
--- a/util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum
+++ /dev/null
@@ -1 +0,0 @@
-0612270b103941da08376df4d0ef4e5662a2e9eb  tarballs/gcc-5.3.0.tar.bz2
diff --git a/util/crossgcc/sum/gcc-6.3.0.tar.bz2.cksum b/util/crossgcc/sum/gcc-6.3.0.tar.bz2.cksum
new file mode 100644
index 0000000..f3bb227
--- /dev/null
+++ b/util/crossgcc/sum/gcc-6.3.0.tar.bz2.cksum
@@ -0,0 +1 @@
+928ab552666ee08eed645ff20ceb49d139205dea  tarballs/gcc-6.3.0.tar.bz2
diff --git a/util/crossgcc/sum/gdb-7.11.tar.xz.cksum b/util/crossgcc/sum/gdb-7.11.tar.xz.cksum
deleted file mode 100644
index ffe5a1c..0000000
--- a/util/crossgcc/sum/gdb-7.11.tar.xz.cksum
+++ /dev/null
@@ -1 +0,0 @@
-466208d771d97d3dfcf965d5c835a669cff8d847  tarballs/gdb-7.11.tar.xz
diff --git a/util/crossgcc/sum/gdb-7.12.tar.xz.cksum b/util/crossgcc/sum/gdb-7.12.tar.xz.cksum
new file mode 100644
index 0000000..61334c9
--- /dev/null
+++ b/util/crossgcc/sum/gdb-7.12.tar.xz.cksum
@@ -0,0 +1 @@
+1a6a0f2fe04d6ac9ba85048af35610e0fc217300  tarballs/gdb-7.12.tar.xz
diff --git a/util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum b/util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum
deleted file mode 100644
index 348b80f..0000000
--- a/util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum
+++ /dev/null
@@ -1 +0,0 @@
-99d691607613e749aa5d7c0c2a89aeab38fec070  tarballs/gmp-6.1.0.tar.xz
diff --git a/util/crossgcc/sum/gmp-6.1.2.tar.xz.cksum b/util/crossgcc/sum/gmp-6.1.2.tar.xz.cksum
new file mode 100644
index 0000000..774658c
--- /dev/null
+++ b/util/crossgcc/sum/gmp-6.1.2.tar.xz.cksum
@@ -0,0 +1 @@
+9dc6981197a7d92f339192eea974f5eca48fcffe  tarballs/gmp-6.1.2.tar.xz
diff --git a/util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum b/util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum
deleted file mode 100644
index 90f90eb..0000000
--- a/util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum
+++ /dev/null
@@ -1 +0,0 @@
-cedc0055d55b6ee4cd17e1e6119ed412520ff81a  tarballs/mpfr-3.1.4.tar.xz
diff --git a/util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum b/util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum
new file mode 100644
index 0000000..c2ae697
--- /dev/null
+++ b/util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum
@@ -0,0 +1 @@
+c0fab77c6da4cb710c81cc04092fb9bea11a9403  tarballs/mpfr-3.1.5.tar.xz


