HAOUAS Elyes (ehaouas(a)noos.fr) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/17191
-gerrit
commit 3c2cb320fefda4299bdd927c936bcbffe272655c
Author: Elyes HAOUAS <ehaouas(a)noos.fr>
Date: Sun Oct 30 18:30:21 2016 +0100
nb/intel/i945/early_init.c: Add DDR2-667 detection for 945GC
Change-Id: I3d54c88af897a71db757d00288f3968ed2c19151
Signed-off-by: Elyes HAOUAS <ehaouas(a)noos.fr>
---
src/northbridge/intel/i945/early_init.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/northbridge/intel/i945/early_init.c b/src/northbridge/intel/i945/early_init.c
index 4373167..17fd5a4 100644
--- a/src/northbridge/intel/i945/early_init.c
+++ b/src/northbridge/intel/i945/early_init.c
@@ -132,6 +132,9 @@ static void i945_detect_chipset(void)
case 0:
printk(BIOS_DEBUG, "up to DDR2-667");
break;
+ case 2:
+ printk(BIOS_DEBUG, "up to DDR2-667");
+ break;
case 3:
printk(BIOS_DEBUG, "up to DDR2-533");
break;
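
For context, the hunk above makes capability value 2 report the same "up to DDR2-667" string that value 0 already does. Below is a small, illustrative C sketch of the resulting decode, not the actual coreboot function: the helper name, the ddr_cap parameter and the default case are assumptions added for illustration, while the case labels and printk() strings come straight from the diff.

#include <types.h>            /* coreboot's u8 (assumed include for this sketch) */
#include <console/console.h>  /* printk() and BIOS_DEBUG */

/* Illustrative only: how the 945GC DDR2 speed capability field decodes
 * once this patch is applied. Only cases 0, 2 and 3 appear in the hunk;
 * everything else here is an assumption. */
static void print_945gc_ddr2_speed(u8 ddr_cap)
{
	switch (ddr_cap) {
	case 0:
		printk(BIOS_DEBUG, "up to DDR2-667");
		break;
	case 2:	/* newly handled by this patch */
		printk(BIOS_DEBUG, "up to DDR2-667");
		break;
	case 3:
		printk(BIOS_DEBUG, "up to DDR2-533");
		break;
	default:
		printk(BIOS_DEBUG, "unknown DDR2 capability");
		break;
	}
}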
HAOUAS Elyes (ehaouas(a)noos.fr) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/17191
-gerrit
commit 5ec99d0e9b37b23c7641cf8fa005d05c8dca2b54
Author: Elyes HAOUAS <ehaouas(a)noos.fr>
Date: Sun Oct 30 18:30:21 2016 +0100
nb/intel/i945/early_init.c: Add DDR2-667 detection for 945GC
Change-Id: I3d54c88af897a71db757d00288f3968ed2c19151
Signed-off-by: Elyes HAOUAS <ehaouas(a)noos.fr>
---
3rdparty/blobs | 2 +-
3rdparty/chromeec | 2 +-
3rdparty/vboot | 2 +-
src/northbridge/intel/i945/early_init.c | 3 +++
4 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/3rdparty/blobs b/3rdparty/blobs
index 8ad2d63..b0eeddd 160000
--- a/3rdparty/blobs
+++ b/3rdparty/blobs
@@ -1 +1 @@
-Subproject commit 8ad2d6385652e14b6f0d35ab9b474c31ddeb1773
+Subproject commit b0eeddd4f5c583818e66521f2552cd3448b357b2
diff --git a/3rdparty/chromeec b/3rdparty/chromeec
index ea1a869..83b6d69 160000
--- a/3rdparty/chromeec
+++ b/3rdparty/chromeec
@@ -1 +1 @@
-Subproject commit ea1a8699e96425806abdd532d04da254ae093f6e
+Subproject commit 83b6d69732f782e2b295153f959ec36d4a56c024
diff --git a/3rdparty/vboot b/3rdparty/vboot
index ea72ee4..46b77fb 160000
--- a/3rdparty/vboot
+++ b/3rdparty/vboot
@@ -1 +1 @@
-Subproject commit ea72ee454aea5e0f378275fe7114cf683b7db938
+Subproject commit 46b77fb2f04941c869c3a98cd17e9209c36b2917
diff --git a/src/northbridge/intel/i945/early_init.c b/src/northbridge/intel/i945/early_init.c
index 4373167..17fd5a4 100644
--- a/src/northbridge/intel/i945/early_init.c
+++ b/src/northbridge/intel/i945/early_init.c
@@ -132,6 +132,9 @@ static void i945_detect_chipset(void)
case 0:
printk(BIOS_DEBUG, "up to DDR2-667");
break;
+ case 2:
+ printk(BIOS_DEBUG, "up to DDR2-667");
+ break;
case 3:
printk(BIOS_DEBUG, "up to DDR2-533");
break;
Lijian Zhao (lijian.zhao(a)intel.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/17181
-gerrit
commit ba901334e92f1e5a0585980a0b9b4e28eb4282ab
Author: Lijian Zhao <lijian.zhao(a)intel.com>
Date: Fri Oct 28 11:01:09 2016 -0700
soc/intel/apollolake: Add pmc_ipc device support
A dedicated pmc_ipc DSDT entry is required for the pmc_ipc kernel driver.
The ACPI mode entry includes resources for PMC_IPC1, SRAM, ACPI IO and
Punit Mailbox.
BRANCH=None
BUG=chrome-os-partner:57364
TEST=Boot into the OS successfully and check dmesg to see that the
driver has loaded without errors.
Change-Id: Ib0a300febe1e7fc1796bfeca1a04493f932640e1
Signed-off-by: Lijian Zhao <lijian.zhao(a)intel.com>
---
src/soc/intel/apollolake/acpi/pmc_ipc.asl | 61 +++++++++++++++++++++++++++
src/soc/intel/apollolake/acpi/southbridge.asl | 3 ++
2 files changed, 64 insertions(+)
diff --git a/src/soc/intel/apollolake/acpi/pmc_ipc.asl b/src/soc/intel/apollolake/acpi/pmc_ipc.asl
new file mode 100644
index 0000000..0e8e751
--- /dev/null
+++ b/src/soc/intel/apollolake/acpi/pmc_ipc.asl
@@ -0,0 +1,61 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2016 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/iomap.h>
+
+#define MAILBOX_DATA 0x7080
+#define MAILBOX_INTF 0x7084
+#define PMIO_LENGTH 0x80
+#define PMIO_LIMIT 0x480
+
+scope (\_SB) {
+ Device (IPC1)
+ {
+ Name (_HID, "INT34D2")
+ Name (_CID, "INT34D2")
+ Name (_DDN, "Intel(R) IPC1 Controller")
+ Name (RBUF, ResourceTemplate ()
+ {
+ Memory32Fixed (ReadWrite, 0x0, 0x2000, IBAR)
+ Memory32Fixed (ReadWrite, 0x0, 0x4, MDAT)
+ Memory32Fixed (ReadWrite, 0x0, 0x4, MINF)
+ IO (Decode16, ACPI_PMIO_BASE, PMIO_LIMIT,
+ 0x04, PMIO_LENGTH)
+ Memory32Fixed (ReadWrite, 0x0, 0x2000, SBAR)
+ Interrupt (ResourceConsumer, Level, ActiveLow, Exclusive, , , )
+ {
+ PMC_INT
+ }
+ })
+
+ Method (_CRS, 0x0, NotSerialized)
+ {
+ CreateDwordField (^RBUF, ^IBAR._BAS, IBAS)
+ Store (PMC_BAR0, IBAS)
+
+ CreateDwordField (^RBUF, ^MDAT._BAS, MDBA)
+ Store (MCH_BASE_ADDR + MAILBOX_DATA, MDBA)
+ CreateDwordField (^RBUF, ^MINF._BAS, MIBA)
+ Store (MCH_BASE_ADDR + MAILBOX_INTF, MIBA)
+
+ CreateDwordField (^RBUF, ^SBAR._BAS, SBAS)
+ Store (PMC_SRAM_BASE_0, SBAS)
+
+ Return (^RBUF)
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/soc/intel/apollolake/acpi/southbridge.asl b/src/soc/intel/apollolake/acpi/southbridge.asl
index 1c10f1a..e3ee1ae 100644
--- a/src/soc/intel/apollolake/acpi/southbridge.asl
+++ b/src/soc/intel/apollolake/acpi/southbridge.asl
@@ -46,5 +46,8 @@ Scope (\_SB)
/* eMMC */
#include "scs.asl"
+/* PMC IPC controller */
+#include "pmc_ipc.asl"
+
/* PCI _OSC */
#include <soc/intel/common/acpi/pci_osc.asl>
Iru Cai (mytbk920423(a)gmail.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/17190
-gerrit
commit e7e0fe46bd5b253e742d028a82809029e461d1e5
Author: Iru Cai <mytbk920423(a)gmail.com>
Date: Sat Oct 29 23:37:42 2016 +0800
buildgcc: Update GCC to 6.2.0, and update GMP and MPFR
For RISC-V, I rebased riscv-gnu-toolchain on gcc-6_2_0-release and made a diff:
git diff --src-prefix=original-gcc/ --dst-prefix=gcc-6.2.0/ gcc-6_2_0-release
Change-Id: I33b3494d4cf2c8b093480de61ef4d5c074e9ea52
Signed-off-by: Iru Cai <mytbk920423(a)gmail.com>
---
util/crossgcc/buildgcc | 10 +-
util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch | 87 -
util/crossgcc/patches/gcc-5.3.0_gnat.patch | 11 -
util/crossgcc/patches/gcc-5.3.0_libgcc.patch | 57 -
util/crossgcc/patches/gcc-5.3.0_nds32.patch | 17 -
util/crossgcc/patches/gcc-5.3.0_riscv.patch | 10122 --------------------
util/crossgcc/patches/gcc-6.2.0_elf_biarch.patch | 87 +
util/crossgcc/patches/gcc-6.2.0_gnat.patch | 11 +
util/crossgcc/patches/gcc-6.2.0_libgcc.patch | 57 +
util/crossgcc/patches/gcc-6.2.0_nds32.patch | 17 +
util/crossgcc/patches/gcc-6.2.0_riscv.patch | 10428 +++++++++++++++++++++
util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum | 1 -
util/crossgcc/sum/gcc-6.2.0.tar.bz2.cksum | 1 +
util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum | 1 -
util/crossgcc/sum/gmp-6.1.1.tar.xz.cksum | 1 +
util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum | 1 -
util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum | 1 +
17 files changed, 10608 insertions(+), 10302 deletions(-)
diff --git a/util/crossgcc/buildgcc b/util/crossgcc/buildgcc
index 0bd344b..fa9e66e 100755
--- a/util/crossgcc/buildgcc
+++ b/util/crossgcc/buildgcc
@@ -18,8 +18,8 @@
cd $(dirname $0)
-CROSSGCC_DATE="August 31st, 2016"
-CROSSGCC_VERSION="1.43"
+CROSSGCC_DATE="October 29th, 2016"
+CROSSGCC_VERSION="1.44"
CROSSGCC_COMMIT=$( git describe )
# default settings
@@ -34,11 +34,11 @@ SKIPPYTHON=1
BOOTSTRAP=0
# GCC toolchain version numbers
-GMP_VERSION=6.1.0
-MPFR_VERSION=3.1.4
+GMP_VERSION=6.1.1
+MPFR_VERSION=3.1.5
MPC_VERSION=1.0.3
LIBELF_VERSION=0.8.13
-GCC_VERSION=5.3.0
+GCC_VERSION=6.2.0
GCC_AUTOCONF_VERSION=2.69
BINUTILS_VERSION=2.27
GDB_VERSION=7.11
diff --git a/util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch b/util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch
deleted file mode 100644
index 574e151..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_elf_biarch.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-diff -urN gcc-4.9.2/gcc/config/i386/t-elf64 gcc-4.9.2/gcc/config/i386/t-elf64
---- gcc-4.9.2/gcc/config/i386/t-elf64 1969-12-31 16:00:00.000000000 -0800
-+++ gcc-5.3.0/gcc/config/i386/t-elf64 2015-06-17 11:20:08.032513005 -0700
-@@ -0,0 +1,38 @@
-+# Copyright (C) 2002-2014 Free Software Foundation, Inc.
-+#
-+# This file is part of GCC.
-+#
-+# GCC is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 3, or (at your option)
-+# any later version.
-+#
-+# GCC is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with GCC; see the file COPYING3. If not see
-+# <http://www.gnu.org/licenses/ >.
-+
-+# On Debian, Ubuntu and other derivative distributions, the 32bit libraries
-+# are found in /lib32 and /usr/lib32, /lib64 and /usr/lib64 are symlinks to
-+# /lib and /usr/lib, while other distributions install libraries into /lib64
-+# and /usr/lib64. The LSB does not enforce the use of /lib64 and /usr/lib64,
-+# it doesn't tell anything about the 32bit libraries on those systems. Set
-+# MULTILIB_OSDIRNAMES according to what is found on the target.
-+
-+# To support i386, x86-64 and x32 libraries, the directory structrue
-+# should be:
-+#
-+# /lib has i386 libraries.
-+# /lib64 has x86-64 libraries.
-+# /libx32 has x32 libraries.
-+#
-+comma=,
-+MULTILIB_OPTIONS = $(subst $(comma),/,$(TM_MULTILIB_CONFIG))
-+MULTILIB_DIRNAMES = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS)))
-+MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-elf)
-+MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-elf)
-+MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-elf-x32)
-diff -urN gcc-4.9.2/gcc/config.gcc gcc-4.9.2/gcc/config.gcc
---- gcc-4.9.2/gcc/config.gcc 2015-06-17 11:20:57.841008182 -0700
-+++ gcc-5.3.0/gcc/config.gcc 2015-06-17 11:17:24.818890200 -0700
-@@ -1353,6 +1353,30 @@
- ;;
- x86_64-*-elf*)
- tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h"
-+ tmake_file="${tmake_file} i386/t-elf64"
-+ x86_multilibs="${with_multilib_list}"
-+ if test "$x86_multilibs" = "default"; then
-+ case ${with_abi} in
-+ x32 | mx32)
-+ x86_multilibs="mx32"
-+ ;;
-+ *)
-+ x86_multilibs="m64,m32"
-+ ;;
-+ esac
-+ fi
-+ x86_multilibs=`echo $x86_multilibs | sed -e 's/,/ /g'`
-+ for x86_multilib in ${x86_multilibs}; do
-+ case ${x86_multilib} in
-+ m32 | m64 | mx32)
-+ TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${x86_multilib}"
-+ ;;
-+ *)
-+ echo "--with-multilib-list=${x86_with_multilib} not supported."
-+ exit 1
-+ esac
-+ done
-+ TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'`
- ;;
- i[34567]86-*-rdos*)
- tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/rdos.h"
---- gcc-5.3.0/gcc/config/i386/x86-64.h.orig 2015-08-20 17:17:34.555919593 +0200
-+++ gcc-5.3.0/gcc/config/i386/x86-64.h 2015-08-20 17:17:42.615908670 +0200
-@@ -49,7 +49,7 @@
- #define WCHAR_TYPE_SIZE 32
-
- #undef ASM_SPEC
--#define ASM_SPEC "%{m32:--32} %{m64:--64} %{mx32:--x32}"
-+#define ASM_SPEC "%{m16|m32:--32} %{m64:--64} %{mx32:--x32}"
-
- #undef ASM_OUTPUT_ALIGNED_BSS
- #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
diff --git a/util/crossgcc/patches/gcc-5.3.0_gnat.patch b/util/crossgcc/patches/gcc-5.3.0_gnat.patch
deleted file mode 100644
index 167d118..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_gnat.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- gcc-5.3.0/gcc/ada/gcc-interface/Make-lang.in.bak 2015-08-24 16:23:25.004493665 +0200
-+++ gcc-5.3.0/gcc/ada/gcc-interface/Make-lang.in 2015-08-24 17:53:52.496636113 +0200
-@@ -45,7 +45,7 @@
-
-
- # Extra flags to pass to recursive makes.
--COMMON_ADAFLAGS= -gnatpg
-+COMMON_ADAFLAGS= -gnatpg -gnatwG
- ifeq ($(TREECHECKING),)
- CHECKING_ADAFLAGS=
- else
diff --git a/util/crossgcc/patches/gcc-5.3.0_libgcc.patch b/util/crossgcc/patches/gcc-5.3.0_libgcc.patch
deleted file mode 100644
index fd4b254..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_libgcc.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-diff -urN gcc-5.2.0.orig/libgcc/config/t-hardfp gcc-5.2.0/libgcc/config/t-hardfp
---- gcc-5.2.0.orig/libgcc/config/t-hardfp 2015-01-05 04:33:28.000000000 -0800
-+++ gcc-5.3.0/libgcc/config/t-hardfp 2016-04-06 12:04:51.000000000 -0700
-@@ -59,21 +59,52 @@
-
- hardfp_func_list := $(filter-out $(hardfp_exclusions),$(hardfp_func_list))
-
-+HOST_OS ?= $(shell uname)
-+
- # Regexp for matching a floating-point mode.
-+ifeq ($(HOST_OS), Darwin)
-+hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
-+else
-+ifeq ($(HOST_OS), FreeBSD)
-+hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
-+else
- hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /\\|/g')
-+endif
-+endif
-
- # Regexp for matching the end of a function name, after the last
- # floating-point mode.
-+ifeq ($(HOST_OS), Darwin)
-+hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
-+else
-+ifeq ($(HOST_OS), FreeBSD)
-+hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
-+else
- hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /\\|/g')
-+endif
-+endif
-
- # Add -D options to define:
- # FUNC: the function name (e.g. __addsf3)
- # OP: the function name without the leading __ and with the last
- # floating-point mode removed (e.g. add3)
- # TYPE: the last floating-point mode (e.g. sf)
-+
-+ifeq ($(HOST_OS), Darwin)
- hardfp_defines_for = \
- $(shell echo $1 | \
-- sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+ sed -E 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+else
-+ifeq ($(HOST_OS), FreeBSD)
-+hardfp_defines_for = \
-+ $(shell echo $1 | \
-+ sed -r 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+else
-+hardfp_defines_for = \
-+ $(shell echo $1 | \
-+ sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
-+endif
-+endif
-
- hardfp-o = $(patsubst %,%$(objext),$(hardfp_func_list))
- $(hardfp-o): %$(objext): $(srcdir)/config/hardfp.c
diff --git a/util/crossgcc/patches/gcc-5.3.0_nds32.patch b/util/crossgcc/patches/gcc-5.3.0_nds32.patch
deleted file mode 100644
index 34f2573..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_nds32.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-diff -urN gcc-5.3.0.orig/gcc/config/nds32/nds32.md gcc-5.3.0/gcc/config/nds32/nds32.md
---- gcc-5.3.0.orig/gcc/config/nds32/nds32.md 2015-01-15 22:45:09.000000000 -0800
-+++ gcc-5.3.0/gcc/config/nds32/nds32.md 2016-04-14 22:09:09.000000000 -0700
-@@ -2289,11 +2289,11 @@
- emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2],
- operands[4]));
-
-- operands[5] = gen_reg_rtx (SImode);
-+ rtx tmp = gen_reg_rtx (SImode);
- /* Step C, D, E, and F, using another temporary register operands[5]. */
- emit_jump_insn (gen_casesi_internal (operands[0],
- operands[3],
-- operands[5]));
-+ tmp));
- DONE;
- })
-
diff --git a/util/crossgcc/patches/gcc-5.3.0_riscv.patch b/util/crossgcc/patches/gcc-5.3.0_riscv.patch
deleted file mode 100644
index 7e2e828..0000000
--- a/util/crossgcc/patches/gcc-5.3.0_riscv.patch
+++ /dev/null
@@ -1,10122 +0,0 @@
---- original-gcc/gcc/config.gcc
-+++ gcc-5.3.0/gcc/config.gcc
-@@ -439,6 +439,10 @@ powerpc*-*-*)
- esac
- extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
- ;;
-+riscv*)
-+ cpu_type=riscv
-+ need_64bit_hwint=yes
-+ ;;
- rs6000*-*-*)
- extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
- ;;
-@@ -1982,6 +1986,34 @@ microblaze*-*-elf)
- cxx_target_objs="${cxx_target_objs} microblaze-c.o"
- tmake_file="${tmake_file} microblaze/t-microblaze"
- ;;
-+riscv32*-*-linux*)
-+ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h riscv/default-32.h ${tm_file} riscv/linux.h riscv/linux64.h"
-+ tmake_file="${tmake_file} riscv/t-linux64"
-+ gnu_ld=yes
-+ gas=yes
-+ gcc_cv_initfini_array=yes
-+ ;;
-+riscv*-*-linux*)
-+ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h riscv/linux64.h"
-+ tmake_file="${tmake_file} riscv/t-linux64"
-+ gnu_ld=yes
-+ gas=yes
-+ gcc_cv_initfini_array=yes
-+ ;;
-+riscv32*-*-elf*)
-+ tm_file="elfos.h newlib-stdint.h riscv/default-32.h ${tm_file} riscv/elf.h"
-+ tmake_file="${tmake_file} riscv/t-elf"
-+ gnu_ld=yes
-+ gas=yes
-+ gcc_cv_initfini_array=yes
-+ ;;
-+riscv*-*-elf*)
-+ tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
-+ tmake_file="${tmake_file} riscv/t-elf"
-+ gnu_ld=yes
-+ gas=yes
-+ gcc_cv_initfini_array=yes
-+ ;;
- mips*-*-netbsd*) # NetBSD/mips, either endian.
- target_cpu_default="MASK_ABICALLS"
- tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
-@@ -3866,6 +3898,31 @@ case "${target}" in
- done
- ;;
-
-+ riscv*-*-*)
-+ supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64"
-+
-+ case ${with_float} in
-+ "" | soft | hard)
-+ # OK
-+ ;;
-+ *)
-+ echo "Unknown floating point type used in --with-float=$with_float" 1>&2
-+ exit 1
-+ ;;
-+ esac
-+
-+ case ${with_abi} in
-+ "" | 32 | 64)
-+ # OK
-+ ;;
-+ *)
-+ echo "Unknown ABI used in --with-abi=$with_abi" 1>&2
-+ exit 1
-+ ;;
-+ esac
-+
-+ ;;
-+
- mips*-*-*)
- supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
-
---- original-gcc/gcc/configure
-+++ gcc-5.3.0/gcc/configure
-@@ -23717,6 +23717,25 @@ x3: .space 4
- tls_first_minor=14
- tls_as_opt="-a32 --fatal-warnings"
- ;;
-+ riscv*-*-*)
-+ conftest_s='
-+ .section .tdata,"awT",@progbits
-+x:
-+ .word 2
-+ .text
-+ la.tls.gd a0,x
-+ la.tls.ie a1,x
-+ lui a0,%tls_ie_pcrel_hi(x)
-+ lw a0,%pcrel_lo(x)(a0)
-+ add a0,a0,tp
-+ lw a0,0(a0)
-+ lui a0,%tprel_hi(x)
-+ add a0,a0,tp,%tprel_add(x)
-+ lw a0,%tprel_lo(x)(a0)'
-+ tls_first_major=2
-+ tls_first_minor=21
-+ tls_as_opt='-m32 --fatal-warnings'
-+ ;;
- s390-*-*)
- conftest_s='
- .section ".tdata","awT",@progbits
---- original-gcc/gcc/configure.ac
-+++ gcc-5.3.0/gcc/configure.ac
-@@ -3263,6 +3263,25 @@ x3: .space 4
- tls_first_minor=14
- tls_as_opt="-a32 --fatal-warnings"
- ;;
-+ riscv*-*-*)
-+ conftest_s='
-+ .section .tdata,"awT",@progbits
-+x:
-+ .word 2
-+ .text
-+ la.tls.gd a0,x
-+ la.tls.ie a1,x
-+ lui a0,%tls_ie_pcrel_hi(x)
-+ lw a0,%pcrel_lo(x)(a0)
-+ add a0,a0,tp
-+ lw a0,0(a0)
-+ lui a0,%tprel_hi(x)
-+ add a0,a0,tp,%tprel_add(x)
-+ lw a0,%tprel_lo(x)(a0)'
-+ tls_first_major=2
-+ tls_first_minor=21
-+ tls_as_opt='-m32 --fatal-warnings'
-+ ;;
- s390-*-*)
- conftest_s='
- .section ".tdata","awT",@progbits
---- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
-@@ -6,6 +6,9 @@
- #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
- /* On PPC division by zero does not trap. */
- # define DO_TEST 0
-+#elif defined (__riscv__)
-+ /* On RISC-V division by zero does not trap. */
-+# define DO_TEST 0
- #elif defined (__SPU__)
- /* On SPU division by zero does not trap. */
- # define DO_TEST 0
---- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.dg/20020312-2.c
-@@ -66,6 +66,8 @@ extern void abort (void);
- # else
- # define PIC_REG "30"
- # endif
-+#elif defined(__riscv__)
-+/* No pic register. */
- #elif defined(__RX__)
- /* No pic register. */
- #elif defined(__s390__)
---- original-gcc/gcc/testsuite/gcc.dg/20040813-1.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.dg/20040813-1.c
-@@ -2,7 +2,7 @@
- /* Contributed by Devang Patel <dpatel(a)apple.com> */
-
- /* { dg-do compile } */
--/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* nios2-*-* *-*-vxworks* nvptx-*-* } { "*" } { "" } } */
-+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* riscv*-*-* tile*-*-* nios2-*-* *-*-vxworks* nvptx-*-* } { "*" } { "" } } */
- /* { dg-options "-gstabs" } */
-
- int
---- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
-+++ gcc-5.3.0/gcc/testsuite/gcc.dg/stack-usage-1.c
-@@ -61,6 +61,8 @@
- # else
- # define SIZE 240
- # endif
-+#elif defined (__riscv__)
-+# define SIZE 240
- #elif defined (__AVR__)
- # define SIZE 254
- #elif defined (__s390x__)
---- original-gcc/libatomic/configure.tgt
-+++ gcc-5.3.0/libatomic/configure.tgt
-@@ -33,6 +33,7 @@ case "${target_cpu}" in
- ARCH=alpha
- ;;
- rs6000 | powerpc*) ARCH=powerpc ;;
-+ riscv*) ARCH=riscv ;;
- sh*) ARCH=sh ;;
-
- arm*)
---- original-gcc/libgcc/config.host
-+++ gcc-5.3.0/libgcc/config.host
-@@ -167,6 +167,9 @@ powerpc*-*-*)
- ;;
- rs6000*-*-*)
- ;;
-+riscv*)
-+ cpu_type=riscv
-+ ;;
- sparc64*-*-*)
- cpu_type=sparc
- ;;
-@@ -1064,6 +1067,14 @@ powerpcle-*-eabi*)
- tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
- extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
- ;;
-+riscv*-*-linux*)
-+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-elf riscv/t-elf${host_address}"
-+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
-+ ;;
-+riscv*-*-*)
-+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-elf riscv/t-elf${host_address}"
-+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
-+ ;;
- rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
- md_unwind_header=rs6000/aix-unwind.h
- tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
---- original-gcc/libsanitizer/asan/asan_linux.cc
-+++ gcc-5.3.0/libsanitizer/asan/asan_linux.cc
-@@ -213,6 +213,11 @@ void GetPcSpBp(void *context, uptr *pc,
- *pc = ucontext->uc_mcontext.gregs[31];
- *bp = ucontext->uc_mcontext.gregs[30];
- *sp = ucontext->uc_mcontext.gregs[29];
-+# elif defined(__riscv__)
-+ ucontext_t *ucontext = (ucontext_t*)context;
-+ *pc = ucontext->uc_mcontext.gregs[REG_PC];
-+ *bp = ucontext->uc_mcontext.gregs[REG_S0];
-+ *sp = ucontext->uc_mcontext.gregs[REG_SP];
- #else
- # error "Unsupported arch"
- #endif
---- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
-@@ -61,7 +61,8 @@ namespace __sanitizer {
- } // namespace __sanitizer
-
- #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
-- && !defined(__mips__) && !defined(__sparc__)
-+ && !defined(__mips__) && !defined(__sparc__)\
-+ && !defined(__riscv__)
- COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
- #endif
-
---- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
-@@ -72,6 +72,10 @@ namespace __sanitizer {
- const unsigned struct_kernel_stat_sz = 144;
- #endif
- const unsigned struct_kernel_stat64_sz = 104;
-+#elif defined(__riscv__)
-+ const unsigned struct___old_kernel_stat_sz = 0;
-+ const unsigned struct_kernel_stat_sz = 128;
-+ const unsigned struct_kernel_stat64_sz = 128;
- #elif defined(__sparc__) && defined(__arch64__)
- const unsigned struct___old_kernel_stat_sz = 0;
- const unsigned struct_kernel_stat_sz = 104;
-@@ -511,7 +515,7 @@ namespace __sanitizer {
- typedef long __sanitizer___kernel_off_t;
- #endif
-
--#if defined(__powerpc__) || defined(__mips__)
-+#if defined(__powerpc__) || defined(__mips__) || defined(__riscv__)
- typedef unsigned int __sanitizer___kernel_old_uid_t;
- typedef unsigned int __sanitizer___kernel_old_gid_t;
- #else
-diff -ru gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform.h gcc-5.1.0/libsanitizer/sanitizer_common/sanitizer_platform.h
---- gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform.h 2015-05-13 19:36:27.061421043 -0700
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform.h 2015-05-13 19:44:19.274355577 -0700
-@@ -98,9 +98,9 @@
-
- // The AArch64 linux port uses the canonical syscall set as mandated by
- // the upstream linux community for all new ports. Other ports may still
--// use legacy syscalls.
-+// use legacy syscalls. The RISC-V port also does this.
- #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
--# if defined(__aarch64__) && SANITIZER_LINUX
-+# if (defined(__aarch64__) || defined(__riscv__)) && SANITIZER_LINUX
- # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
- # else
- # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
-diff -ru gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc-5.1.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
---- gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2015-05-13 19:36:27.061421043 -0700
-+++ gcc-5.3.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2015-05-13 19:39:13.515487834 -0700
-@@ -73,7 +73,6 @@
- #endif
- const unsigned struct_kernel_stat64_sz = 104;
- #elif defined(__riscv__)
-- const unsigned struct___old_kernel_stat_sz = 0;
- const unsigned struct_kernel_stat_sz = 128;
- const unsigned struct_kernel_stat64_sz = 128;
- #elif defined(__sparc__) && defined(__arch64__)
-@@ -104,7 +103,7 @@
-
- #if SANITIZER_LINUX || SANITIZER_FREEBSD
-
--#if defined(__powerpc64__)
-+#if defined(__powerpc64__) || defined(__riscv__)
- const unsigned struct___old_kernel_stat_sz = 0;
- #elif !defined(__sparc__)
- const unsigned struct___old_kernel_stat_sz = 32;
---- original-gcc/libstdc++-v3/configure
-+++ gcc-5.3.0/libstdc++-v3/configure
-@@ -16646,7 +16646,7 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
- # Long term, -std=c++0x could be even better, could manage to explicitly
- # request C99 facilities to the underlying C headers.
- ac_save_CXXFLAGS="$CXXFLAGS"
-- CXXFLAGS="$CXXFLAGS -std=c++98"
-+ CXXFLAGS="$CXXFLAGS -std=gnu++98"
- ac_save_LIBS="$LIBS"
- ac_save_gcc_no_link="$gcc_no_link"
-
-@@ -17268,9 +17268,11 @@ rm -f core conftest.err conftest.$ac_obj
- $as_echo "$glibcxx_cv_c99_wchar" >&6; }
- fi
-
-+ # For newlib, don't check complex since missing c99 functions, but
-+ # rest of c99 stuff is there so don't loose it
- # Option parsed, now set things appropriately.
- if test x"$glibcxx_cv_c99_math" = x"no" ||
-- test x"$glibcxx_cv_c99_complex" = x"no" ||
-+ # test x"$glibcxx_cv_c99_complex" = x"no" ||
- test x"$glibcxx_cv_c99_stdio" = x"no" ||
- test x"$glibcxx_cv_c99_stdlib" = x"no" ||
- test x"$glibcxx_cv_c99_wchar" = x"no"; then
-diff -urN empty/gcc/common/config/riscv/riscv-common.c gcc-5.3.0/gcc/common/config/riscv/riscv-common.c
---- empty/gcc/common/config/riscv/riscv-common.c 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/common/config/riscv/riscv-common.c 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,139 @@
-+/* Common hooks for RISC-V.
-+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/ >. */
-+
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "common/common-target.h"
-+#include "common/common-target-def.h"
-+#include "opts.h"
-+#include "flags.h"
-+#include "errors.h"
-+
-+/* Parse a RISC-V ISA string into an option mask. */
-+
-+static void
-+riscv_parse_arch_string (const char *isa, int *flags)
-+{
-+ const char *p = isa;
-+
-+ if (strncmp (p, "RV32", 4) == 0)
-+ *flags |= MASK_32BIT, p += 4;
-+ else if (strncmp (p, "RV64", 4) == 0)
-+ *flags &= ~MASK_32BIT, p += 4;
-+
-+ if (*p++ != 'I')
-+ {
-+ error ("-march=%s: ISA strings must begin with I, RV32I, or RV64I", isa);
-+ return;
-+ }
-+
-+ *flags &= ~MASK_MULDIV;
-+ if (*p == 'M')
-+ *flags |= MASK_MULDIV, p++;
-+
-+ *flags &= ~MASK_ATOMIC;
-+ if (*p == 'A')
-+ *flags |= MASK_ATOMIC, p++;
-+
-+ *flags |= MASK_SOFT_FLOAT_ABI;
-+ if (*p == 'F')
-+ *flags &= ~MASK_SOFT_FLOAT_ABI, p++;
-+
-+ if (*p == 'D')
-+ {
-+ p++;
-+ if (!TARGET_HARD_FLOAT)
-+ {
-+ error ("-march=%s: the D extension requires the F extension", isa);
-+ return;
-+ }
-+ }
-+ else if (TARGET_HARD_FLOAT)
-+ {
-+ error ("-march=%s: single-precision-only is not yet supported", isa);
-+ return;
-+ }
-+
-+ *flags &= ~MASK_RVC;
-+ if (*p == 'C')
-+ *flags |= MASK_RVC, p++;
-+
-+ /* FIXME: For now we just stop parsing when faced with a
-+ non-standard RISC-V ISA extension, partially becauses of a
-+ problem with the naming scheme. */
-+ if (*p == 'X')
-+ return;
-+
-+ if (*p)
-+ {
-+ error ("-march=%s: unsupported ISA substring %s", isa, p);
-+ return;
-+ }
-+}
-+
-+static int
-+riscv_flags_from_arch_string (const char *isa)
-+{
-+ int flags = 0;
-+ riscv_parse_arch_string (isa, &flags);
-+ return flags;
-+}
-+
-+/* Implement TARGET_HANDLE_OPTION. */
-+
-+static bool
-+riscv_handle_option (struct gcc_options *opts,
-+ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
-+ const struct cl_decoded_option *decoded,
-+ location_t loc ATTRIBUTE_UNUSED)
-+{
-+ switch (decoded->opt_index)
-+ {
-+ case OPT_march_:
-+ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
-+ return true;
-+
-+ default:
-+ return true;
-+ }
-+}
-+
-+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
-+static const struct default_options riscv_option_optimization_table[] =
-+ {
-+ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
-+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
-+ { OPT_LEVELS_NONE, 0, NULL, 0 }
-+ };
-+
-+#undef TARGET_OPTION_OPTIMIZATION_TABLE
-+#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
-+
-+#undef TARGET_DEFAULT_TARGET_FLAGS
-+#define TARGET_DEFAULT_TARGET_FLAGS \
-+ (TARGET_DEFAULT \
-+ | riscv_flags_from_arch_string (RISCV_ARCH_STRING_DEFAULT) \
-+ | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
-+
-+#undef TARGET_HANDLE_OPTION
-+#define TARGET_HANDLE_OPTION riscv_handle_option
-+
-+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
-diff -urN empty/gcc/config/riscv/constraints.md gcc-5.3.0/gcc/config/riscv/constraints.md
---- empty/gcc/config/riscv/constraints.md 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/constraints.md 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,93 @@
-+;; Constraint definitions for RISC-V target.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+;; GNU General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/ >.
-+
-+;; Register constraints
-+
-+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
-+ "A floating-point register (if available).")
-+
-+(define_register_constraint "b" "ALL_REGS"
-+ "@internal")
-+
-+(define_register_constraint "j" "T_REGS"
-+ "@internal")
-+
-+(define_register_constraint "l" "JALR_REGS"
-+ "@internal")
-+
-+;; Integer constraints
-+
-+(define_constraint "Z"
-+ "@internal"
-+ (and (match_code "const_int")
-+ (match_test "1")))
-+
-+(define_constraint "I"
-+ "An I-type 12-bit signed immediate."
-+ (and (match_code "const_int")
-+ (match_test "SMALL_OPERAND (ival)")))
-+
-+(define_constraint "J"
-+ "Integer zero."
-+ (and (match_code "const_int")
-+ (match_test "ival == 0")))
-+
-+;; Floating-point constraints
-+
-+(define_constraint "G"
-+ "Floating-point zero."
-+ (and (match_code "const_double")
-+ (match_test "op == CONST0_RTX (mode)")))
-+
-+;; General constraints
-+
-+(define_constraint "Q"
-+ "@internal"
-+ (match_operand 0 "const_arith_operand"))
-+
-+(define_memory_constraint "A"
-+ "An address that is held in a general-purpose register."
-+ (and (match_code "mem")
-+ (match_test "GET_CODE(XEXP(op,0)) == REG")))
-+
-+(define_constraint "S"
-+ "@internal
-+ A constant call address."
-+ (and (match_operand 0 "call_insn_operand")
-+ (match_test "CONSTANT_P (op)")))
-+
-+(define_constraint "T"
-+ "@internal
-+ A constant @code{move_operand}."
-+ (and (match_operand 0 "move_operand")
-+ (match_test "CONSTANT_P (op)")))
-+
-+(define_memory_constraint "W"
-+ "@internal
-+ A memory address based on a member of @code{BASE_REG_CLASS}."
-+ (and (match_code "mem")
-+ (match_operand 0 "memory_operand")))
-+
-+(define_constraint "YG"
-+ "@internal
-+ A vector zero."
-+ (and (match_code "const_vector")
-+ (match_test "op == CONST0_RTX (mode)")))
-diff -urN empty/gcc/config/riscv/default-32.h gcc-5.3.0/gcc/config/riscv/default-32.h
---- empty/gcc/config/riscv/default-32.h 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/default-32.h 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,22 @@
-+/* Definitions of target machine for GCC, for RISC-V,
-+ defaulting to 32-bit code generation.
-+
-+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/ >. */
-+
-+#define TARGET_64BIT_DEFAULT 0
-diff -urN empty/gcc/config/riscv/elf.h gcc-5.3.0/gcc/config/riscv/elf.h
---- empty/gcc/config/riscv/elf.h 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/elf.h 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,31 @@
-+/* Target macros for riscv*-elf targets.
-+ Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
-+ Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/ >. */
-+
-+/* Leave the linker script to choose the appropriate libraries. */
-+#undef LIB_SPEC
-+#define LIB_SPEC ""
-+
-+#undef STARTFILE_SPEC
-+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
-+
-+#undef ENDFILE_SPEC
-+#define ENDFILE_SPEC "crtend%O%s"
-+
-+#define NO_IMPLICIT_EXTERN_C 1
-diff -urN empty/gcc/config/riscv/generic.md gcc-5.3.0/gcc/config/riscv/generic.md
---- empty/gcc/config/riscv/generic.md 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/generic.md 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,78 @@
-+;; Generic DFA-based pipeline description for RISC-V targets.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+
-+;; This file is part of GCC.
-+
-+;; GCC is free software; you can redistribute it and/or modify it
-+;; under the terms of the GNU General Public License as published
-+;; by the Free Software Foundation; either version 3, or (at your
-+;; option) any later version.
-+
-+;; GCC is distributed in the hope that it will be useful, but WITHOUT
-+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
-+;; License for more details.
-+
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/ >.
-+
-+
-+(define_automaton "pipe0")
-+(define_cpu_unit "alu" "pipe0")
-+(define_cpu_unit "imuldiv" "pipe0")
-+(define_cpu_unit "fdivsqrt" "pipe0")
-+
-+(define_insn_reservation "generic_alu" 1
-+ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
-+ "alu")
-+
-+(define_insn_reservation "generic_load" 3
-+ (eq_attr "type" "load,fpload")
-+ "alu")
-+
-+(define_insn_reservation "generic_store" 1
-+ (eq_attr "type" "store,fpstore")
-+ "alu")
-+
-+(define_insn_reservation "generic_xfer" 3
-+ (eq_attr "type" "mfc,mtc,fcvt,fmove,fcmp")
-+ "alu")
-+
-+(define_insn_reservation "generic_branch" 1
-+ (eq_attr "type" "branch,jump,call")
-+ "alu")
-+
-+(define_insn_reservation "generic_imul" 10
-+ (eq_attr "type" "imul")
-+ "imuldiv*10")
-+
-+(define_insn_reservation "generic_idivsi" 34
-+ (and (eq_attr "type" "idiv")
-+ (eq_attr "mode" "SI"))
-+ "imuldiv*34")
-+
-+(define_insn_reservation "generic_idivdi" 66
-+ (and (eq_attr "type" "idiv")
-+ (eq_attr "mode" "DI"))
-+ "imuldiv*66")
-+
-+(define_insn_reservation "generic_fmul_single" 5
-+ (and (eq_attr "type" "fadd,fmul,fmadd")
-+ (eq_attr "mode" "SF"))
-+ "alu")
-+
-+(define_insn_reservation "generic_fmul_double" 7
-+ (and (eq_attr "type" "fadd,fmul,fmadd")
-+ (eq_attr "mode" "DF"))
-+ "alu")
-+
-+(define_insn_reservation "generic_fdiv" 20
-+ (eq_attr "type" "fdiv")
-+ "fdivsqrt*20")
-+
-+(define_insn_reservation "generic_fsqrt" 25
-+ (eq_attr "type" "fsqrt")
-+ "fdivsqrt*25")
-diff -urN empty/gcc/config/riscv/linux64.h gcc-5.3.0/gcc/config/riscv/linux64.h
---- empty/gcc/config/riscv/linux64.h 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/linux64.h 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,43 @@
-+/* Definitions for 64-bit RISC-V GNU/Linux systems with ELF format.
-+ Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
-+ Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/ >. */
-+
-+/* Force the default ABI flags onto the command line
-+ in order to make the other specs easier to write. */
-+#undef LIB_SPEC
-+#define LIB_SPEC "\
-+%{pthread:-lpthread} \
-+%{shared:-lc} \
-+%{!shared: \
-+ %{profile:-lc_p} %{!profile:-lc}}"
-+
-+#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
-+#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
-+
-+#undef LINK_SPEC
-+#define LINK_SPEC "\
-+%{shared} \
-+ %{!shared: \
-+ %{!static: \
-+ %{rdynamic:-export-dynamic} \
-+ %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
-+ %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
-+ %{static:-static}} \
-+%{" OPT_ARCH64 ":-melf64lriscv} \
-+%{" OPT_ARCH32 ":-melf32lriscv}"
-diff -urN empty/gcc/config/riscv/linux.h gcc-5.3.0/gcc/config/riscv/linux.h
---- empty/gcc/config/riscv/linux.h 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/linux.h 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,60 @@
-+/* Definitions for RISC-V GNU/Linux systems with ELF format.
-+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
-+ 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/ >. */
-+
-+#undef WCHAR_TYPE
-+#define WCHAR_TYPE "int"
-+
-+#undef WCHAR_TYPE_SIZE
-+#define WCHAR_TYPE_SIZE 32
-+
-+#define TARGET_OS_CPP_BUILTINS() \
-+ do { \
-+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
-+ /* The GNU C++ standard library requires this. */ \
-+ if (c_dialect_cxx ()) \
-+ builtin_define ("_GNU_SOURCE"); \
-+ } while (0)
-+
-+#undef SUBTARGET_CPP_SPEC
-+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
-+
-+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
-+
-+/* Borrowed from sparc/linux.h */
-+#undef LINK_SPEC
-+#define LINK_SPEC \
-+ "%{shared:-shared} \
-+ %{!shared: \
-+ %{!static: \
-+ %{rdynamic:-export-dynamic} \
-+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
-+ %{static:-static}}"
-+
-+#undef LIB_SPEC
-+#define LIB_SPEC "\
-+%{pthread:-lpthread} \
-+%{shared:-lc} \
-+%{!shared: \
-+ %{profile:-lc_p} %{!profile:-lc}}"
-+
-+/* Similar to standard Linux, but adding -ffast-math support. */
-+#undef ENDFILE_SPEC
-+#define ENDFILE_SPEC \
-+ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
-diff -urN empty/gcc/config/riscv/peephole.md gcc-5.3.0/gcc/config/riscv/peephole.md
---- empty/gcc/config/riscv/peephole.md 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/peephole.md 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,121 @@
-+;;........................
-+;; DI -> SI optimizations
-+;;........................
-+
-+;; Simplify (int)(a + 1), etc.
-+(define_peephole2
-+ [(set (match_operand:DI 0 "register_operand")
-+ (match_operator:DI 4 "modular_operator"
-+ [(match_operand:DI 1 "register_operand")
-+ (match_operand:DI 2 "arith_operand")]))
-+ (set (match_operand:SI 3 "register_operand")
-+ (truncate:SI (match_dup 0)))]
-+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
-+ && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
-+ [(set (match_dup 3)
-+ (truncate:SI
-+ (match_op_dup:DI 4
-+ [(match_operand:DI 1 "register_operand")
-+ (match_operand:DI 2 "arith_operand")])))])
-+
-+;; Simplify (int)a + 1, etc.
-+(define_peephole2
-+ [(set (match_operand:SI 0 "register_operand")
-+ (truncate:SI (match_operand:DI 1 "register_operand")))
-+ (set (match_operand:SI 3 "register_operand")
-+ (match_operator:SI 4 "modular_operator"
-+ [(match_dup 0)
-+ (match_operand:SI 2 "arith_operand")]))]
-+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
-+ [(set (match_dup 3)
-+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
-+
-+;; Simplify -(int)a, etc.
-+(define_peephole2
-+ [(set (match_operand:SI 0 "register_operand")
-+ (truncate:SI (match_operand:DI 2 "register_operand")))
-+ (set (match_operand:SI 3 "register_operand")
-+ (match_operator:SI 4 "modular_operator"
-+ [(match_operand:SI 1 "reg_or_0_operand")
-+ (match_dup 0)]))]
-+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
-+ [(set (match_dup 3)
-+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
-+
-+;; Simplify (unsigned long)(unsigned int)a << const
-+(define_peephole2
-+ [(set (match_operand:DI 0 "register_operand")
-+ (ashift:DI (match_operand:DI 1 "register_operand")
-+ (match_operand 2 "const_int_operand")))
-+ (set (match_operand:DI 3 "register_operand")
-+ (lshiftrt:DI (match_dup 0) (match_dup 2)))
-+ (set (match_operand:DI 4 "register_operand")
-+ (ashift:DI (match_dup 3) (match_operand 5 "const_int_operand")))]
-+ "TARGET_64BIT
-+ && INTVAL (operands[5]) < INTVAL (operands[2])
-+ && (REGNO (operands[3]) == REGNO (operands[4])
-+ || peep2_reg_dead_p (3, operands[3]))"
-+ [(set (match_dup 0)
-+ (ashift:DI (match_dup 1) (match_dup 2)))
-+ (set (match_dup 4)
-+ (lshiftrt:DI (match_dup 0) (match_operand 5)))]
-+{
-+ operands[5] = GEN_INT (INTVAL (operands[2]) - INTVAL (operands[5]));
-+})
-+
-+;; Simplify PIC loads to static variables.
-+;; These will go away once we figure out how to emit auipc discretely.
-+(define_insn "*local_pic_load<mode>"
-+ [(set (match_operand:ANYI 0 "register_operand" "=r")
-+ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
-+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+ "<load>\t%0,%1"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_load<mode>"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
-+ (clobber (match_scratch:DI 2 "=&r"))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+ "<load>\t%0,%1,%2"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_load<mode>"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
-+ (clobber (match_scratch:SI 2 "=&r"))]
-+ "TARGET_HARD_FLOAT && !TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+ "<load>\t%0,%1,%2"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_loadu<mode>"
-+ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
-+ (zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
-+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
-+ "<load>u\t%0,%1"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storedi<mode>"
-+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
-+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
-+ (clobber (match_scratch:DI 2 "=&r"))]
-+ "TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+ "<store>\t%z1,%0,%2"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storesi<mode>"
-+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
-+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
-+ (clobber (match_scratch:SI 2 "=&r"))]
-+ "!TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+ "<store>\t%z1,%0,%2"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storedi<mode>"
-+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
-+ (match_operand:ANYF 1 "register_operand" "f"))
-+ (clobber (match_scratch:DI 2 "=&r"))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+ "<store>\t%1,%0,%2"
-+ [(set (attr "length") (const_int 8))])
-+(define_insn "*local_pic_storesi<mode>"
-+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
-+ (match_operand:ANYF 1 "register_operand" "f"))
-+ (clobber (match_scratch:SI 2 "=&r"))]
-+ "TARGET_HARD_FLOAT && !TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
-+ "<store>\t%1,%0,%2"
-+ [(set (attr "length") (const_int 8))])
-diff -urN empty/gcc/config/riscv/predicates.md gcc-5.3.0/gcc/config/riscv/predicates.md
---- empty/gcc/config/riscv/predicates.md 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/predicates.md 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,184 @@
-+;; Predicate description for RISC-V target.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+;; GNU General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/ >.
-+
-+(define_predicate "const_arith_operand"
-+ (and (match_code "const_int")
-+ (match_test "SMALL_OPERAND (INTVAL (op))")))
-+
-+(define_predicate "arith_operand"
-+ (ior (match_operand 0 "const_arith_operand")
-+ (match_operand 0 "register_operand")))
-+
-+(define_predicate "sle_operand"
-+ (and (match_code "const_int")
-+ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
-+
-+(define_predicate "sleu_operand"
-+ (and (match_operand 0 "sle_operand")
-+ (match_test "INTVAL (op) + 1 != 0")))
-+
-+(define_predicate "const_0_operand"
-+ (and (match_code "const_int,const_double,const_vector")
-+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
-+
-+(define_predicate "reg_or_0_operand"
-+ (ior (match_operand 0 "const_0_operand")
-+ (match_operand 0 "register_operand")))
-+
-+(define_predicate "const_1_operand"
-+ (and (match_code "const_int,const_double,const_vector")
-+ (match_test "op == CONST1_RTX (GET_MODE (op))")))
-+
-+(define_predicate "reg_or_1_operand"
-+ (ior (match_operand 0 "const_1_operand")
-+ (match_operand 0 "register_operand")))
-+
-+;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
-+(define_predicate "branch_on_bit_operand"
-+ (and (match_code "const_int")
-+ (match_test "INTVAL (op) >= IMM_BITS - 1")))
-+
-+;; This is used for indexing into vectors, and hence only accepts const_int.
-+(define_predicate "const_0_or_1_operand"
-+ (and (match_code "const_int")
-+ (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
-+ (match_test "op == CONST1_RTX (GET_MODE (op))"))))
-+
-+(define_special_predicate "pc_or_label_operand"
-+ (match_code "pc,label_ref"))
-+
-+;; A legitimate CONST_INT operand that takes more than one instruction
-+;; to load.
-+(define_predicate "splittable_const_int_operand"
-+ (match_code "const_int")
-+{
-+ /* Don't handle multi-word moves this way; we don't want to introduce
-+ the individual word-mode moves until after reload. */
-+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-+ return false;
-+
-+ /* Otherwise check whether the constant can be loaded in a single
-+ instruction. */
-+ return !LUI_OPERAND (INTVAL (op)) && !SMALL_OPERAND (INTVAL (op));
-+})
-+
-+(define_predicate "move_operand"
-+ (match_operand 0 "general_operand")
-+{
-+ enum riscv_symbol_type symbol_type;
-+
-+ /* The thinking here is as follows:
-+
-+ (1) The move expanders should split complex load sequences into
-+ individual instructions. Those individual instructions can
-+ then be optimized by all rtl passes.
-+
-+ (2) The target of pre-reload load sequences should not be used
-+ to store temporary results. If the target register is only
-+ assigned one value, reload can rematerialize that value
-+ on demand, rather than spill it to the stack.
-+
-+ (3) If we allowed pre-reload passes like combine and cse to recreate
-+ complex load sequences, we would want to be able to split the
-+ sequences before reload as well, so that the pre-reload scheduler
-+ can see the individual instructions. This falls foul of (2);
-+ the splitter would be forced to reuse the target register for
-+ intermediate results.
-+
-+ (4) We want to define complex load splitters for combine. These
-+ splitters can request a temporary scratch register, which avoids
-+ the problem in (2). They allow things like:
-+
-+ (set (reg T1) (high SYM))
-+ (set (reg T2) (low (reg T1) SYM))
-+ (set (reg X) (plus (reg T2) (const_int OFFSET)))
-+
-+ to be combined into:
-+
-+ (set (reg T3) (high SYM+OFFSET))
-+ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
-+
-+ if T2 is only used this once. */
-+ switch (GET_CODE (op))
-+ {
-+ case CONST_INT:
-+ return !splittable_const_int_operand (op, mode);
-+
-+ case CONST:
-+ case SYMBOL_REF:
-+ case LABEL_REF:
-+ return (riscv_symbolic_constant_p (op, &symbol_type)
-+ && !riscv_hi_relocs[symbol_type]);
-+
-+ case HIGH:
-+ op = XEXP (op, 0);
-+ return riscv_symbolic_constant_p (op, &symbol_type);
-+
-+ default:
-+ return true;
-+ }
-+})
-+
-+(define_predicate "consttable_operand"
-+ (match_test "CONSTANT_P (op)"))
-+
-+(define_predicate "symbolic_operand"
-+ (match_code "const,symbol_ref,label_ref")
-+{
-+ enum riscv_symbol_type type;
-+ return riscv_symbolic_constant_p (op, &type);
-+})
-+
-+(define_predicate "absolute_symbolic_operand"
-+ (match_code "const,symbol_ref,label_ref")
-+{
-+ enum riscv_symbol_type type;
-+ return (riscv_symbolic_constant_p (op, &type)
-+ && type == SYMBOL_ABSOLUTE);
-+})
-+
-+(define_predicate "plt_symbolic_operand"
-+ (match_code "const,symbol_ref,label_ref")
-+{
-+ enum riscv_symbol_type type;
-+ return (riscv_symbolic_constant_p (op, &type)
-+ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
-+})
-+
-+(define_predicate "call_insn_operand"
-+ (ior (match_operand 0 "absolute_symbolic_operand")
-+ (match_operand 0 "plt_symbolic_operand")
-+ (match_operand 0 "register_operand")))
-+
-+(define_predicate "symbol_ref_operand"
-+ (match_code "symbol_ref"))
-+
-+(define_predicate "modular_operator"
-+ (match_code "plus,minus,mult,ashift"))
-+
-+(define_predicate "equality_operator"
-+ (match_code "eq,ne"))
-+
-+(define_predicate "order_operator"
-+ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
-+
-+(define_predicate "fp_order_operator"
-+ (match_code "eq,ne,lt,le,gt,ge"))
-diff -urN empty/gcc/config/riscv/riscv.c gcc-5.3.0/gcc/config/riscv/riscv.c
---- empty/gcc/config/riscv/riscv.c 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.c 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4311 @@
-+/* Subroutines used for code generation for RISC-V.
-+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/ >. */
-+
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "rtl.h"
-+#include "regs.h"
-+#include "hard-reg-set.h"
-+#include "insn-config.h"
-+#include "conditions.h"
-+#include "insn-attr.h"
-+#include "recog.h"
-+#include "output.h"
-+#include "hash-set.h"
-+#include "machmode.h"
-+#include "vec.h"
-+#include "double-int.h"
-+#include "input.h"
-+#include "alias.h"
-+#include "symtab.h"
-+#include "wide-int.h"
-+#include "inchash.h"
-+#include "tree.h"
-+#include "fold-const.h"
-+#include "varasm.h"
-+#include "stringpool.h"
-+#include "stor-layout.h"
-+#include "calls.h"
-+#include "function.h"
-+#include "hashtab.h"
-+#include "flags.h"
-+#include "statistics.h"
-+#include "real.h"
-+#include "fixed-value.h"
-+#include "expmed.h"
-+#include "dojump.h"
-+#include "explow.h"
-+#include "emit-rtl.h"
-+#include "stmt.h"
-+#include "expr.h"
-+#include "insn-codes.h"
-+#include "optabs.h"
-+#include "libfuncs.h"
-+#include "reload.h"
-+#include "tm_p.h"
-+#include "ggc.h"
-+#include "gstab.h"
-+#include "hash-table.h"
-+#include "debug.h"
-+#include "target.h"
-+#include "target-def.h"
-+#include "common/common-target.h"
-+#include "langhooks.h"
-+#include "dominance.h"
-+#include "cfg.h"
-+#include "cfgrtl.h"
-+#include "cfganal.h"
-+#include "lcm.h"
-+#include "cfgbuild.h"
-+#include "cfgcleanup.h"
-+#include "predict.h"
-+#include "basic-block.h"
-+#include "sched-int.h"
-+#include "tree-ssa-alias.h"
-+#include "internal-fn.h"
-+#include "gimple-fold.h"
-+#include "tree-eh.h"
-+#include "gimple-expr.h"
-+#include "is-a.h"
-+#include "gimple.h"
-+#include "gimplify.h"
-+#include "bitmap.h"
-+#include "diagnostic.h"
-+#include "target-globals.h"
-+#include "opts.h"
-+#include "tree-pass.h"
-+#include "context.h"
-+#include "hash-map.h"
-+#include "plugin-api.h"
-+#include "ipa-ref.h"
-+#include "cgraph.h"
-+#include "builtins.h"
-+#include "rtl-iter.h"
-+#include <stdint.h>
-+
-+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
-+#define UNSPEC_ADDRESS_P(X) \
-+ (GET_CODE (X) == UNSPEC \
-+ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
-+ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
-+
-+/* Extract the symbol or label from UNSPEC wrapper X. */
-+#define UNSPEC_ADDRESS(X) \
-+ XVECEXP (X, 0, 0)
-+
-+/* Extract the symbol type from UNSPEC wrapper X. */
-+#define UNSPEC_ADDRESS_TYPE(X) \
-+ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
-+
-+/* The maximum distance between the top of the stack frame and the
-+ value sp has when we save and restore registers. This is set by the
-+ range of load/store offsets and must also preserve stack alignment. */
-+#define RISCV_MAX_FIRST_STACK_STEP (IMM_REACH/2 - 16)
-+
-+/* True if INSN is a riscv.md pattern or asm statement. */
-+#define USEFUL_INSN_P(INSN) \
-+ (NONDEBUG_INSN_P (INSN) \
-+ && GET_CODE (PATTERN (INSN)) != USE \
-+ && GET_CODE (PATTERN (INSN)) != CLOBBER \
-+ && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
-+ && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
-+
-+/* True if bit BIT is set in VALUE. */
-+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
-+
-+/* Classifies an address.
-+
-+ ADDRESS_REG
-+ A natural register + offset address. The register satisfies
-+ riscv_valid_base_register_p and the offset is a const_arith_operand.
-+
-+ ADDRESS_LO_SUM
-+ A LO_SUM rtx. The first operand is a valid base register and
-+ the second operand is a symbolic address.
-+
-+ ADDRESS_CONST_INT
-+ A signed 12-bit constant address.
-+
-+ ADDRESS_SYMBOLIC:
-+ A constant symbolic address. */
-+enum riscv_address_type {
-+ ADDRESS_REG,
-+ ADDRESS_LO_SUM,
-+ ADDRESS_CONST_INT,
-+ ADDRESS_SYMBOLIC
-+};
-+
-+enum riscv_code_model riscv_cmodel = TARGET_DEFAULT_CMODEL;
-+
-+/* Macros to create an enumeration identifier for a function prototype. */
-+#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
-+#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
-+#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
-+#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
-+
-+/* Classifies the prototype of a built-in function. */
-+enum riscv_function_type {
-+#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
-+#include "config/riscv/riscv-ftypes.def"
-+#undef DEF_RISCV_FTYPE
-+ RISCV_MAX_FTYPE_MAX
-+};
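-+
-+/* For illustration (a hypothetical entry, not necessarily present in
-+   riscv-ftypes.def): DEF_RISCV_FTYPE (1, (SI, VOID)) would expand, via the
-+   RISCV_FTYPE_NAME1 macro above, to the enumerator RISCV_SI_FTYPE_VOID. */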
-+
-+/* Specifies how a built-in function should be converted into rtl. */
-+enum riscv_builtin_type {
-+ /* The function corresponds directly to an .md pattern. The return
-+ value is mapped to operand 0 and the arguments are mapped to
-+ operands 1 and above. */
-+ RISCV_BUILTIN_DIRECT,
-+
-+ /* The function corresponds directly to an .md pattern. There is no return
-+ value and the arguments are mapped to operands 0 and above. */
-+ RISCV_BUILTIN_DIRECT_NO_TARGET
-+};
-+
-+/* Information about a function's frame layout. */
-+struct GTY(()) riscv_frame_info {
-+ /* The size of the frame in bytes. */
-+ HOST_WIDE_INT total_size;
-+
-+ /* Bit X is set if the function saves or restores GPR X. */
-+ unsigned int mask;
-+
-+ /* Likewise FPR X. */
-+ unsigned int fmask;
-+
-+ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */
-+ unsigned save_libcall_adjustment;
-+
-+ /* Offsets of fixed-point and floating-point save areas from frame bottom */
-+ HOST_WIDE_INT gp_sp_offset;
-+ HOST_WIDE_INT fp_sp_offset;
-+
-+ /* Offset of virtual frame pointer from stack pointer/frame bottom */
-+ HOST_WIDE_INT frame_pointer_offset;
-+
-+ /* Offset of hard frame pointer from stack pointer/frame bottom */
-+ HOST_WIDE_INT hard_frame_pointer_offset;
-+
-+ /* The offset of arg_pointer_rtx from the bottom of the frame. */
-+ HOST_WIDE_INT arg_pointer_offset;
-+};
-+
-+struct GTY(()) machine_function {
-+ /* The number of extra stack bytes taken up by register varargs.
-+ This area is allocated by the callee at the very top of the frame. */
-+ int varargs_size;
-+
-+ /* Cached return value of leaf_function_p. <0 if false, >0 if true. */
-+ int is_leaf;
-+
-+ /* The current frame information, calculated by riscv_compute_frame_info. */
-+ struct riscv_frame_info frame;
-+};
-+
-+/* Information about a single argument. */
-+struct riscv_arg_info {
-+ /* True if the argument is passed in a floating-point register, or
-+ would have been if we hadn't run out of registers. */
-+ bool fpr_p;
-+
-+ /* The number of words passed in registers, rounded up. */
-+ unsigned int reg_words;
-+
-+ /* For EABI, the offset of the first register from GP_ARG_FIRST or
-+ FP_ARG_FIRST. For other ABIs, the offset of the first register from
-+ the start of the ABI's argument structure (see the CUMULATIVE_ARGS
-+ comment for details).
-+
-+ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
-+ on the stack. */
-+ unsigned int reg_offset;
-+
-+ /* The number of words that must be passed on the stack, rounded up. */
-+ unsigned int stack_words;
-+
-+ /* The offset from the start of the stack overflow area of the argument's
-+ first stack word. Only meaningful when STACK_WORDS is nonzero. */
-+ unsigned int stack_offset;
-+};
-+
-+/* Information about an address described by riscv_address_type.
-+
-+ ADDRESS_CONST_INT
-+ No fields are used.
-+
-+ ADDRESS_REG
-+ REG is the base register and OFFSET is the constant offset.
-+
-+ ADDRESS_LO_SUM
-+ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
-+ is the type of symbol it references.
-+
-+ ADDRESS_SYMBOLIC
-+ SYMBOL_TYPE is the type of symbol that the address references. */
-+struct riscv_address_info {
-+ enum riscv_address_type type;
-+ rtx reg;
-+ rtx offset;
-+ enum riscv_symbol_type symbol_type;
-+};
-+
-+/* One stage in a constant building sequence. These sequences have
-+ the form:
-+
-+ A = VALUE[0]
-+ A = A CODE[1] VALUE[1]
-+ A = A CODE[2] VALUE[2]
-+ ...
-+
-+ where A is an accumulator, each CODE[i] is a binary rtl operation
-+ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
-+struct riscv_integer_op {
-+ enum rtx_code code;
-+ unsigned HOST_WIDE_INT value;
-+};
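-+
-+/* For illustration (a sketch of one such sequence, assuming the usual
-+   RISC-V LUI/ADDI immediate split): loading 0x12345678 can be described
-+   by two entries,
-+
-+     VALUE[0] = 0x12345000            (materialized with LUI)
-+     CODE[1] = PLUS, VALUE[1] = 0x678 (materialized with ADDI)
-+
-+   since the low 12 bits, 0x678, fit a signed 12-bit immediate. */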
-+
-+/* The largest number of operations needed to load an integer constant.
-+ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
-+ but we may attempt and reject even worse sequences. */
-+#define RISCV_MAX_INTEGER_OPS 32
-+
-+/* Costs of various operations on the different architectures. */
-+
-+struct riscv_tune_info
-+{
-+ unsigned short fp_add[2];
-+ unsigned short fp_mul[2];
-+ unsigned short fp_div[2];
-+ unsigned short int_mul[2];
-+ unsigned short int_div[2];
-+ unsigned short issue_rate;
-+ unsigned short branch_cost;
-+ unsigned short memory_cost;
-+};
-+
-+/* Information about one CPU we know about. */
-+struct riscv_cpu_info {
-+ /* This CPU's canonical name. */
-+ const char *name;
-+
-+ /* The RISC-V ISA and extensions supported by this CPU. */
-+ const char *isa;
-+
-+ /* Tuning parameters for this CPU. */
-+ const struct riscv_tune_info *tune_info;
-+};
-+
-+/* Global variables for machine-dependent things. */
-+
-+/* Which tuning parameters to use. */
-+static const struct riscv_tune_info *tune_info;
-+
-+/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
-+bool riscv_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
-+
-+/* riscv_lo_relocs[X] is the relocation to use when a symbol of type X
-+ appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
-+ if they are matched by a special .md file pattern. */
-+const char *riscv_lo_relocs[NUM_SYMBOL_TYPES];
-+
-+/* Likewise for HIGHs. */
-+const char *riscv_hi_relocs[NUM_SYMBOL_TYPES];
-+
-+/* Index R is the smallest register class that contains register R. */
-+const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
-+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
-+ GR_REGS, T_REGS, T_REGS, T_REGS,
-+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
-+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
-+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
-+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
-+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
-+ T_REGS, T_REGS, T_REGS, T_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
-+ FRAME_REGS, FRAME_REGS,
-+};
-+
-+/* Costs to use when tuning for Rocket. */
-+static const struct riscv_tune_info rocket_tune_info = {
-+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
-+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
-+ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
-+ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
-+ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
-+ 1, /* issue_rate */
-+ 3, /* branch_cost */
-+ 5 /* memory_cost */
-+};
-+
-+/* Costs to use when optimizing for size. */
-+static const struct riscv_tune_info optimize_size_tune_info = {
-+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
-+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
-+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
-+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
-+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
-+ 1, /* issue_rate */
-+ 1, /* branch_cost */
-+ 1 /* memory_cost */
-+};
-+
-+/* A table describing all the processors GCC knows about. */
-+static const struct riscv_cpu_info riscv_cpu_info_table[] = {
-+ /* Entries for generic ISAs. */
-+ { "rocket", "IMAFD", &rocket_tune_info },
-+};
-+
-+/* Return the riscv_cpu_info entry for the given name string. */
-+
-+static const struct riscv_cpu_info *
-+riscv_parse_cpu (const char *cpu_string)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
-+ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
-+ return riscv_cpu_info_table + i;
-+
-+ error ("unknown cpu `%s'", cpu_string);
-+ return riscv_cpu_info_table;
-+}
-+
-+/* Fill CODES with a sequence of rtl operations to load VALUE.
-+ Return the number of operations needed. */
-+
-+static int
-+riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
-+ enum machine_mode mode)
-+{
-+ HOST_WIDE_INT low_part = CONST_LOW_PART (value);
-+ int cost = INT_MAX, alt_cost;
-+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
-+
-+ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
-+ {
-+ /* Simply ADDI or LUI */
-+ codes[0].code = UNKNOWN;
-+ codes[0].value = value;
-+ return 1;
-+ }
-+
-+ /* End with ADDI */
-+ if (low_part != 0
-+ && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
-+ {
-+ cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
-+ codes[cost-1].code = PLUS;
-+ codes[cost-1].value = low_part;
-+ }
-+
-+ /* End with XORI */
-+ if (cost > 2 && (low_part < 0 || mode == HImode))
-+ {
-+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
-+ alt_codes[alt_cost-1].code = XOR;
-+ alt_codes[alt_cost-1].value = low_part;
-+ if (alt_cost < cost)
-+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
-+ }
-+
-+ /* Eliminate trailing zeros and end with SLLI */
-+ if (cost > 2 && (value & 1) == 0)
-+ {
-+ int shift = 0;
-+ while ((value & 1) == 0)
-+ shift++, value >>= 1;
-+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
-+ alt_codes[alt_cost-1].code = ASHIFT;
-+ alt_codes[alt_cost-1].value = shift;
-+ if (alt_cost < cost)
-+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
-+ }
-+
-+ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
-+ return cost;
-+}
-+
-+static int
-+riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
-+ enum machine_mode mode)
-+{
-+ int cost = riscv_build_integer_1 (codes, value, mode);
-+
-+ /* Eliminate leading zeros and end with SRLI */
-+ if (value > 0 && cost > 2)
-+ {
-+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
-+ int alt_cost, shift = 0;
-+ HOST_WIDE_INT shifted_val;
-+
-+ /* Try filling trailing bits with 1s */
-+ while ((value << shift) >= 0)
-+ shift++;
-+ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
-+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
-+ alt_codes[alt_cost-1].code = LSHIFTRT;
-+ alt_codes[alt_cost-1].value = shift;
-+ if (alt_cost < cost)
-+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
-+
-+ /* Try filling trailing bits with 0s */
-+ shifted_val = value << shift;
-+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
-+ alt_codes[alt_cost-1].code = LSHIFTRT;
-+ alt_codes[alt_cost-1].value = shift;
-+ if (alt_cost < cost)
-+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
-+ }
-+
-+ return cost;
-+}
-+
-+static int
-+riscv_split_integer_cost (HOST_WIDE_INT val)
-+{
-+ int cost;
-+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
-+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
-+
-+ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
-+ if (loval != hival)
-+ cost += riscv_build_integer (codes, hival, VOIDmode);
-+
-+ return cost;
-+}
-+
-+static int
-+riscv_integer_cost (HOST_WIDE_INT val)
-+{
-+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
-+ return MIN (riscv_build_integer (codes, val, VOIDmode),
-+ riscv_split_integer_cost (val));
-+}
-+
-+/* Try to split a 64b integer into 32b parts, then reassemble. */
-+
-+static rtx
-+riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
-+{
-+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
-+ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
-+
-+ riscv_move_integer (hi, hi, hival);
-+ riscv_move_integer (lo, lo, loval);
-+
-+ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
-+ hi = force_reg (mode, hi);
-+
-+ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
-+}
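-+
-+/* A worked example of the split above (illustrative only): for
-+   val = 0x123456789abcdef0, loval is the sign-extended low word
-+   0xffffffff9abcdef0, so hival = (val - loval) >> 32 = 0x12345679,
-+   and (hival << 32) + loval reproduces the original value. */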
-+
-+/* Return true if X is a thread-local symbol. */
-+
-+static bool
-+riscv_tls_symbol_p (const_rtx x)
-+{
-+ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
-+}
-+
-+static bool
-+riscv_symbol_binds_local_p (const_rtx x)
-+{
-+ return (SYMBOL_REF_DECL (x)
-+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
-+ : SYMBOL_REF_LOCAL_P (x));
-+}
-+
-+/* Return the method that should be used to access SYMBOL_REF or
-+ LABEL_REF X in context CONTEXT. */
-+
-+static enum riscv_symbol_type
-+riscv_classify_symbol (const_rtx x)
-+{
-+ if (riscv_tls_symbol_p (x))
-+ return SYMBOL_TLS;
-+
-+ if (GET_CODE (x) == LABEL_REF)
-+ {
-+ if (LABEL_REF_NONLOCAL_P (x))
-+ return SYMBOL_GOT_DISP;
-+ return SYMBOL_ABSOLUTE;
-+ }
-+
-+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
-+
-+ if (flag_pic && !riscv_symbol_binds_local_p (x))
-+ return SYMBOL_GOT_DISP;
-+
-+ return SYMBOL_ABSOLUTE;
-+}
-+
-+/* Classify the base of symbolic expression X, given that X appears in
-+ context CONTEXT. */
-+
-+static enum riscv_symbol_type
-+riscv_classify_symbolic_expression (rtx x)
-+{
-+ rtx offset;
-+
-+ split_const (x, &x, &offset);
-+ if (UNSPEC_ADDRESS_P (x))
-+ return UNSPEC_ADDRESS_TYPE (x);
-+
-+ return riscv_classify_symbol (x);
-+}
-+
-+/* Return true if X is a symbolic constant that can be used in context
-+ CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
-+
-+bool
-+riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
-+{
-+ rtx offset;
-+
-+ split_const (x, &x, &offset);
-+ if (UNSPEC_ADDRESS_P (x))
-+ {
-+ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
-+ x = UNSPEC_ADDRESS (x);
-+ }
-+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
-+ *symbol_type = riscv_classify_symbol (x);
-+ else
-+ return false;
-+
-+ if (offset == const0_rtx)
-+ return true;
-+
-+ /* Check whether a nonzero offset is valid for the underlying
-+ relocations. */
-+ switch (*symbol_type)
-+ {
-+ case SYMBOL_ABSOLUTE:
-+ case SYMBOL_TLS_LE:
-+ return (int32_t) INTVAL (offset) == INTVAL (offset);
-+
-+ default:
-+ return false;
-+ }
-+ gcc_unreachable ();
-+}
-+
-+/* Returns the number of instructions necessary to reference a symbol. */
-+
-+static int riscv_symbol_insns (enum riscv_symbol_type type)
-+{
-+ switch (type)
-+ {
-+ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
-+ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
-+ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
-+ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
-+ default: gcc_unreachable();
-+ }
-+}
-+
-+/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
-+
-+static bool
-+riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
-+{
-+ return riscv_const_insns (x) > 0;
-+}
-+
-+/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
-+
-+static bool
-+riscv_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
-+{
-+ enum riscv_symbol_type type;
-+ rtx base, offset;
-+
-+ /* There is no assembler syntax for expressing an address-sized
-+ high part. */
-+ if (GET_CODE (x) == HIGH)
-+ return true;
-+
-+ split_const (x, &base, &offset);
-+ if (riscv_symbolic_constant_p (base, &type))
-+ {
-+ /* As an optimization, don't spill symbolic constants that are as
-+ cheap to rematerialize as to access in the constant pool. */
-+ if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
-+ return true;
-+
-+ /* As an optimization, avoid needlessly generating dynamic relocations. */
-+ if (flag_pic)
-+ return true;
-+ }
-+
-+ /* TLS symbols must be computed by riscv_legitimize_move. */
-+ if (tls_referenced_p (x))
-+ return true;
-+
-+ return false;
-+}
-+
-+/* Return true if register REGNO is a valid base register for mode MODE.
-+ STRICT_P is true if REG_OK_STRICT is in effect. */
-+
-+int
-+riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
-+ bool strict_p)
-+{
-+ if (!HARD_REGISTER_NUM_P (regno))
-+ {
-+ if (!strict_p)
-+ return true;
-+ regno = reg_renumber[regno];
-+ }
-+
-+ /* These fake registers will be eliminated to either the stack or
-+ hard frame pointer, both of which are usually valid base registers.
-+ Reload deals with the cases where the eliminated form isn't valid. */
-+ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
-+ return true;
-+
-+ return GP_REG_P (regno);
-+}
-+
-+/* Return true if X is a valid base register for mode MODE.
-+ STRICT_P is true if REG_OK_STRICT is in effect. */
-+
-+static bool
-+riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
-+{
-+ if (!strict_p && GET_CODE (x) == SUBREG)
-+ x = SUBREG_REG (x);
-+
-+ return (REG_P (x)
-+ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
-+}
-+
-+/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
-+ can address a value of mode MODE. */
-+
-+static bool
-+riscv_valid_offset_p (rtx x, enum machine_mode mode)
-+{
-+ /* Check that X is a signed 12-bit number. */
-+ if (!const_arith_operand (x, Pmode))
-+ return false;
-+
-+ /* We may need to split multiword moves, so make sure that every word
-+ is accessible. */
-+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
-+ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
-+ return false;
-+
-+ return true;
-+}
-+
-+/* Return true if a LO_SUM can address a value of mode MODE when the
-+ LO_SUM symbol has type SYMBOL_TYPE. */
-+
-+static bool
-+riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
-+{
-+ /* Check that symbols of type SYMBOL_TYPE can be used to access values
-+ of mode MODE. */
-+ if (riscv_symbol_insns (symbol_type) == 0)
-+ return false;
-+
-+ /* Check that there is a known low-part relocation. */
-+ if (riscv_lo_relocs[symbol_type] == NULL)
-+ return false;
-+
-+ /* We may need to split multiword moves, so make sure that each word
-+ can be accessed without inducing a carry. This is mainly needed
-+ for o64, which has historically only guaranteed 64-bit alignment
-+ for 128-bit types. */
-+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
-+ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
-+ return false;
-+
-+ return true;
-+}
-+
-+/* Return true if X is a valid address for machine mode MODE. If it is,
-+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
-+ effect. */
-+
-+static bool
-+riscv_classify_address (struct riscv_address_info *info, rtx x,
-+ enum machine_mode mode, bool strict_p)
-+{
-+ switch (GET_CODE (x))
-+ {
-+ case REG:
-+ case SUBREG:
-+ info->type = ADDRESS_REG;
-+ info->reg = x;
-+ info->offset = const0_rtx;
-+ return riscv_valid_base_register_p (info->reg, mode, strict_p);
-+
-+ case PLUS:
-+ info->type = ADDRESS_REG;
-+ info->reg = XEXP (x, 0);
-+ info->offset = XEXP (x, 1);
-+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
-+ && riscv_valid_offset_p (info->offset, mode));
-+
-+ case LO_SUM:
-+ info->type = ADDRESS_LO_SUM;
-+ info->reg = XEXP (x, 0);
-+ info->offset = XEXP (x, 1);
-+ /* We have to trust the creator of the LO_SUM to do something vaguely
-+ sane. Target-independent code that creates a LO_SUM should also
-+ create and verify the matching HIGH. Target-independent code that
-+ adds an offset to a LO_SUM must prove that the offset will not
-+ induce a carry. Failure to do either of these things would be
-+ a bug, and we are not required to check for it here. The RISCV
-+ backend itself should only create LO_SUMs for valid symbolic
-+ constants, with the high part being either a HIGH or a copy
-+ of _gp. */
-+ info->symbol_type
-+ = riscv_classify_symbolic_expression (info->offset);
-+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
-+ && riscv_valid_lo_sum_p (info->symbol_type, mode));
-+
-+ case CONST_INT:
-+ /* Small-integer addresses don't occur very often, but they
-+ are legitimate if $0 is a valid base register. */
-+ info->type = ADDRESS_CONST_INT;
-+ return SMALL_OPERAND (INTVAL (x));
-+
-+ default:
-+ return false;
-+ }
-+}
-+
-+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
-+
-+static bool
-+riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
-+{
-+ struct riscv_address_info addr;
-+
-+ return riscv_classify_address (&addr, x, mode, strict_p);
-+}
-+
-+/* Return the number of instructions needed to load or store a value
-+ of mode MODE at address X. Return 0 if X isn't valid for MODE.
-+ Assume that multiword moves may need to be split into word moves
-+ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
-+ enough. */
-+
-+int
-+riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
-+{
-+ struct riscv_address_info addr;
-+ int n = 1;
-+
-+ if (!riscv_classify_address (&addr, x, mode, false))
-+ return 0;
-+
-+ /* BLKmode is used for single unaligned loads and stores and should
-+ not count as a multiword mode. */
-+ if (mode != BLKmode && might_split_p)
-+ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-+
-+ if (addr.type == ADDRESS_LO_SUM)
-+ n += riscv_symbol_insns (addr.symbol_type) - 1;
-+
-+ return n;
-+}
-+
-+/* Return the number of instructions needed to load constant X.
-+ Return 0 if X isn't a valid constant. */
-+
-+int
-+riscv_const_insns (rtx x)
-+{
-+ enum riscv_symbol_type symbol_type;
-+ rtx offset;
-+
-+ switch (GET_CODE (x))
-+ {
-+ case HIGH:
-+ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
-+ || !riscv_hi_relocs[symbol_type])
-+ return 0;
-+
-+ /* This is simply an LUI. */
-+ return 1;
-+
-+ case CONST_INT:
-+ {
-+ int cost = riscv_integer_cost (INTVAL (x));
-+ /* Force complicated constants to memory. */
-+ return cost < 4 ? cost : 0;
-+ }
-+
-+ case CONST_DOUBLE:
-+ case CONST_VECTOR:
-+ /* Allow zeros for normal mode, where we can use x0. */
-+ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
-+
-+ case CONST:
-+ /* See if we can refer to X directly. */
-+ if (riscv_symbolic_constant_p (x, &symbol_type))
-+ return riscv_symbol_insns (symbol_type);
-+
-+ /* Otherwise try splitting the constant into a base and offset. */
-+ split_const (x, &x, &offset);
-+ if (offset != 0)
-+ {
-+ int n = riscv_const_insns (x);
-+ if (n != 0)
-+ return n + riscv_integer_cost (INTVAL (offset));
-+ }
-+ return 0;
-+
-+ case SYMBOL_REF:
-+ case LABEL_REF:
-+ return riscv_symbol_insns (riscv_classify_symbol (x));
-+
-+ default:
-+ return 0;
-+ }
-+}
-+
-+/* X is a doubleword constant that can be handled by splitting it into
-+ two words and loading each word separately. Return the number of
-+ instructions required to do this. */
-+
-+int
-+riscv_split_const_insns (rtx x)
-+{
-+ unsigned int low, high;
-+
-+ low = riscv_const_insns (riscv_subword (x, false));
-+ high = riscv_const_insns (riscv_subword (x, true));
-+ gcc_assert (low > 0 && high > 0);
-+ return low + high;
-+}
-+
-+/* Return the number of instructions needed to implement INSN,
-+ given that it loads from or stores to MEM. */
-+
-+int
-+riscv_load_store_insns (rtx mem, rtx_insn *insn)
-+{
-+ enum machine_mode mode;
-+ bool might_split_p;
-+ rtx set;
-+
-+ gcc_assert (MEM_P (mem));
-+ mode = GET_MODE (mem);
-+
-+ /* Try to prove that INSN does not need to be split. */
-+ might_split_p = true;
-+ if (GET_MODE_BITSIZE (mode) == 64)
-+ {
-+ set = single_set (insn);
-+ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
-+ might_split_p = false;
-+ }
-+
-+ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
-+}
-+
-+/* Emit a move from SRC to DEST. Assume that the move expanders can
-+ handle all moves if !can_create_pseudo_p (). The distinction is
-+ important because, unlike emit_move_insn, the move expanders know
-+ how to force Pmode objects into the constant pool even when the
-+ constant pool address is not itself legitimate. */
-+
-+rtx
-+riscv_emit_move (rtx dest, rtx src)
-+{
-+ return (can_create_pseudo_p ()
-+ ? emit_move_insn (dest, src)
-+ : emit_move_insn_1 (dest, src));
-+}
-+
-+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
-+
-+static void
-+riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
-+{
-+ emit_insn (gen_rtx_SET (VOIDmode, target,
-+ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
-+}
-+
-+/* Compute (CODE OP0 OP1) and store the result in a new register
-+ of mode MODE. Return that new register. */
-+
-+static rtx
-+riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
-+{
-+ rtx reg;
-+
-+ reg = gen_reg_rtx (mode);
-+ riscv_emit_binary (code, reg, op0, op1);
-+ return reg;
-+}
-+
-+/* Copy VALUE to a register and return that register. If new pseudos
-+ are allowed, copy it into a new register, otherwise use DEST. */
-+
-+static rtx
-+riscv_force_temporary (rtx dest, rtx value)
-+{
-+ if (can_create_pseudo_p ())
-+ return force_reg (Pmode, value);
-+ else
-+ {
-+ riscv_emit_move (dest, value);
-+ return dest;
-+ }
-+}
-+
-+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
-+ then add CONST_INT OFFSET to the result. */
-+
-+static rtx
-+riscv_unspec_address_offset (rtx base, rtx offset,
-+ enum riscv_symbol_type symbol_type)
-+{
-+ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
-+ UNSPEC_ADDRESS_FIRST + symbol_type);
-+ if (offset != const0_rtx)
-+ base = gen_rtx_PLUS (Pmode, base, offset);
-+ return gen_rtx_CONST (Pmode, base);
-+}
-+
-+/* Return an UNSPEC address with underlying address ADDRESS and symbol
-+ type SYMBOL_TYPE. */
-+
-+rtx
-+riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
-+{
-+ rtx base, offset;
-+
-+ split_const (address, &base, &offset);
-+ return riscv_unspec_address_offset (base, offset, symbol_type);
-+}
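-+
-+/* For example (illustrative): wrapping "sym + 4" with SYMBOL_TLS_LE yields
-+   (const (plus (unspec [(symbol_ref "sym")]
-+                        UNSPEC_ADDRESS_FIRST + SYMBOL_TLS_LE)
-+                (const_int 4))). */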
-+
-+/* If OP is an UNSPEC address, return the address to which it refers,
-+ otherwise return OP itself. */
-+
-+static rtx
-+riscv_strip_unspec_address (rtx op)
-+{
-+ rtx base, offset;
-+
-+ split_const (op, &base, &offset);
-+ if (UNSPEC_ADDRESS_P (base))
-+ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
-+ return op;
-+}
-+
-+/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
-+ high part to BASE and return the result. Just return BASE otherwise.
-+ TEMP is as for riscv_force_temporary.
-+
-+ The returned expression can be used as the first operand to a LO_SUM. */
-+
-+static rtx
-+riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
-+{
-+ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
-+ return riscv_force_temporary (temp, addr);
-+}
-+
-+/* Load an entry from the GOT. */
-+static rtx riscv_got_load_tls_gd(rtx dest, rtx sym)
-+{
-+ return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym));
-+}
-+
-+static rtx riscv_got_load_tls_ie(rtx dest, rtx sym)
-+{
-+ return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym));
-+}
-+
-+static rtx riscv_tls_add_tp_le(rtx dest, rtx base, rtx sym)
-+{
-+ rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
-+ return (Pmode == DImode ? gen_tls_add_tp_ledi(dest, base, tp, sym) : gen_tls_add_tp_lesi(dest, base, tp, sym));
-+}
-+
-+/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
-+ it appears in a MEM of that mode. Return true if ADDR is a legitimate
-+ constant in that context and can be split into high and low parts.
-+ If so, and if LOW_OUT is nonnull, emit the high part and store the
-+ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
-+
-+ TEMP is as for riscv_force_temporary and is used to load the high
-+ part into a register.
-+
-+ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
-+ a legitimate SET_SRC for an .md pattern, otherwise the low part
-+ is guaranteed to be a legitimate address for mode MODE. */
-+
-+bool
-+riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
-+{
-+ enum riscv_symbol_type symbol_type;
-+ rtx high;
-+
-+ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
-+ || !riscv_symbolic_constant_p (addr, &symbol_type)
-+ || riscv_symbol_insns (symbol_type) == 0
-+ || !riscv_hi_relocs[symbol_type])
-+ return false;
-+
-+ if (low_out)
-+ {
-+ switch (symbol_type)
-+ {
-+ case SYMBOL_ABSOLUTE:
-+ high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
-+ high = riscv_force_temporary (temp, high);
-+ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
-+ break;
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+ }
-+
-+ return true;
-+}
-+
-+/* Return a legitimate address for REG + OFFSET. TEMP is as for
-+ riscv_force_temporary; it is only needed when OFFSET is not a
-+ SMALL_OPERAND. */
-+
-+static rtx
-+riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
-+{
-+ if (!SMALL_OPERAND (offset))
-+ {
-+ rtx high;
-+
-+ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
-+ The addition inside the macro CONST_HIGH_PART may cause an
-+ overflow, so we need to force a sign-extension check. */
-+ high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
-+ offset = CONST_LOW_PART (offset);
-+ high = riscv_force_temporary (temp, high);
-+ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
-+ }
-+ return plus_constant (Pmode, reg, offset);
-+}
-+
-+/* The __tls_get_addr symbol. */
-+static GTY(()) rtx riscv_tls_symbol;
-+
-+/* Return an instruction sequence that calls __tls_get_addr. SYM is
-+ the TLS symbol we are referencing and TYPE is the symbol type to use
-+ (either global dynamic or local dynamic). RESULT is an RTX for the
-+ return value location. */
-+
-+static rtx
-+riscv_call_tls_get_addr (rtx sym, rtx result)
-+{
-+ rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
-+
-+ if (!riscv_tls_symbol)
-+ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
-+
-+ start_sequence ();
-+
-+ emit_insn (riscv_got_load_tls_gd (a0, sym));
-+ insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
-+ RTL_CONST_CALL_P (insn) = 1;
-+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
-+ insn = get_insns ();
-+
-+ end_sequence ();
-+
-+ return insn;
-+}
-+
-+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
-+ its address. The return value will be both a valid address and a valid
-+ SET_SRC (either a REG or a LO_SUM). */
-+
-+static rtx
-+riscv_legitimize_tls_address (rtx loc)
-+{
-+ rtx dest, insn, tp, tmp1;
-+ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
-+
-+ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
-+ if (!flag_pic)
-+ model = TLS_MODEL_LOCAL_EXEC;
-+
-+ switch (model)
-+ {
-+ case TLS_MODEL_LOCAL_DYNAMIC:
-+ /* Rely on section anchors for the optimization that LDM TLS
-+ provides. The anchor's address is loaded with GD TLS. */
-+ case TLS_MODEL_GLOBAL_DYNAMIC:
-+ tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
-+ insn = riscv_call_tls_get_addr (loc, tmp1);
-+ dest = gen_reg_rtx (Pmode);
-+ emit_libcall_block (insn, dest, tmp1, loc);
-+ break;
-+
-+ case TLS_MODEL_INITIAL_EXEC:
-+ /* la.tls.ie; tp-relative add */
-+ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
-+ tmp1 = gen_reg_rtx (Pmode);
-+ emit_insn (riscv_got_load_tls_ie (tmp1, loc));
-+ dest = gen_reg_rtx (Pmode);
-+ emit_insn (gen_add3_insn (dest, tmp1, tp));
-+ break;
-+
-+ case TLS_MODEL_LOCAL_EXEC:
-+ tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
-+ dest = gen_reg_rtx (Pmode);
-+ emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
-+ dest = gen_rtx_LO_SUM (Pmode, dest,
-+ riscv_unspec_address (loc, SYMBOL_TLS_LE));
-+ break;
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+ return dest;
-+}
-+
-+/* If X is not a valid address for mode MODE, force it into a register. */
-+
-+static rtx
-+riscv_force_address (rtx x, enum machine_mode mode)
-+{
-+ if (!riscv_legitimate_address_p (mode, x, false))
-+ x = force_reg (Pmode, x);
-+ return x;
-+}
-+
-+/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
-+ be legitimized in a way that the generic machinery might not expect,
-+ return a new address, otherwise return NULL. MODE is the mode of
-+ the memory being accessed. */
-+
-+static rtx
-+riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
-+ enum machine_mode mode)
-+{
-+ rtx addr;
-+
-+ if (riscv_tls_symbol_p (x))
-+ return riscv_legitimize_tls_address (x);
-+
-+ /* See if the address can split into a high part and a LO_SUM. */
-+ if (riscv_split_symbol (NULL, x, mode, &addr))
-+ return riscv_force_address (addr, mode);
-+
-+ /* Handle BASE + OFFSET using riscv_add_offset. */
-+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
-+ && INTVAL (XEXP (x, 1)) != 0)
-+ {
-+ rtx base = XEXP (x, 0);
-+ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
-+
-+ if (!riscv_valid_base_register_p (base, mode, false))
-+ base = copy_to_mode_reg (Pmode, base);
-+ addr = riscv_add_offset (NULL, base, offset);
-+ return riscv_force_address (addr, mode);
-+ }
-+
-+ return x;
-+}
-+
-+/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
-+
-+void
-+riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
-+{
-+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
-+ enum machine_mode mode;
-+ int i, num_ops;
-+ rtx x;
-+
-+ mode = GET_MODE (dest);
-+ num_ops = riscv_build_integer (codes, value, mode);
-+
-+ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
-+ && num_ops >= riscv_split_integer_cost (value))
-+ x = riscv_split_integer (value, mode);
-+ else
-+ {
-+ /* Apply each binary operation to X. */
-+ x = GEN_INT (codes[0].value);
-+
-+ for (i = 1; i < num_ops; i++)
-+ {
-+ if (!can_create_pseudo_p ())
-+ {
-+ emit_insn (gen_rtx_SET (VOIDmode, temp, x));
-+ x = temp;
-+ }
-+ else
-+ x = force_reg (mode, x);
-+
-+ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
-+ }
-+ }
-+
-+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
-+}
-+
-+/* Subroutine of riscv_legitimize_move. Move constant SRC into register
-+ DEST given that SRC satisfies immediate_operand but doesn't satisfy
-+ move_operand. */
-+
-+static void
-+riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
-+{
-+ rtx base, offset;
-+
-+ /* Split moves of big integers into smaller pieces. */
-+ if (splittable_const_int_operand (src, mode))
-+ {
-+ riscv_move_integer (dest, dest, INTVAL (src));
-+ return;
-+ }
-+
-+ /* Split moves of symbolic constants into high/low pairs. */
-+ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
-+ {
-+ emit_insn (gen_rtx_SET (VOIDmode, dest, src));
-+ return;
-+ }
-+
-+ /* Generate the appropriate access sequences for TLS symbols. */
-+ if (riscv_tls_symbol_p (src))
-+ {
-+ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
-+ return;
-+ }
-+
-+ /* If we have (const (plus symbol offset)), and that expression cannot
-+ be forced into memory, load the symbol first and add in the offset. Also
-+ prefer to do this even if the constant _can_ be forced into memory, as it
-+ usually produces better code. */
-+ split_const (src, &base, &offset);
-+ if (offset != const0_rtx
-+ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
-+ {
-+ base = riscv_force_temporary (dest, base);
-+ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
-+ return;
-+ }
-+
-+ src = force_const_mem (mode, src);
-+
-+ /* When using explicit relocs, constant pool references are sometimes
-+ not legitimate addresses. */
-+ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
-+ riscv_emit_move (dest, src);
-+}
-+
-+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
-+ sequence that is valid. */
-+
-+bool
-+riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
-+{
-+ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
-+ {
-+ riscv_emit_move (dest, force_reg (mode, src));
-+ return true;
-+ }
-+
-+ /* We need to deal with constants that would be legitimate
-+ immediate_operands but aren't legitimate move_operands. */
-+ if (CONSTANT_P (src) && !move_operand (src, mode))
-+ {
-+ riscv_legitimize_const_move (mode, dest, src);
-+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Return true if there is an instruction that implements CODE and accepts
-+ X as an immediate operand. */
-+
-+static int
-+riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
-+{
-+ switch (code)
-+ {
-+ case ASHIFT:
-+ case ASHIFTRT:
-+ case LSHIFTRT:
-+ /* All shift counts are truncated to a valid constant. */
-+ return true;
-+
-+ case AND:
-+ case IOR:
-+ case XOR:
-+ case PLUS:
-+ case LT:
-+ case LTU:
-+ /* These instructions take 12-bit signed immediates. */
-+ return SMALL_OPERAND (x);
-+
-+ case LE:
-+ /* We add 1 to the immediate and use SLT. */
-+ return SMALL_OPERAND (x + 1);
-+
-+ case LEU:
-+ /* Likewise SLTU, but reject the always-true case. */
-+ return SMALL_OPERAND (x + 1) && x + 1 != 0;
-+
-+ case GE:
-+ case GEU:
-+ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
-+ return x == 1;
-+
-+ default:
-+ /* By default assume that x0 can be used for 0. */
-+ return x == 0;
-+ }
-+}
-+
-+/* Return the cost of binary operation X, given that the instruction
-+ sequence for a word-sized or smaller operation takes SINGLE_INSNS
-+ instructions and that the sequence of a double-word operation takes
-+ DOUBLE_INSNS instructions. */
-+
-+static int
-+riscv_binary_cost (rtx x, int single_insns, int double_insns)
-+{
-+ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
-+ return COSTS_N_INSNS (double_insns);
-+ return COSTS_N_INSNS (single_insns);
-+}
-+
-+/* Return the cost of sign-extending OP to mode MODE, not including the
-+ cost of OP itself. */
-+
-+static int
-+riscv_sign_extend_cost (enum machine_mode mode, rtx op)
-+{
-+ if (MEM_P (op))
-+ /* Extended loads are as cheap as unextended ones. */
-+ return 0;
-+
-+ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
-+ /* A sign extension from SImode to DImode in 64-bit mode is free. */
-+ return 0;
-+
-+ /* We need to use a shift left and a shift right. */
-+ return COSTS_N_INSNS (2);
-+}
-+
-+/* Return the cost of zero-extending OP to mode MODE, not including the
-+ cost of OP itself. */
-+
-+static int
-+riscv_zero_extend_cost (enum machine_mode mode, rtx op)
-+{
-+ if (MEM_P (op))
-+ /* Extended loads are as cheap as unextended ones. */
-+ return 0;
-+
-+ if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
-+ ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
-+ /* We need a shift-left/shift-right pair to clear the upper bits. */
-+ return COSTS_N_INSNS (2);
-+
-+ /* We can use ANDI. */
-+ return COSTS_N_INSNS (1);
-+}
-+
-+/* Implement TARGET_RTX_COSTS. */
-+
-+static bool
-+riscv_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
-+ int *total, bool speed)
-+{
-+ enum machine_mode mode = GET_MODE (x);
-+ bool float_mode_p = FLOAT_MODE_P (mode);
-+ int cost;
-+
-+ switch (code)
-+ {
-+ case CONST_INT:
-+ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
-+ {
-+ *total = 0;
-+ return true;
-+ }
-+ /* Fall through. */
-+
-+ case SYMBOL_REF:
-+ case LABEL_REF:
-+ case CONST_DOUBLE:
-+ case CONST:
-+ if (speed)
-+ *total = 1;
-+ else if ((cost = riscv_const_insns (x)) > 0)
-+ *total = COSTS_N_INSNS (cost);
-+ else /* The instruction will be fetched from the constant pool. */
-+ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
-+ return true;
-+
-+ case MEM:
-+ /* If the address is legitimate, return the number of
-+ instructions it needs. */
-+ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
-+ {
-+ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
-+ return true;
-+ }
-+ /* Otherwise use the default handling. */
-+ return false;
-+
-+ case NOT:
-+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
-+ return false;
-+
-+ case AND:
-+ case IOR:
-+ case XOR:
-+ /* Double-word operations use two single-word operations. */
-+ *total = riscv_binary_cost (x, 1, 2);
-+ return false;
-+
-+ case ASHIFT:
-+ case ASHIFTRT:
-+ case LSHIFTRT:
-+ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
-+ return false;
-+
-+ case ABS:
-+ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
-+ return false;
-+
-+ case LO_SUM:
-+ *total = set_src_cost (XEXP (x, 0), speed);
-+ return true;
-+
-+ case LT:
-+ case LTU:
-+ case LE:
-+ case LEU:
-+ case GT:
-+ case GTU:
-+ case GE:
-+ case GEU:
-+ case EQ:
-+ case NE:
-+ case UNORDERED:
-+ case LTGT:
-+ /* Branch comparisons have VOIDmode, so use the first operand's
-+ mode instead. */
-+ mode = GET_MODE (XEXP (x, 0));
-+ if (float_mode_p)
-+ *total = tune_info->fp_add[mode == DFmode];
-+ else
-+ *total = riscv_binary_cost (x, 1, 3);
-+ return false;
-+
-+ case MINUS:
-+ if (float_mode_p
-+ && !HONOR_NANS (mode)
-+ && !HONOR_SIGNED_ZEROS (mode))
-+ {
-+ /* See if we can use NMADD or NMSUB. See riscv.md for the
-+ associated patterns. */
-+ rtx op0 = XEXP (x, 0);
-+ rtx op1 = XEXP (x, 1);
-+ if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
-+ {
-+ *total = (tune_info->fp_mul[mode == DFmode]
-+ + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
-+ + set_src_cost (XEXP (op0, 1), speed)
-+ + set_src_cost (op1, speed));
-+ return true;
-+ }
-+ if (GET_CODE (op1) == MULT)
-+ {
-+ *total = (tune_info->fp_mul[mode == DFmode]
-+ + set_src_cost (op0, speed)
-+ + set_src_cost (XEXP (op1, 0), speed)
-+ + set_src_cost (XEXP (op1, 1), speed));
-+ return true;
-+ }
-+ }
-+ /* Fall through. */
-+
-+ case PLUS:
-+ if (float_mode_p)
-+ *total = tune_info->fp_add[mode == DFmode];
-+ else
-+ *total = riscv_binary_cost (x, 1, 4);
-+ return false;
-+
-+ case NEG:
-+ if (float_mode_p
-+ && !HONOR_NANS (mode)
-+ && HONOR_SIGNED_ZEROS (mode))
-+ {
-+ /* See if we can use NMADD or NMSUB. See riscv.md for the
-+ associated patterns. */
-+ rtx op = XEXP (x, 0);
-+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
-+ && GET_CODE (XEXP (op, 0)) == MULT)
-+ {
-+ *total = (tune_info->fp_mul[mode == DFmode]
-+ + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
-+ + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
-+ + set_src_cost (XEXP (op, 1), speed));
-+ return true;
-+ }
-+ }
-+
-+ if (float_mode_p)
-+ *total = tune_info->fp_add[mode == DFmode];
-+ else
-+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
-+ return false;
-+
-+ case MULT:
-+ if (float_mode_p)
-+ *total = tune_info->fp_mul[mode == DFmode];
-+ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-+ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
-+ else if (!speed)
-+ *total = COSTS_N_INSNS (1);
-+ else
-+ *total = tune_info->int_mul[mode == DImode];
-+ return false;
-+
-+ case DIV:
-+ case SQRT:
-+ case MOD:
-+ if (float_mode_p)
-+ {
-+ *total = tune_info->fp_div[mode == DFmode];
-+ return false;
-+ }
-+ /* Fall through. */
-+
-+ case UDIV:
-+ case UMOD:
-+ if (speed)
-+ *total = tune_info->int_div[mode == DImode];
-+ else
-+ *total = COSTS_N_INSNS (1);
-+ return false;
-+
-+ case SIGN_EXTEND:
-+ *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
-+ return false;
-+
-+ case ZERO_EXTEND:
-+ *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
-+ return false;
-+
-+ case FLOAT:
-+ case UNSIGNED_FLOAT:
-+ case FIX:
-+ case FLOAT_EXTEND:
-+ case FLOAT_TRUNCATE:
-+ *total = tune_info->fp_add[mode == DFmode];
-+ return false;
-+
-+ default:
-+ return false;
-+ }
-+}
-+
-+/* Implement TARGET_ADDRESS_COST. */
-+
-+static int
-+riscv_address_cost (rtx addr, enum machine_mode mode,
-+ addr_space_t as ATTRIBUTE_UNUSED,
-+ bool speed ATTRIBUTE_UNUSED)
-+{
-+ return riscv_address_insns (addr, mode, false);
-+}
-+
-+/* Return one word of double-word value OP. HIGH_P is true to select the
-+ high part or false to select the low part. */
-+
-+rtx
-+riscv_subword (rtx op, bool high_p)
-+{
-+ unsigned int byte;
-+ enum machine_mode mode;
-+
-+ mode = GET_MODE (op);
-+ if (mode == VOIDmode)
-+ mode = TARGET_64BIT ? TImode : DImode;
-+
-+ byte = high_p ? UNITS_PER_WORD : 0;
-+
-+ if (FP_REG_RTX_P (op))
-+ return gen_rtx_REG (word_mode, REGNO (op) + high_p);
-+
-+ if (MEM_P (op))
-+ return adjust_address (op, word_mode, byte);
-+
-+ return simplify_gen_subreg (word_mode, op, mode, byte);
-+}
-+
-+/* Return true if a 64-bit move from SRC to DEST should be split into two. */
-+
-+bool
-+riscv_split_64bit_move_p (rtx dest, rtx src)
-+{
-+ /* All 64b moves are legal in 64b mode. All 64b FPR <-> FPR and
-+ FPR <-> MEM moves are legal in 32b mode, too. Although
-+ FPR <-> GPR moves are not available in general in 32b mode,
-+ we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
-+ return !(TARGET_64BIT
-+ || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
-+ || (FP_REG_RTX_P (dest) && MEM_P (src))
-+ || (FP_REG_RTX_P (src) && MEM_P (dest))
-+ || (FP_REG_RTX_P(dest) && src == CONST0_RTX(GET_MODE(src))));
-+}
-+
-+/* Split a doubleword move from SRC to DEST. On 32-bit targets,
-+ this function handles 64-bit moves for which riscv_split_64bit_move_p
-+ holds. For 64-bit targets, this function handles 128-bit moves. */
-+
-+void
-+riscv_split_doubleword_move (rtx dest, rtx src)
-+{
-+ rtx low_dest;
-+
-+ /* The operation can be split into two normal moves. Decide in
-+ which order to do them. */
-+ low_dest = riscv_subword (dest, false);
-+ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
-+ {
-+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
-+ riscv_emit_move (low_dest, riscv_subword (src, false));
-+ }
-+ else
-+ {
-+ riscv_emit_move (low_dest, riscv_subword (src, false));
-+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
-+ }
-+}
-+
-+/* Return the appropriate instructions to move SRC into DEST. Assume
-+ that SRC is operand 1 and DEST is operand 0. */
-+
-+const char *
-+riscv_output_move (rtx dest, rtx src)
-+{
-+ enum rtx_code dest_code, src_code;
-+ enum machine_mode mode;
-+ bool dbl_p;
-+
-+ dest_code = GET_CODE (dest);
-+ src_code = GET_CODE (src);
-+ mode = GET_MODE (dest);
-+ dbl_p = (GET_MODE_SIZE (mode) == 8);
-+
-+ if (dbl_p && riscv_split_64bit_move_p (dest, src))
-+ return "#";
-+
-+ if (dest_code == REG && GP_REG_P (REGNO (dest)))
-+ {
-+ if (src_code == REG && FP_REG_P (REGNO (src)))
-+ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
-+
-+ if (src_code == MEM)
-+ switch (GET_MODE_SIZE (mode))
-+ {
-+ case 1: return "lbu\t%0,%1";
-+ case 2: return "lhu\t%0,%1";
-+ case 4: return "lw\t%0,%1";
-+ case 8: return "ld\t%0,%1";
-+ }
-+
-+ if (src_code == CONST_INT)
-+ return "li\t%0,%1";
-+
-+ if (src_code == HIGH)
-+ return "lui\t%0,%h1";
-+
-+ if (symbolic_operand (src, VOIDmode))
-+ switch (riscv_classify_symbolic_expression (src))
-+ {
-+ case SYMBOL_GOT_DISP: return "la\t%0,%1";
-+ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
-+ default: gcc_unreachable();
-+ }
-+ }
-+ if ((src_code == REG && GP_REG_P (REGNO (src)))
-+ || (src == CONST0_RTX (mode)))
-+ {
-+ if (dest_code == REG)
-+ {
-+ if (GP_REG_P (REGNO (dest)))
-+ return "mv\t%0,%z1";
-+
-+ if (FP_REG_P (REGNO (dest)))
-+ {
-+ if (!dbl_p)
-+ return "fmv.s.x\t%0,%z1";
-+ if (TARGET_64BIT)
-+ return "fmv.d.x\t%0,%z1";
-+ /* in RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w */
-+ gcc_assert (src == CONST0_RTX (mode));
-+ return "fcvt.d.w\t%0,x0";
-+ }
-+ }
-+ if (dest_code == MEM)
-+ switch (GET_MODE_SIZE (mode))
-+ {
-+ case 1: return "sb\t%z1,%0";
-+ case 2: return "sh\t%z1,%0";
-+ case 4: return "sw\t%z1,%0";
-+ case 8: return "sd\t%z1,%0";
-+ }
-+ }
-+ if (src_code == REG && FP_REG_P (REGNO (src)))
-+ {
-+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
-+ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
-+
-+ if (dest_code == MEM)
-+ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
-+ }
-+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
-+ {
-+ if (src_code == MEM)
-+ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
-+ }
-+ gcc_unreachable ();
-+}
-+
-+/* Return true if CMP1 is a suitable second operand for integer ordering
-+ test CODE. See also the *sCC patterns in riscv.md. */
-+
-+static bool
-+riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
-+{
-+ switch (code)
-+ {
-+ case GT:
-+ case GTU:
-+ return reg_or_0_operand (cmp1, VOIDmode);
-+
-+ case GE:
-+ case GEU:
-+ return cmp1 == const1_rtx;
-+
-+ case LT:
-+ case LTU:
-+ return arith_operand (cmp1, VOIDmode);
-+
-+ case LE:
-+ return sle_operand (cmp1, VOIDmode);
-+
-+ case LEU:
-+ return sleu_operand (cmp1, VOIDmode);
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+}
-+
-+/* Return true if *CMP1 (of mode MODE) is a valid second operand for
-+ integer ordering test *CODE, or if an equivalent combination can
-+ be formed by adjusting *CODE and *CMP1. When returning true, update
-+ *CODE and *CMP1 with the chosen code and operand, otherwise leave
-+ them alone. */
-+
-+static bool
-+riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
-+ enum machine_mode mode)
-+{
-+ HOST_WIDE_INT plus_one;
-+
-+ if (riscv_int_order_operand_ok_p (*code, *cmp1))
-+ return true;
-+
-+ if (CONST_INT_P (*cmp1))
-+ switch (*code)
-+ {
-+ case LE:
-+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
-+ if (INTVAL (*cmp1) < plus_one)
-+ {
-+ *code = LT;
-+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
-+ return true;
-+ }
-+ break;
-+
-+ case LEU:
-+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
-+ if (plus_one != 0)
-+ {
-+ *code = LTU;
-+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
-+ return true;
-+ }
-+ break;
-+
-+ default:
-+ break;
-+ }
-+ return false;
-+}
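-+
-+/* An illustrative case (assuming the constant is not already accepted by
-+   the sle/sleu patterns): "x <= 2047" cannot use a 12-bit immediate,
-+   because 2048 is out of range, so it is rewritten as "x < 2048" with
-+   2048 forced into a register. */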
-+
-+/* Compare CMP0 and CMP1 using ordering test CODE and store the result
-+ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
-+ is nonnull, it's OK to set TARGET to the inverse of the result and
-+ flip *INVERT_PTR instead. */
-+
-+static void
-+riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
-+ rtx target, rtx cmp0, rtx cmp1)
-+{
-+ enum machine_mode mode;
-+
-+ /* First see if there is a RISCV instruction that can do this operation.
-+ If not, try doing the same for the inverse operation. If that also
-+ fails, force CMP1 into a register and try again. */
-+ mode = GET_MODE (cmp0);
-+ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
-+ riscv_emit_binary (code, target, cmp0, cmp1);
-+ else
-+ {
-+ enum rtx_code inv_code = reverse_condition (code);
-+ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
-+ {
-+ cmp1 = force_reg (mode, cmp1);
-+ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
-+ }
-+ else if (invert_ptr == 0)
-+ {
-+ rtx inv_target;
-+
-+ inv_target = riscv_force_binary (GET_MODE (target),
-+ inv_code, cmp0, cmp1);
-+ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
-+ }
-+ else
-+ {
-+ *invert_ptr = !*invert_ptr;
-+ riscv_emit_binary (inv_code, target, cmp0, cmp1);
-+ }
-+ }
-+}
-+
-+/* Return a register that is zero iff CMP0 and CMP1 are equal.
-+ The register will have the same mode as CMP0. */
-+
-+static rtx
-+riscv_zero_if_equal (rtx cmp0, rtx cmp1)
-+{
-+ if (cmp1 == const0_rtx)
-+ return cmp0;
-+
-+ return expand_binop (GET_MODE (cmp0), sub_optab,
-+ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
-+}
-+
-+/* Return false if we can easily emit code for the FP comparison specified
-+ by *CODE. If not, set *CODE to its inverse and return true. */
-+
-+static bool
-+riscv_reversed_fp_cond (enum rtx_code *code)
-+{
-+ switch (*code)
-+ {
-+ case EQ:
-+ case LT:
-+ case LE:
-+ case GT:
-+ case GE:
-+ case LTGT:
-+ case ORDERED:
-+ /* We know how to emit code for these cases... */
-+ return false;
-+
-+ default:
-+ /* ...but we must invert these and rely on the others. */
-+ *code = reverse_condition_maybe_unordered (*code);
-+ return true;
-+ }
-+}
-+
-+/* Convert a comparison into something that can be used in a branch or
-+ conditional move. On entry, *OP0 and *OP1 are the values being
-+ compared and *CODE is the code used to compare them.
-+
-+ Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
-+
-+static void
-+riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
-+{
-+ rtx cmp_op0 = *op0;
-+ rtx cmp_op1 = *op1;
-+
-+ if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
-+ {
-+ if (splittable_const_int_operand (cmp_op1, VOIDmode))
-+ {
-+ HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
-+ enum rtx_code new_code;
-+
-+ switch (*code)
-+ {
-+ case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
-+ case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
-+ case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
-+ case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
-+ case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
-+ case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
-+ case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
-+ case GE: new_rhs = rhs - 1; new_code = GT;
-+ try_new_rhs:
-+ /* Convert e.g. OP0 > 4095 into OP0 >= 4096. */
-+ if ((rhs < 0) == (new_rhs < 0)
-+ && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
-+ {
-+ *op1 = GEN_INT (new_rhs);
-+ *code = new_code;
-+ }
-+ break;
-+
-+ case EQ:
-+ case NE:
-+ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
-+ if (SMALL_OPERAND (-rhs))
-+ {
-+ *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
-+ riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
-+ *op1 = const0_rtx;
-+ }
-+ default:
-+ break;
-+ }
-+ }
-+
-+ if (*op1 != const0_rtx)
-+ *op1 = force_reg (GET_MODE (cmp_op0), *op1);
-+ }
-+ else
-+ {
-+ /* For FP comparisons, set an integer register with the result of the
-+ comparison, then branch on it. */
-+ rtx tmp0, tmp1, final_op;
-+ enum rtx_code fp_code = *code;
-+ *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
-+
-+ switch (fp_code)
-+ {
-+ case ORDERED:
-+ /* a == a && b == b */
-+ tmp0 = gen_reg_rtx (SImode);
-+ riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
-+ tmp1 = gen_reg_rtx (SImode);
-+ riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
-+ final_op = gen_reg_rtx (SImode);
-+ riscv_emit_binary (AND, final_op, tmp0, tmp1);
-+ break;
-+
-+ case LTGT:
-+ /* a < b || a > b */
-+ tmp0 = gen_reg_rtx (SImode);
-+ riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
-+ tmp1 = gen_reg_rtx (SImode);
-+ riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
-+ final_op = gen_reg_rtx (SImode);
-+ riscv_emit_binary (IOR, final_op, tmp0, tmp1);
-+ break;
-+
-+ case EQ:
-+ case LE:
-+ case LT:
-+ case GE:
-+ case GT:
-+ /* We have instructions for these cases. */
-+ final_op = gen_reg_rtx (SImode);
-+ riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
-+ break;
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+
-+ /* Compare the binary result against 0. */
-+ *op0 = final_op;
-+ *op1 = const0_rtx;
-+ }
-+}
-+
-+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
-+ and OPERANDS[3]. Store the result in OPERANDS[0].
-+
-+ On 64-bit targets, the mode of the comparison and target will always be
-+ SImode, thus possibly narrower than that of the comparison's operands. */
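-+ /* For example, "target = (x < y)" typically emits a single set-less-than
-+ via riscv_emit_int_order_test, while "target = (x == y)" first forms the
-+ difference with riscv_zero_if_equal and then tests it against zero. */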
-+
-+void
-+riscv_expand_scc (rtx operands[])
-+{
-+ rtx target = operands[0];
-+ enum rtx_code code = GET_CODE (operands[1]);
-+ rtx op0 = operands[2];
-+ rtx op1 = operands[3];
-+
-+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
-+
-+ if (code == EQ || code == NE)
-+ {
-+ rtx zie = riscv_zero_if_equal (op0, op1);
-+ riscv_emit_binary (code, target, zie, const0_rtx);
-+ }
-+ else
-+ riscv_emit_int_order_test (code, 0, target, op0, op1);
-+}
-+
-+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
-+ CODE and jump to OPERANDS[3] if the condition holds. */
-+
-+void
-+riscv_expand_conditional_branch (rtx *operands)
-+{
-+ enum rtx_code code = GET_CODE (operands[0]);
-+ rtx op0 = operands[1];
-+ rtx op1 = operands[2];
-+ rtx condition;
-+
-+ riscv_emit_compare (&code, &op0, &op1);
-+ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
-+ emit_jump_insn (gen_condjump (condition, operands[3]));
-+}
-+
-+/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
-+ least PARM_BOUNDARY bits of alignment, but will be given anything up
-+ to STACK_BOUNDARY bits if the type requires it. */
-+
-+static unsigned int
-+riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
-+{
-+ unsigned int alignment;
-+
-+ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
-+ if (alignment < PARM_BOUNDARY)
-+ alignment = PARM_BOUNDARY;
-+ if (alignment > STACK_BOUNDARY)
-+ alignment = STACK_BOUNDARY;
-+ return alignment;
-+}
-+
-+/* Fill INFO with information about a single argument. CUM is the
-+ cumulative state for earlier arguments. MODE is the mode of this
-+ argument and TYPE is its type (if known). NAMED is true if this
-+ is a named (fixed) argument rather than a variable one. */
-+
-+static void
-+riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
-+ enum machine_mode mode, const_tree type, bool named)
-+{
-+ bool doubleword_aligned_p;
-+ unsigned int num_bytes, num_words, max_regs;
-+
-+ /* Work out the size of the argument. */
-+ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
-+ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-+
-+ /* Scalar, complex and vector floating-point types are passed in
-+ floating-point registers, as long as this is a named rather
-+ than a variable argument. */
-+ info->fpr_p = (named
-+ && (type == 0 || FLOAT_TYPE_P (type))
-+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
-+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
-+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
-+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
-+
-+ /* Complex floats should only go into FPRs if there are two FPRs free,
-+ otherwise they should be passed in the same way as a struct
-+ containing two floats. */
-+ if (info->fpr_p
-+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
-+ && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
-+ {
-+ if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
-+ info->fpr_p = false;
-+ else
-+ num_words = 2;
-+ }
-+
-+ /* See whether the argument has doubleword alignment. */
-+ doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
-+ > BITS_PER_WORD);
-+
-+ /* Set REG_OFFSET to the register count we're interested in.
-+ Floating-point arguments are allocated against the same register
-+ count as integer arguments. */
-+ info->reg_offset = cum->num_gprs;
-+
-+ /* Advance to an even register if the argument is doubleword-aligned. */
-+ if (doubleword_aligned_p)
-+ info->reg_offset += info->reg_offset & 1;
-+
-+ /* Work out the offset of a stack argument. */
-+ info->stack_offset = cum->stack_words;
-+ if (doubleword_aligned_p)
-+ info->stack_offset += info->stack_offset & 1;
-+
-+ max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
-+
-+ /* Partition the argument between registers and stack. */
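-+ /* For example, if only two argument registers remain and the argument
-+ needs four words, REG_WORDS becomes 2 and STACK_WORDS 2, so the
-+ argument is split across the register/stack boundary. */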
-+ info->reg_words = MIN (num_words, max_regs);
-+ info->stack_words = num_words - info->reg_words;
-+}
-+
-+/* INFO describes a register argument that has the normal format for the
-+ argument's mode. Return the register it uses, assuming that FPRs are
-+ available if HARD_FLOAT_P. */
-+
-+static unsigned int
-+riscv_arg_regno (const struct riscv_arg_info *info, bool hard_float_p)
-+{
-+ if (!info->fpr_p || !hard_float_p)
-+ return GP_ARG_FIRST + info->reg_offset;
-+ else
-+ return FP_ARG_FIRST + info->reg_offset;
-+}
-+
-+/* Implement TARGET_FUNCTION_ARG. */
-+
-+static rtx
-+riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
-+ const_tree type, bool named)
-+{
-+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
-+ struct riscv_arg_info info;
-+
-+ if (mode == VOIDmode)
-+ return NULL;
-+
-+ riscv_get_arg_info (&info, cum, mode, type, named);
-+
-+ /* Return straight away if the whole argument is passed on the stack. */
-+ if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
-+ return NULL;
-+
-+ /* If any 64-bit chunk of the structure contains a double in its
-+ entirety, then that 64-bit chunk is passed in a floating-point
-+ register. */
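-+ /* For example, on a 64-bit hard-float target a named argument of type
-+ "struct { double d; long x; }" has its first chunk (d) passed in a
-+ floating-point register and its second chunk (x) in a general register,
-+ described by the PARALLEL built below. */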
-+ if (TARGET_HARD_FLOAT
-+ && named
-+ && type != 0
-+ && TREE_CODE (type) == RECORD_TYPE
-+ && TYPE_SIZE_UNIT (type)
-+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
-+ {
-+ tree field;
-+
-+ /* First check to see if there is any such field. */
-+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
-+ if (TREE_CODE (field) == FIELD_DECL
-+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
-+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
-+ && tree_fits_shwi_p (bit_position (field))
-+ && int_bit_position (field) % BITS_PER_WORD == 0)
-+ break;
-+
-+ if (field != 0)
-+ {
-+ /* Now handle the special case by returning a PARALLEL
-+ indicating where each 64-bit chunk goes. INFO.REG_WORDS
-+ chunks are passed in registers. */
-+ unsigned int i;
-+ HOST_WIDE_INT bitpos;
-+ rtx ret;
-+
-+ /* assign_parms checks the mode of ENTRY_PARM, so we must
-+ use the actual mode here. */
-+ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
-+
-+ bitpos = 0;
-+ field = TYPE_FIELDS (type);
-+ for (i = 0; i < info.reg_words; i++)
-+ {
-+ rtx reg;
-+
-+ for (; field; field = DECL_CHAIN (field))
-+ if (TREE_CODE (field) == FIELD_DECL
-+ && int_bit_position (field) >= bitpos)
-+ break;
-+
-+ if (field
-+ && int_bit_position (field) == bitpos
-+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
-+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
-+ reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
-+ else
-+ reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
-+
-+ XVECEXP (ret, 0, i)
-+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
-+ GEN_INT (bitpos / BITS_PER_UNIT));
-+
-+ bitpos += BITS_PER_WORD;
-+ }
-+ return ret;
-+ }
-+ }
-+
-+ /* Handle passing complex floating-point arguments in FPR pairs.
-+ The real part goes in the lower register and the imaginary part
-+ goes in the upper register. */
-+ if (info.fpr_p
-+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
-+ {
-+ rtx real, imag;
-+ enum machine_mode inner;
-+ unsigned int regno;
-+
-+ inner = GET_MODE_INNER (mode);
-+ regno = FP_ARG_FIRST + info.reg_offset;
-+ if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
-+ {
-+ /* Real part in registers, imaginary part on stack. */
-+ gcc_assert (info.stack_words == info.reg_words);
-+ return gen_rtx_REG (inner, regno);
-+ }
-+ else
-+ {
-+ gcc_assert (info.stack_words == 0);
-+ real = gen_rtx_EXPR_LIST (VOIDmode,
-+ gen_rtx_REG (inner, regno),
-+ const0_rtx);
-+ imag = gen_rtx_EXPR_LIST (VOIDmode,
-+ gen_rtx_REG (inner,
-+ regno + info.reg_words / 2),
-+ GEN_INT (GET_MODE_SIZE (inner)));
-+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
-+ }
-+ }
-+
-+ return gen_rtx_REG (mode, riscv_arg_regno (&info, TARGET_HARD_FLOAT));
-+}
-+
-+/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
-+
-+static void
-+riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
-+ const_tree type, bool named)
-+{
-+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
-+ struct riscv_arg_info info;
-+
-+ riscv_get_arg_info (&info, cum, mode, type, named);
-+
-+ /* Advance the register count. This has the effect of setting
-+ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
-+ argument required us to skip the final GPR and pass the whole
-+ argument on the stack. */
-+ cum->num_gprs = info.reg_offset + info.reg_words;
-+
-+ /* Advance the stack word count. */
-+ if (info.stack_words > 0)
-+ cum->stack_words = info.stack_offset + info.stack_words;
-+}
-+
-+/* Implement TARGET_ARG_PARTIAL_BYTES. */
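-+ /* For example, an argument needing three words when only one argument
-+ register is left reports one word's worth of bytes as passed in
-+ registers; the rest goes on the stack. */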
-+
-+static int
-+riscv_arg_partial_bytes (cumulative_args_t cum,
-+ enum machine_mode mode, tree type, bool named)
-+{
-+ struct riscv_arg_info info;
-+
-+ riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
-+ return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
-+}
-+
-+/* See whether VALTYPE is a record whose fields should be returned in
-+ floating-point registers. If so, return the number of fields and
-+ list them in FIELDS (which should have two elements). Return 0
-+ otherwise.
-+
-+ A structure with one or two fields is returned in floating-point
-+ registers as long as every field has a floating-point type. */
-+
-+static int
-+riscv_fpr_return_fields (const_tree valtype, tree *fields)
-+{
-+ tree field;
-+ int i;
-+
-+ if (TREE_CODE (valtype) != RECORD_TYPE)
-+ return 0;
-+
-+ i = 0;
-+ for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
-+ {
-+ if (TREE_CODE (field) != FIELD_DECL)
-+ continue;
-+
-+ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
-+ return 0;
-+
-+ if (i == 2)
-+ return 0;
-+
-+ fields[i++] = field;
-+ }
-+ return i;
-+}
-+
-+/* Return true if the function return value MODE will get returned in a
-+ floating-point register. */
-+
-+static bool
-+riscv_return_mode_in_fpr_p (enum machine_mode mode)
-+{
-+ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
-+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
-+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
-+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
-+}
-+
-+/* Return the representation of an FPR return register when the
-+ value being returned in FP_RETURN has mode VALUE_MODE and the
-+ return type itself has mode TYPE_MODE. The two modes may be
-+ different for structures like:
-+
-+ struct __attribute__((packed)) foo { float f; }
-+
-+ where we return the SFmode value of "f" in FP_RETURN, but where
-+ the structure itself has mode BLKmode. */
-+
-+static rtx
-+riscv_return_fpr_single (enum machine_mode type_mode,
-+ enum machine_mode value_mode)
-+{
-+ rtx x;
-+
-+ x = gen_rtx_REG (value_mode, FP_RETURN);
-+ if (type_mode != value_mode)
-+ {
-+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
-+ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
-+ }
-+ return x;
-+}
-+
-+/* Return a composite value in a pair of floating-point registers.
-+ MODE1 and OFFSET1 are the mode and byte offset for the first value,
-+ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
-+ complete value.
-+
-+ The first value always goes in FP_RETURN and the second in
-+ FP_RETURN + 1. */
-+
-+static rtx
-+riscv_return_fpr_pair (enum machine_mode mode,
-+ enum machine_mode mode1, HOST_WIDE_INT offset1,
-+ enum machine_mode mode2, HOST_WIDE_INT offset2)
-+{
-+ return gen_rtx_PARALLEL
-+ (mode,
-+ gen_rtvec (2,
-+ gen_rtx_EXPR_LIST (VOIDmode,
-+ gen_rtx_REG (mode1, FP_RETURN),
-+ GEN_INT (offset1)),
-+ gen_rtx_EXPR_LIST (VOIDmode,
-+ gen_rtx_REG (mode2, FP_RETURN + 1),
-+ GEN_INT (offset2))));
-+
-+}
-+
-+/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
-+ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
-+ VALTYPE is null and MODE is the mode of the return value. */
-+
-+rtx
-+riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
-+{
-+ if (valtype)
-+ {
-+ tree fields[2];
-+ int unsigned_p;
-+
-+ mode = TYPE_MODE (valtype);
-+ unsigned_p = TYPE_UNSIGNED (valtype);
-+
-+ /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
-+ return values, promote the mode here too. */
-+ mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
-+
-+ /* Handle structures whose fields are returned in floating-point registers. */
-+ switch (riscv_fpr_return_fields (valtype, fields))
-+ {
-+ case 1:
-+ return riscv_return_fpr_single (mode,
-+ TYPE_MODE (TREE_TYPE (fields[0])));
-+
-+ case 2:
-+ return riscv_return_fpr_pair (mode,
-+ TYPE_MODE (TREE_TYPE (fields[0])),
-+ int_byte_position (fields[0]),
-+ TYPE_MODE (TREE_TYPE (fields[1])),
-+ int_byte_position (fields[1]));
-+ }
-+
-+ /* Only use FPRs for scalar, complex or vector types. */
-+ if (!FLOAT_TYPE_P (valtype))
-+ return gen_rtx_REG (mode, GP_RETURN);
-+ }
-+
-+ /* Handle long doubles, which are returned as a pair of 64-bit halves. */
-+ if (mode == TFmode)
-+ return riscv_return_fpr_pair (mode,
-+ DImode, 0,
-+ DImode, GET_MODE_SIZE (mode) / 2);
-+
-+ if (riscv_return_mode_in_fpr_p (mode))
-+ {
-+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
-+ return riscv_return_fpr_pair (mode,
-+ GET_MODE_INNER (mode), 0,
-+ GET_MODE_INNER (mode),
-+ GET_MODE_SIZE (mode) / 2);
-+ else
-+ return gen_rtx_REG (mode, FP_RETURN);
-+ }
-+
-+ return gen_rtx_REG (mode, GP_RETURN);
-+}
-+
-+/* Implement TARGET_RETURN_IN_MEMORY. Scalars and small structures
-+ that fit in two registers are returned in a0/a1. */
-+
-+static bool
-+riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
-+{
-+ return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
-+}
-+
-+/* Implement TARGET_PASS_BY_REFERENCE. */
-+
-+static bool
-+riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
-+ enum machine_mode mode, const_tree type,
-+ bool named ATTRIBUTE_UNUSED)
-+{
-+ if (type && riscv_return_in_memory (type, NULL_TREE))
-+ return true;
-+ return targetm.calls.must_pass_in_stack (mode, type);
-+}
-+
-+/* Implement TARGET_SETUP_INCOMING_VARARGS. */
-+
-+static void
-+riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
-+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
-+ int no_rtl)
-+{
-+ CUMULATIVE_ARGS local_cum;
-+ int gp_saved;
-+
-+ /* The caller has advanced CUM up to, but not beyond, the last named
-+ argument. Advance a local copy of CUM past the last "real" named
-+ argument, to find out how many registers are left over. */
-+ local_cum = *get_cumulative_args (cum);
-+ riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
-+
-+ /* Find out how many registers we need to save. */
-+ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
-+
-+ if (!no_rtl && gp_saved > 0)
-+ {
-+ rtx ptr, mem;
-+
-+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
-+ REG_PARM_STACK_SPACE (cfun->decl)
-+ - gp_saved * UNITS_PER_WORD);
-+ mem = gen_frame_mem (BLKmode, ptr);
-+ set_mem_alias_set (mem, get_varargs_alias_set ());
-+
-+ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
-+ mem, gp_saved);
-+ }
-+ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
-+ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
-+}
-+
-+/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
-+
-+static void
-+riscv_va_start (tree valist, rtx nextarg)
-+{
-+ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
-+ std_expand_builtin_va_start (valist, nextarg);
-+}
-+
-+/* Expand a call of type TYPE. RESULT is where the result will go (null
-+ for "call"s and "sibcall"s), ADDR is the address of the function,
-+ ARGS_SIZE is the size of the arguments and AUX is the value passed
-+ to us by riscv_function_arg. Return the call itself. */
-+
-+rtx
-+riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
-+{
-+ rtx pattern;
-+
-+ if (!call_insn_operand (addr, VOIDmode))
-+ {
-+ rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
-+ riscv_emit_move (reg, addr);
-+ addr = reg;
-+ }
-+
-+ if (result == 0)
-+ {
-+ rtx (*fn) (rtx, rtx);
-+
-+ if (sibcall_p)
-+ fn = gen_sibcall_internal;
-+ else
-+ fn = gen_call_internal;
-+
-+ pattern = fn (addr, args_size);
-+ }
-+ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
-+ {
-+ /* Handle return values created by riscv_return_fpr_pair. */
-+ rtx (*fn) (rtx, rtx, rtx, rtx);
-+ rtx reg1, reg2;
-+
-+ if (sibcall_p)
-+ fn = gen_sibcall_value_multiple_internal;
-+ else
-+ fn = gen_call_value_multiple_internal;
-+
-+ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
-+ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
-+ pattern = fn (reg1, addr, args_size, reg2);
-+ }
-+ else
-+ {
-+ rtx (*fn) (rtx, rtx, rtx);
-+
-+ if (sibcall_p)
-+ fn = gen_sibcall_value_internal;
-+ else
-+ fn = gen_call_value_internal;
-+
-+ /* Handle return values created by riscv_return_fpr_single. */
-+ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
-+ result = XEXP (XVECEXP (result, 0, 0), 0);
-+ pattern = fn (result, addr, args_size);
-+ }
-+
-+ return emit_call_insn (pattern);
-+}
-+
-+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
-+ Assume that the areas do not overlap. */
-+
-+static void
-+riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
-+{
-+ HOST_WIDE_INT offset, delta;
-+ unsigned HOST_WIDE_INT bits;
-+ int i;
-+ enum machine_mode mode;
-+ rtx *regs;
-+
-+ bits = MAX (BITS_PER_UNIT,
-+ MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
-+
-+ mode = mode_for_size (bits, MODE_INT, 0);
-+ delta = bits / BITS_PER_UNIT;
-+
-+ /* Allocate a buffer for the temporary registers. */
-+ regs = XALLOCAVEC (rtx, length / delta);
-+
-+ /* Load as many BITS-sized chunks as possible; BITS was chosen above
-+ so that these accesses are aligned. */
-+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
-+ {
-+ regs[i] = gen_reg_rtx (mode);
-+ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
-+ }
-+
-+ /* Copy the chunks to the destination. */
-+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
-+ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
-+
-+ /* Mop up any left-over bytes. */
-+ if (offset < length)
-+ {
-+ src = adjust_address (src, BLKmode, offset);
-+ dest = adjust_address (dest, BLKmode, offset);
-+ move_by_pieces (dest, src, length - offset,
-+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
-+ }
-+}
-+
-+/* Helper function for doing a loop-based block operation on memory
-+ reference MEM. Each iteration of the loop will operate on LENGTH
-+ bytes of MEM.
-+
-+ Create a new base register for use within the loop and point it to
-+ the start of MEM. Create a new memory reference that uses this
-+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
-+
-+static void
-+riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
-+ rtx *loop_reg, rtx *loop_mem)
-+{
-+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
-+
-+ /* Although the new mem does not refer to a known location,
-+ it does keep up to LENGTH bytes of alignment. */
-+ *loop_mem = change_address (mem, BLKmode, *loop_reg);
-+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
-+}
-+
-+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
-+ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
-+ the memory regions do not overlap. */
-+
-+static void
-+riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
-+ HOST_WIDE_INT bytes_per_iter)
-+{
-+ rtx label, src_reg, dest_reg, final_src, test;
-+ HOST_WIDE_INT leftover;
-+
-+ leftover = length % bytes_per_iter;
-+ length -= leftover;
-+
-+ /* Create registers and memory references for use within the loop. */
-+ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
-+ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
-+
-+ /* Calculate the value that SRC_REG should have after the last iteration
-+ of the loop. */
-+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
-+ 0, 0, OPTAB_WIDEN);
-+
-+ /* Emit the start of the loop. */
-+ label = gen_label_rtx ();
-+ emit_label (label);
-+
-+ /* Emit the loop body. */
-+ riscv_block_move_straight (dest, src, bytes_per_iter);
-+
-+ /* Move on to the next block. */
-+ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
-+ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
-+
-+ /* Emit the loop condition. */
-+ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
-+ if (Pmode == DImode)
-+ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
-+ else
-+ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
-+
-+ /* Mop up any left-over bytes. */
-+ if (leftover)
-+ riscv_block_move_straight (dest, src, leftover);
-+}
-+
-+/* Expand a movmemsi instruction, which copies LENGTH bytes from
-+ memory reference SRC to memory reference DEST. */
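-+ /* Small constant lengths use straight-line copies, larger word-aligned
-+ constant lengths use a simple copy loop, and everything else (including
-+ variable lengths) falls back to the generic expansion by returning
-+ false. */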
-+
-+bool
-+riscv_expand_block_move (rtx dest, rtx src, rtx length)
-+{
-+ if (CONST_INT_P (length))
-+ {
-+ HOST_WIDE_INT factor, align;
-+
-+ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
-+ factor = BITS_PER_WORD / align;
-+
-+ if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
-+ {
-+ riscv_block_move_straight (dest, src, INTVAL (length));
-+ return true;
-+ }
-+ else if (optimize && align >= BITS_PER_WORD)
-+ {
-+ riscv_block_move_loop (dest, src, INTVAL (length),
-+ RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
-+ return true;
-+ }
-+ }
-+ return false;
-+}
-+
-+/* (Re-)Initialize riscv_lo_relocs and riscv_hi_relocs. */
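-+ /* For instance, with the medlow code model and no PIC an absolute symbol
-+ is printed as a "%hi(sym)"/"%lo(sym)" pair, and TLS local-exec offsets
-+ use "%tprel_hi("/"%tprel_lo(". */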
-+
-+static void
-+riscv_init_relocs (void)
-+{
-+ memset (riscv_hi_relocs, '\0', sizeof (riscv_hi_relocs));
-+ memset (riscv_lo_relocs, '\0', sizeof (riscv_lo_relocs));
-+
-+ if (!flag_pic && riscv_cmodel == CM_MEDLOW)
-+ {
-+ riscv_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
-+ riscv_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
-+ }
-+
-+ if (!flag_pic || flag_pie)
-+ {
-+ riscv_hi_relocs[SYMBOL_TLS_LE] = "%tprel_hi(";
-+ riscv_lo_relocs[SYMBOL_TLS_LE] = "%tprel_lo(";
-+ }
-+}
-+
-+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
-+ in context CONTEXT. RELOCS is the array of relocations to use. */
-+
-+static void
-+riscv_print_operand_reloc (FILE *file, rtx op, const char **relocs)
-+{
-+ enum riscv_symbol_type symbol_type;
-+ const char *p;
-+
-+ symbol_type = riscv_classify_symbolic_expression (op);
-+ gcc_assert (relocs[symbol_type]);
-+
-+ fputs (relocs[symbol_type], file);
-+ output_addr_const (file, riscv_strip_unspec_address (op));
-+ for (p = relocs[symbol_type]; *p != 0; p++)
-+ if (*p == '(')
-+ fputc (')', file);
-+}
-+
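-+ /* Return the assembly suffix that the 'A' operand code appends for memory
-+ model MODEL: ".aq" for acquire-like models, ".rl" for release, ".sc" for
-+ acquire-release and sequentially consistent models, and nothing for
-+ relaxed accesses. */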
-+static const char *
-+riscv_memory_model_suffix (enum memmodel model)
-+{
-+ switch (model)
-+ {
-+ case MEMMODEL_ACQ_REL:
-+ case MEMMODEL_SEQ_CST:
-+ case MEMMODEL_SYNC_SEQ_CST:
-+ return ".sc";
-+ case MEMMODEL_ACQUIRE:
-+ case MEMMODEL_CONSUME:
-+ case MEMMODEL_SYNC_ACQUIRE:
-+ return ".aq";
-+ case MEMMODEL_RELEASE:
-+ case MEMMODEL_SYNC_RELEASE:
-+ return ".rl";
-+ case MEMMODEL_RELAXED:
-+ return "";
-+ default:
-+ fprintf (stderr, "riscv_memory_model_suffix(%d)\n", (int) model);
-+ gcc_unreachable ();
-+ }
-+}
-+
-+/* Implement TARGET_PRINT_OPERAND. The RISCV-specific operand codes are:
-+
-+ 'h' Print the high-part relocation associated with OP, after stripping
-+ any outermost HIGH.
-+ 'R' Print the low-part relocation associated with OP.
-+ 'C' Print the integer branch condition for comparison OP.
-+ 'A' Print the atomic operation suffix for memory model OP.
-+ 'y' Print the base register of memory operand OP.
-+ 'z' Print the zero register (x0) if OP is zero, otherwise print OP normally. */
-+
-+static void
-+riscv_print_operand (FILE *file, rtx op, int letter)
-+{
-+ enum rtx_code code;
-+
-+ gcc_assert (op);
-+ code = GET_CODE (op);
-+
-+ switch (letter)
-+ {
-+ case 'h':
-+ if (code == HIGH)
-+ op = XEXP (op, 0);
-+ riscv_print_operand_reloc (file, op, riscv_hi_relocs);
-+ break;
-+
-+ case 'R':
-+ riscv_print_operand_reloc (file, op, riscv_lo_relocs);
-+ break;
-+
-+ case 'C':
-+ /* The RTL names match the instruction names. */
-+ fputs (GET_RTX_NAME (code), file);
-+ break;
-+
-+ case 'A':
-+ fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
-+ break;
-+
-+ default:
-+ switch (code)
-+ {
-+ case REG:
-+ if (letter && letter != 'z')
-+ output_operand_lossage ("invalid use of '%%%c'", letter);
-+ fprintf (file, "%s", reg_names[REGNO (op)]);
-+ break;
-+
-+ case MEM:
-+ if (letter == 'y')
-+ fprintf (file, "%s", reg_names[REGNO (XEXP (op, 0))]);
-+ else if (letter && letter != 'z')
-+ output_operand_lossage ("invalid use of '%%%c'", letter);
-+ else
-+ output_address (XEXP (op, 0));
-+ break;
-+
-+ default:
-+ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
-+ fputs (reg_names[GP_REG_FIRST], file);
-+ else if (letter && letter != 'z')
-+ output_operand_lossage ("invalid use of '%%%c'", letter);
-+ else
-+ output_addr_const (file, riscv_strip_unspec_address (op));
-+ break;
-+ }
-+ }
-+}
-+
-+/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
-+
-+static void
-+riscv_print_operand_address (FILE *file, rtx x)
-+{
-+ struct riscv_address_info addr;
-+
-+ if (riscv_classify_address (&addr, x, word_mode, true))
-+ switch (addr.type)
-+ {
-+ case ADDRESS_REG:
-+ riscv_print_operand (file, addr.offset, 0);
-+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
-+ return;
-+
-+ case ADDRESS_LO_SUM:
-+ riscv_print_operand_reloc (file, addr.offset, riscv_lo_relocs);
-+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
-+ return;
-+
-+ case ADDRESS_CONST_INT:
-+ output_addr_const (file, x);
-+ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
-+ return;
-+
-+ case ADDRESS_SYMBOLIC:
-+ output_addr_const (file, riscv_strip_unspec_address (x));
-+ return;
-+ }
-+ gcc_unreachable ();
-+}
-+
-+static bool
-+riscv_size_ok_for_small_data_p (int size)
-+{
-+ return g_switch_value && IN_RANGE (size, 1, g_switch_value);
-+}
-+
-+/* Return true if EXP should be placed in the small data section. */
-+
-+static bool
-+riscv_in_small_data_p (const_tree x)
-+{
-+ if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
-+ return false;
-+
-+ if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
-+ {
-+ const char *sec = DECL_SECTION_NAME (x);
-+ return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
-+ }
-+
-+ return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
-+}
-+
-+/* Return a section for X, handling small data. */
-+
-+static section *
-+riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
-+ unsigned HOST_WIDE_INT align)
-+{
-+ section *s = default_elf_select_rtx_section (mode, x, align);
-+
-+ if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
-+ {
-+ if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
-+ {
-+ /* Rename .rodata.cst* to .srodata.cst*. */
-+ char *name = (char *) alloca (strlen (s->named.name) + 2);
-+ sprintf (name, ".s%s", s->named.name + 1);
-+ return get_section (name, s->named.common.flags, NULL);
-+ }
-+
-+ if (s == data_section)
-+ return sdata_section;
-+ }
-+
-+ return s;
-+}
-+
-+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
-+
-+static void ATTRIBUTE_UNUSED
-+riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
-+{
-+ switch (size)
-+ {
-+ case 4:
-+ fputs ("\t.dtprelword\t", file);
-+ break;
-+
-+ case 8:
-+ fputs ("\t.dtpreldword\t", file);
-+ break;
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+ output_addr_const (file, x);
-+ fputs ("+0x800", file);
-+}
-+
-+/* Make the last instruction frame-related and note that it performs
-+ the operation described by FRAME_PATTERN. */
-+
-+static void
-+riscv_set_frame_expr (rtx frame_pattern)
-+{
-+ rtx insn;
-+
-+ insn = get_last_insn ();
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-+ frame_pattern,
-+ REG_NOTES (insn));
-+}
-+
-+/* Return a frame-related rtx that stores REG at MEM.
-+ REG must be a single register. */
-+
-+static rtx
-+riscv_frame_set (rtx mem, rtx reg)
-+{
-+ rtx set;
-+
-+ set = gen_rtx_SET (VOIDmode, mem, reg);
-+ RTX_FRAME_RELATED_P (set) = 1;
-+
-+ return set;
-+}
-+
-+/* Return true if the current function must save register REGNO. */
-+
-+static bool
-+riscv_save_reg_p (unsigned int regno)
-+{
-+ bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
-+ bool might_clobber = crtl->saves_all_registers
-+ || df_regs_ever_live_p (regno)
-+ || (regno == HARD_FRAME_POINTER_REGNUM
-+ && frame_pointer_needed);
-+
-+ return (call_saved && might_clobber)
-+ || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
-+}
-+
-+/* Determine whether to call GPR save/restore routines. */
-+static bool
-+riscv_use_save_libcall (const struct riscv_frame_info *frame)
-+{
-+ if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
-+ return false;
-+
-+ return frame->save_libcall_adjustment != 0;
-+}
-+
-+/* Determine which GPR save/restore routine to call. */
-+
-+static unsigned
-+riscv_save_libcall_count (unsigned mask)
-+{
-+ for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
-+ if (BITSET_P (mask, n))
-+ return CALLEE_SAVED_REG_NUMBER (n) + 1;
-+ abort ();
-+}
-+
-+/* Populate the current function's riscv_frame_info structure.
-+
-+ RISC-V stack frames grow downward. High addresses are at the top.
-+
-+ +-------------------------------+
-+ | |
-+ | incoming stack arguments |
-+ | |
-+ +-------------------------------+ <-- incoming stack pointer
-+ | |
-+ | callee-allocated save area |
-+ | for arguments that are |
-+ | split between registers and |
-+ | the stack |
-+ | |
-+ +-------------------------------+ <-- arg_pointer_rtx
-+ | |
-+ | callee-allocated save area |
-+ | for register varargs |
-+ | |
-+ +-------------------------------+ <-- hard_frame_pointer_rtx;
-+ | | stack_pointer_rtx + gp_sp_offset
-+ | GPR save area | + UNITS_PER_WORD
-+ | |
-+ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
-+ | | + UNITS_PER_HWFPVALUE
-+ | FPR save area |
-+ | |
-+ +-------------------------------+ <-- frame_pointer_rtx (virtual)
-+ | |
-+ | local variables |
-+ | |
-+ P +-------------------------------+
-+ | |
-+ | outgoing stack arguments |
-+ | |
-+ +-------------------------------+ <-- stack_pointer_rtx
-+
-+ Dynamic stack allocations such as alloca insert data at point P.
-+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
-+ hard_frame_pointer_rtx unchanged. */
-+
-+static void
-+riscv_compute_frame_info (void)
-+{
-+ struct riscv_frame_info *frame;
-+ HOST_WIDE_INT offset;
-+ unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
-+
-+ frame = &cfun->machine->frame;
-+ memset (frame, 0, sizeof (*frame));
-+
-+ /* Find out which GPRs we need to save. */
-+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
-+ if (riscv_save_reg_p (regno))
-+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
-+
-+ /* If this function calls eh_return, we must also save and restore the
-+ EH data registers. */
-+ if (crtl->calls_eh_return)
-+ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
-+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
-+
-+ /* Find out which FPRs we need to save. This loop must iterate over
-+ the same space as its companion in riscv_for_each_saved_reg. */
-+ if (TARGET_HARD_FLOAT)
-+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+ if (riscv_save_reg_p (regno))
-+ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
-+
-+ /* At the bottom of the frame are any outgoing stack arguments. */
-+ offset = crtl->outgoing_args_size;
-+ /* Next are local stack variables. */
-+ offset += RISCV_STACK_ALIGN (get_frame_size ());
-+ /* The virtual frame pointer points above the local variables. */
-+ frame->frame_pointer_offset = offset;
-+ /* Next are the callee-saved FPRs. */
-+ if (frame->fmask)
-+ {
-+ offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FPREG);
-+ frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
-+ }
-+ /* Next are the callee-saved GPRs. */
-+ if (frame->mask)
-+ {
-+ unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
-+ unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
-+
-+ /* Only use save/restore routines if they don't alter the stack size. */
-+ if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
-+ frame->save_libcall_adjustment = x_save_size;
-+
-+ offset += x_save_size;
-+ frame->gp_sp_offset = offset - UNITS_PER_WORD;
-+ }
-+ /* The hard frame pointer points above the callee-saved GPRs. */
-+ frame->hard_frame_pointer_offset = offset;
-+ /* Above the hard frame pointer is the callee-allocated varargs save area. */
-+ offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
-+ frame->arg_pointer_offset = offset;
-+ /* Next is the callee-allocated area for pretend stack arguments. */
-+ offset += crtl->args.pretend_args_size;
-+ frame->total_size = offset;
-+ /* Above that are the incoming stack pointer and any incoming arguments. */
-+
-+ /* Only use save/restore routines when the GPRs are atop the frame. */
-+ if (frame->hard_frame_pointer_offset != frame->total_size)
-+ frame->save_libcall_adjustment = 0;
-+}
-+
-+/* Make sure that we're not trying to eliminate to the wrong hard frame
-+ pointer. */
-+
-+static bool
-+riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
-+{
-+ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
-+}
-+
-+/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
-+ or argument pointer. TO is either the stack pointer or hard frame
-+ pointer. */
-+
-+HOST_WIDE_INT
-+riscv_initial_elimination_offset (int from, int to)
-+{
-+ HOST_WIDE_INT src, dest;
-+
-+ riscv_compute_frame_info ();
-+
-+ if (to == HARD_FRAME_POINTER_REGNUM)
-+ dest = cfun->machine->frame.hard_frame_pointer_offset;
-+ else if (to == STACK_POINTER_REGNUM)
-+ dest = 0; /* this is the base of all offsets */
-+ else
-+ gcc_unreachable ();
-+
-+ if (from == FRAME_POINTER_REGNUM)
-+ src = cfun->machine->frame.frame_pointer_offset;
-+ else if (from == ARG_POINTER_REGNUM)
-+ src = cfun->machine->frame.arg_pointer_offset;
-+ else
-+ gcc_unreachable ();
-+
-+ return src - dest;
-+}
-+
-+/* Implement RETURN_ADDR_RTX. We do not support moving back to a
-+ previous frame. */
-+
-+rtx
-+riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
-+{
-+ if (count != 0)
-+ return const0_rtx;
-+
-+ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
-+}
-+
-+/* Emit code to change the current function's return address to
-+ ADDRESS. SCRATCH is available as a scratch register, if needed.
-+ ADDRESS and SCRATCH are both word-mode GPRs. */
-+
-+void
-+riscv_set_return_address (rtx address, rtx scratch)
-+{
-+ rtx slot_address;
-+
-+ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
-+ slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
-+ cfun->machine->frame.gp_sp_offset);
-+ riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
-+}
-+
-+ /* A function to save or restore a register. The first argument is the
-+ register and the second is the stack slot. */
-+typedef void (*riscv_save_restore_fn) (rtx, rtx);
-+
-+/* Use FN to save or restore register REGNO. MODE is the register's
-+ mode and OFFSET is the offset of its save slot from the current
-+ stack pointer. */
-+
-+static void
-+riscv_save_restore_reg (enum machine_mode mode, int regno,
-+ HOST_WIDE_INT offset, riscv_save_restore_fn fn)
-+{
-+ rtx mem;
-+
-+ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
-+ fn (gen_rtx_REG (mode, regno), mem);
-+}
-+
-+/* Call FN for each register that is saved by the current function.
-+ SP_OFFSET is the offset of the current stack pointer from the start
-+ of the frame. */
-+
-+static void
-+riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
-+{
-+ HOST_WIDE_INT offset;
-+ int regno;
-+
-+ /* Save the link register and s-registers. */
-+ offset = cfun->machine->frame.gp_sp_offset - sp_offset;
-+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
-+ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
-+ {
-+ riscv_save_restore_reg (word_mode, regno, offset, fn);
-+ offset -= UNITS_PER_WORD;
-+ }
-+
-+ /* This loop must iterate over the same space as its companion in
-+ riscv_compute_frame_info. */
-+ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
-+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
-+ {
-+ riscv_save_restore_reg (DFmode, regno, offset, fn);
-+ offset -= GET_MODE_SIZE (DFmode);
-+ }
-+}
-+
-+/* Save register REG to MEM. Make the instruction frame-related. */
-+
-+static void
-+riscv_save_reg (rtx reg, rtx mem)
-+{
-+ riscv_emit_move (mem, reg);
-+ riscv_set_frame_expr (riscv_frame_set (mem, reg));
-+}
-+
-+/* Restore register REG from MEM. */
-+
-+static void
-+riscv_restore_reg (rtx reg, rtx mem)
-+{
-+ riscv_emit_move (reg, mem);
-+}
-+
-+/* Return the code to invoke the GPR save routine. */
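-+ /* The returned string has the form "call\tt0,__riscv_save_<N>", optionally
-+ followed by .cfi_def_cfa_offset/.cfi_offset directives describing the
-+ registers the routine stores. */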
-+
-+const char *
-+riscv_output_gpr_save (unsigned mask)
-+{
-+ static char buf[GP_REG_NUM * 32];
-+ size_t len = 0;
-+ unsigned n = riscv_save_libcall_count (mask), i;
-+ unsigned frame_size = RISCV_STACK_ALIGN ((n + 1) * UNITS_PER_WORD);
-+
-+ len += sprintf (buf + len, "call\tt0,__riscv_save_%u", n);
-+
-+#ifdef DWARF2_UNWIND_INFO
-+ /* Describe the effect of the call to __riscv_save_X. */
-+ if (dwarf2out_do_cfi_asm ())
-+ {
-+ len += sprintf (buf + len, "\n\t.cfi_def_cfa_offset %u", frame_size);
-+
-+ for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
-+ if (BITSET_P (cfun->machine->frame.mask, i))
-+ len += sprintf (buf + len, "\n\t.cfi_offset %u,%d", i,
-+ (CALLEE_SAVED_REG_NUMBER (i) + 2) * -UNITS_PER_WORD);
-+ }
-+#endif
-+
-+ return buf;
-+}
-+
-+/* Expand the "prologue" pattern. */
-+
-+void
-+riscv_expand_prologue (void)
-+{
-+ struct riscv_frame_info *frame = &cfun->machine->frame;
-+ HOST_WIDE_INT size = frame->total_size;
-+ unsigned mask = frame->mask;
-+ rtx insn;
-+
-+ if (flag_stack_usage_info)
-+ current_function_static_stack_size = size;
-+
-+ /* When optimizing for size, call a subroutine to save the registers. */
-+ if (riscv_use_save_libcall (frame))
-+ {
-+ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
-+ size -= frame->save_libcall_adjustment;
-+ emit_insn (gen_gpr_save (GEN_INT (mask)));
-+ }
-+
-+ /* Save the registers. Allocate up to RISCV_MAX_FIRST_STACK_STEP
-+ bytes beforehand; this is enough to cover the register save area
-+ without going out of range. */
-+ if ((frame->mask | frame->fmask) != 0)
-+ {
-+ HOST_WIDE_INT step1;
-+
-+ step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
-+ insn = gen_add3_insn (stack_pointer_rtx,
-+ stack_pointer_rtx,
-+ GEN_INT (-step1));
-+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
-+ size -= step1;
-+ riscv_for_each_saved_reg (size, riscv_save_reg);
-+ }
-+
-+ frame->mask = mask; /* Undo the above fib. */
-+
-+ /* Set up the frame pointer, if we're using one. */
-+ if (frame_pointer_needed)
-+ {
-+ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
-+ GEN_INT (frame->hard_frame_pointer_offset - size));
-+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
-+ }
-+
-+ /* Allocate the rest of the frame. */
-+ if (size > 0)
-+ {
-+ if (SMALL_OPERAND (-size))
-+ {
-+ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
-+ GEN_INT (-size));
-+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
-+ }
-+ else
-+ {
-+ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
-+ emit_insn (gen_add3_insn (stack_pointer_rtx,
-+ stack_pointer_rtx,
-+ RISCV_PROLOGUE_TEMP (Pmode)));
-+
-+ /* Describe the effect of the previous instructions. */
-+ insn = plus_constant (Pmode, stack_pointer_rtx, -size);
-+ insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx, insn);
-+ riscv_set_frame_expr (insn);
-+ }
-+ }
-+}
-+
-+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
-+ says which. */
-+
-+void
-+riscv_expand_epilogue (bool sibcall_p)
-+{
-+ /* Split the frame into two. STEP1 is the amount of stack we should
-+ deallocate before restoring the registers. STEP2 is the amount we
-+ should deallocate afterwards.
-+
-+ Start off by assuming that no registers need to be restored. */
-+ struct riscv_frame_info *frame = &cfun->machine->frame;
-+ unsigned mask = frame->mask;
-+ HOST_WIDE_INT step1 = frame->total_size;
-+ HOST_WIDE_INT step2 = 0;
-+ bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
-+ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
-+
-+ if (!sibcall_p && riscv_can_use_return_insn ())
-+ {
-+ emit_jump_insn (gen_return ());
-+ return;
-+ }
-+
-+ /* Move past any dynamic stack allocations. */
-+ if (cfun->calls_alloca)
-+ {
-+ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
-+ if (!SMALL_OPERAND (INTVAL (adjust)))
-+ {
-+ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
-+ adjust = RISCV_PROLOGUE_TEMP (Pmode);
-+ }
-+
-+ emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
-+ adjust));
-+ }
-+
-+ /* If we need to restore registers, deallocate as much stack as
-+ possible in the second step without going out of range. */
-+ if ((frame->mask | frame->fmask) != 0)
-+ {
-+ step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
-+ step1 -= step2;
-+ }
-+
-+ /* Deallocate the first STEP1 bytes of the frame. */
-+ if (step1 > 0)
-+ {
-+ /* Get an rtx for STEP1 that we can add to the stack pointer. */
-+ rtx adjust = GEN_INT (step1);
-+ if (!SMALL_OPERAND (step1))
-+ {
-+ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
-+ adjust = RISCV_PROLOGUE_TEMP (Pmode);
-+ }
-+
-+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
-+ }
-+
-+ if (use_restore_libcall)
-+ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
-+
-+ /* Restore the registers. */
-+ riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
-+
-+ if (use_restore_libcall)
-+ {
-+ frame->mask = mask; /* Undo the above fib. */
-+ gcc_assert (step2 >= frame->save_libcall_adjustment);
-+ step2 -= frame->save_libcall_adjustment;
-+ }
-+
-+ /* Deallocate the final bit of the frame. */
-+ if (step2 > 0)
-+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
-+ GEN_INT (step2)));
-+
-+ if (use_restore_libcall)
-+ {
-+ emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
-+ emit_jump_insn (gen_gpr_restore_return (ra));
-+ return;
-+ }
-+
-+ /* Add in the __builtin_eh_return stack adjustment. */
-+ if (crtl->calls_eh_return)
-+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
-+ EH_RETURN_STACKADJ_RTX));
-+
-+ if (!sibcall_p)
-+ emit_jump_insn (gen_simple_return_internal (ra));
-+}
-+
-+/* Return nonzero if this function is known to have a null epilogue.
-+ This allows the optimizer to omit jumps to jumps if no stack
-+ was created. */
-+
-+bool
-+riscv_can_use_return_insn (void)
-+{
-+ return reload_completed && cfun->machine->frame.total_size == 0;
-+}
-+
-+/* Implement TARGET_REGISTER_MOVE_COST. */
-+
-+static int
-+riscv_register_move_cost (enum machine_mode mode,
-+ reg_class_t from, reg_class_t to)
-+{
-+ return SECONDARY_MEMORY_NEEDED (from, to, mode) ? 8 : 2;
-+}
-+
-+/* Return true if register REGNO can store a value of mode MODE.
-+ The result of this function is cached in riscv_hard_regno_mode_ok. */
-+
-+static bool
-+riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
-+{
-+ unsigned int size = GET_MODE_SIZE (mode);
-+ enum mode_class mclass = GET_MODE_CLASS (mode);
-+
-+ /* This is bogus, but ira_build segfaults on RV32 without it. */
-+ if (VECTOR_MODE_P (mode))
-+ return true;
-+
-+ if (GP_REG_P (regno))
-+ {
-+ if (size <= UNITS_PER_WORD)
-+ return true;
-+
-+ /* Double-word values must be even-register-aligned. */
-+ if (size <= 2 * UNITS_PER_WORD)
-+ return regno % 2 == 0;
-+ }
-+
-+ if (FP_REG_P (regno))
-+ {
-+ if (mclass == MODE_FLOAT
-+ || mclass == MODE_COMPLEX_FLOAT
-+ || mclass == MODE_VECTOR_FLOAT)
-+ return size <= UNITS_PER_FPVALUE;
-+ }
-+
-+ return false;
-+}
-+
-+/* Implement HARD_REGNO_NREGS. */
-+
-+unsigned int
-+riscv_hard_regno_nregs (int regno, enum machine_mode mode)
-+{
-+ if (FP_REG_P (regno))
-+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
-+
-+ /* All other registers are word-sized. */
-+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-+}
-+
-+/* Implement CLASS_MAX_NREGS. */
-+
-+static unsigned char
-+riscv_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
-+{
-+ if (reg_class_subset_p (FP_REGS, rclass))
-+ return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
-+
-+ if (reg_class_subset_p (GR_REGS, rclass))
-+ return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
-+
-+ return 0;
-+}
-+
-+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
-+
-+static reg_class_t
-+riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
-+{
-+ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
-+ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
-+ rclass;
-+}
-+
-+/* Implement TARGET_MEMORY_MOVE_COST. */
-+
-+static int
-+riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
-+{
-+ return (tune_info->memory_cost
-+ + memory_move_secondary_cost (mode, rclass, in));
-+}
-+
-+/* Implement TARGET_MODE_REP_EXTENDED. */
-+
-+static int
-+riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
-+{
-+ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
-+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
-+ return SIGN_EXTEND;
-+
-+ return UNKNOWN;
-+}
-+
-+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
-+
-+static bool
-+riscv_scalar_mode_supported_p (enum machine_mode mode)
-+{
-+ if (ALL_FIXED_POINT_MODE_P (mode)
-+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
-+ return true;
-+
-+ return default_scalar_mode_supported_p (mode);
-+}
-+
-+/* Return the number of instructions that can be issued per cycle. */
-+
-+static int
-+riscv_issue_rate (void)
-+{
-+ return tune_info->issue_rate;
-+}
-+
-+/* This structure describes a single built-in function. */
-+struct riscv_builtin_description {
-+ /* The code of the main .md file instruction. See riscv_builtin_type
-+ for more information. */
-+ enum insn_code icode;
-+
-+ /* The name of the built-in function. */
-+ const char *name;
-+
-+ /* Specifies how the function should be expanded. */
-+ enum riscv_builtin_type builtin_type;
-+
-+ /* The function's prototype. */
-+ enum riscv_function_type function_type;
-+
-+ /* Whether the function is available. */
-+ unsigned int (*avail) (void);
-+};
-+
-+static unsigned int
-+riscv_builtin_avail_riscv (void)
-+{
-+ return 1;
-+}
-+
-+/* Construct a riscv_builtin_description from the given arguments.
-+
-+ INSN is the name of the associated instruction pattern, without the
-+ leading CODE_FOR_.
-+
-+ NAME is the name of the function itself, without the leading
-+ "__builtin_riscv_".
-+
-+ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
-+
-+ AVAIL is the name of the availability predicate, without the leading
-+ riscv_builtin_avail_. */
-+#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
-+ { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME, \
-+ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
-+
-+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
-+ mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE and AVAIL
-+ are as for RISCV_BUILTIN. */
-+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
-+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
-+
-+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
-+ function mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE
-+ and AVAIL are as for RISCV_BUILTIN. */
-+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
-+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
-+ FUNCTION_TYPE, AVAIL)
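-+
-+ /* For example, DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv)
-+ expands to { CODE_FOR_nop, "__builtin_riscv_nop",
-+ RISCV_BUILTIN_DIRECT_NO_TARGET, RISCV_VOID_FTYPE_VOID,
-+ riscv_builtin_avail_riscv }. */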
-+
-+static const struct riscv_builtin_description riscv_builtins[] = {
-+ DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
-+};
-+
-+/* Index I is the function declaration for riscv_builtins[I], or null if the
-+ function isn't defined on this target. */
-+static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
-+
-+
-+/* Source-level argument types. */
-+#define RISCV_ATYPE_VOID void_type_node
-+#define RISCV_ATYPE_INT integer_type_node
-+#define RISCV_ATYPE_POINTER ptr_type_node
-+#define RISCV_ATYPE_CPOINTER const_ptr_type_node
-+
-+/* Standard mode-based argument types. */
-+#define RISCV_ATYPE_UQI unsigned_intQI_type_node
-+#define RISCV_ATYPE_SI intSI_type_node
-+#define RISCV_ATYPE_USI unsigned_intSI_type_node
-+#define RISCV_ATYPE_DI intDI_type_node
-+#define RISCV_ATYPE_UDI unsigned_intDI_type_node
-+#define RISCV_ATYPE_SF float_type_node
-+#define RISCV_ATYPE_DF double_type_node
-+
-+/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
-+ their associated RISCV_ATYPEs. */
-+#define RISCV_FTYPE_ATYPES1(A, B) \
-+ RISCV_ATYPE_##A, RISCV_ATYPE_##B
-+
-+#define RISCV_FTYPE_ATYPES2(A, B, C) \
-+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
-+
-+#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
-+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
-+
-+#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
-+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
-+ RISCV_ATYPE_##E
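-+
-+ /* For example, RISCV_FTYPE_ATYPES1 (VOID, VOID) expands to
-+ void_type_node, void_type_node (the return type first, then the
-+ argument types), which is exactly what build_function_type_list
-+ expects below. */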
-+
-+/* Return the function type associated with function prototype TYPE. */
-+
-+static tree
-+riscv_build_function_type (enum riscv_function_type type)
-+{
-+ static tree types[(int) RISCV_MAX_FTYPE_MAX];
-+
-+ if (types[(int) type] == NULL_TREE)
-+ switch (type)
-+ {
-+#define DEF_RISCV_FTYPE(NUM, ARGS) \
-+ case RISCV_FTYPE_NAME##NUM ARGS: \
-+ types[(int) type] \
-+ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
-+ NULL_TREE); \
-+ break;
-+#include "config/riscv/riscv-ftypes.def"
-+#undef DEF_RISCV_FTYPE
-+ default:
-+ gcc_unreachable ();
-+ }
-+
-+ return types[(int) type];
-+}
-+
-+/* Implement TARGET_INIT_BUILTINS. */
-+
-+static void
-+riscv_init_builtins (void)
-+{
-+ const struct riscv_builtin_description *d;
-+ unsigned int i;
-+
-+ /* Iterate through all of the bdesc arrays, initializing all of the
-+ builtin functions. */
-+ for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
-+ {
-+ d = &riscv_builtins[i];
-+ if (d->avail ())
-+ riscv_builtin_decls[i]
-+ = add_builtin_function (d->name,
-+ riscv_build_function_type (d->function_type),
-+ i, BUILT_IN_MD, NULL, NULL);
-+ }
-+}
-+
-+/* Implement TARGET_BUILTIN_DECL. */
-+
-+static tree
-+riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
-+{
-+ if (code >= ARRAY_SIZE (riscv_builtins))
-+ return error_mark_node;
-+ return riscv_builtin_decls[code];
-+}
-+
-+/* Take argument ARGNO from EXP's argument list and convert it into a
-+ form suitable for input operand OPNO of instruction ICODE. Return the
-+ value. */
-+
-+static rtx
-+riscv_prepare_builtin_arg (enum insn_code icode,
-+ unsigned int opno, tree exp, unsigned int argno)
-+{
-+ tree arg;
-+ rtx value;
-+ enum machine_mode mode;
-+
-+ arg = CALL_EXPR_ARG (exp, argno);
-+ value = expand_normal (arg);
-+ mode = insn_data[icode].operand[opno].mode;
-+ if (!insn_data[icode].operand[opno].predicate (value, mode))
-+ {
-+ /* We need to get the mode from ARG for two reasons:
-+
-+ - to cope with address operands, where MODE is the mode of the
-+ memory, rather than of VALUE itself.
-+
-+ - to cope with special predicates like pmode_register_operand,
-+ where MODE is VOIDmode. */
-+ value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
-+
-+ /* Check the predicate again. */
-+ if (!insn_data[icode].operand[opno].predicate (value, mode))
-+ {
-+ error ("invalid argument to built-in function");
-+ return const0_rtx;
-+ }
-+ }
-+
-+ return value;
-+}
-+
-+/* Return an rtx suitable for output operand OP of instruction ICODE.
-+ If TARGET is non-null, try to use it where possible. */
-+
-+static rtx
-+riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
-+{
-+ enum machine_mode mode;
-+
-+ mode = insn_data[icode].operand[op].mode;
-+ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
-+ target = gen_reg_rtx (mode);
-+
-+ return target;
-+}
-+
-+/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
-+ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
-+ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
-+ suggests a good place to put the result. */
-+
-+static rtx
-+riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
-+ bool has_target_p)
-+{
-+ rtx ops[MAX_RECOG_OPERANDS];
-+ int opno, argno;
-+
-+ /* Map any target to operand 0. */
-+ opno = 0;
-+ if (has_target_p)
-+ {
-+ target = riscv_prepare_builtin_target (icode, opno, target);
-+ ops[opno] = target;
-+ opno++;
-+ }
-+
-+ /* Map the arguments to the other operands. The n_operands value
-+ for an expander includes match_dups and match_scratches as well as
-+ match_operands, so n_operands is only an upper bound on the number
-+ of arguments to the expander function. */
-+ gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
-+ for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
-+ ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
-+
-+ switch (opno)
-+ {
-+ case 2:
-+ emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
-+ break;
-+
-+ case 3:
-+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
-+ break;
-+
-+ case 4:
-+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
-+ break;
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+ return target;
-+}
-+
-+/* Implement TARGET_EXPAND_BUILTIN. */
-+
-+static rtx
-+riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ int ignore ATTRIBUTE_UNUSED)
-+{
-+ tree fndecl;
-+ unsigned int fcode, avail;
-+ const struct riscv_builtin_description *d;
-+
-+ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
-+ fcode = DECL_FUNCTION_CODE (fndecl);
-+ gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
-+ d = &riscv_builtins[fcode];
-+ avail = d->avail ();
-+ gcc_assert (avail != 0);
-+ switch (d->builtin_type)
-+ {
-+ case RISCV_BUILTIN_DIRECT:
-+ return riscv_expand_builtin_direct (d->icode, target, exp, true);
-+
-+ case RISCV_BUILTIN_DIRECT_NO_TARGET:
-+ return riscv_expand_builtin_direct (d->icode, target, exp, false);
-+ }
-+ gcc_unreachable ();
-+}
-+
-+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
-+ in order to avoid duplicating too much logic from elsewhere. */
-+
-+static void
-+riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
-+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
-+ tree function)
-+{
-+ rtx this_rtx, temp1, temp2, fnaddr;
-+ rtx_insn *insn;
-+ bool use_sibcall_p;
-+
-+ /* Pretend to be a post-reload pass while generating rtl. */
-+ reload_completed = 1;
-+
-+ /* Mark the end of the (empty) prologue. */
-+ emit_note (NOTE_INSN_PROLOGUE_END);
-+
-+ /* Determine if we can use a sibcall to call FUNCTION directly. */
-+ fnaddr = XEXP (DECL_RTL (function), 0);
-+ use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
-+
-+ /* We need two temporary registers in some cases. */
-+ temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
-+ temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
-+
-+ /* Find out which register contains the "this" pointer. */
-+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
-+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
-+ else
-+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
-+
-+ /* Add DELTA to THIS_RTX. */
-+ if (delta != 0)
-+ {
-+ rtx offset = GEN_INT (delta);
-+ if (!SMALL_OPERAND (delta))
-+ {
-+ riscv_emit_move (temp1, offset);
-+ offset = temp1;
-+ }
-+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
-+ }
-+
-+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
-+ if (vcall_offset != 0)
-+ {
-+ rtx addr;
-+
-+ /* Set TEMP1 to *THIS_RTX. */
-+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
-+
-+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
-+ addr = riscv_add_offset (temp2, temp1, vcall_offset);
-+
-+ /* Load the offset and add it to THIS_RTX. */
-+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
-+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
-+ }
-+
-+ /* Jump to the target function. Use a sibcall if direct jumps are
-+ allowed, otherwise load the address into a register first. */
-+ if (use_sibcall_p)
-+ {
-+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
-+ SIBLING_CALL_P (insn) = 1;
-+ }
-+ else
-+ {
-+ riscv_emit_move(temp1, fnaddr);
-+ emit_jump_insn (gen_indirect_jump (temp1));
-+ }
-+
-+ /* Run just enough of rest_of_compilation. This sequence was
-+ "borrowed" from alpha.c. */
-+ insn = get_insns ();
-+ split_all_insns_noflow ();
-+ shorten_branches (insn);
-+ final_start_function (insn, file, 1);
-+ final (insn, file, 1);
-+ final_end_function ();
-+
-+ /* Clean up the vars set above. Note that final_end_function resets
-+ the global pointer for us. */
-+ reload_completed = 0;
-+}
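
[Editorial aside, not part of the patch being removed above.] riscv_output_mi_thunk adjusts the incoming "this" pointer and then tail-calls the target method. A minimal C sketch of the pointer arithmetic the emitted thunk performs (the function name is purely illustrative):

    /* Add DELTA to the this pointer and, when VCALL_OFFSET is nonzero,
       also add the value stored at *this + VCALL_OFFSET, exactly as the
       rtl sequence above does before jumping to FUNCTION.  */
    static void *
    thunk_adjust_this (void *this_ptr, long delta, long vcall_offset)
    {
      char *p = (char *) this_ptr + delta;
      if (vcall_offset != 0)
        p += *(long *) (*(char **) p + vcall_offset);
      return p;  /* the thunk then jumps to FUNCTION with p as "this" */
    }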
-+
-+/* Allocate a chunk of memory for per-function machine-dependent data. */
-+
-+static struct machine_function *
-+riscv_init_machine_status (void)
-+{
-+ return ggc_cleared_alloc<machine_function> ();
-+}
-+
-+/* Implement TARGET_OPTION_OVERRIDE. */
-+
-+static void
-+riscv_option_override (void)
-+{
-+ int regno, mode;
-+ const struct riscv_cpu_info *cpu;
-+
-+#ifdef SUBTARGET_OVERRIDE_OPTIONS
-+ SUBTARGET_OVERRIDE_OPTIONS;
-+#endif
-+
-+ flag_pcc_struct_return = 0;
-+
-+ if (flag_pic)
-+ g_switch_value = 0;
-+
-+ /* Prefer a call to memcpy over inline code when optimizing for size,
-+ though see MOVE_RATIO in riscv.h. */
-+ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
-+ target_flags |= MASK_MEMCPY;
-+
-+ /* Handle -mtune. */
-+ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
-+ RISCV_TUNE_STRING_DEFAULT);
-+ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
-+
-+ /* If the user hasn't specified a branch cost, use the processor's
-+ default. */
-+ if (riscv_branch_cost == 0)
-+ riscv_branch_cost = tune_info->branch_cost;
-+
-+ /* Set up riscv_hard_regno_mode_ok. */
-+ for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
-+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
-+ riscv_hard_regno_mode_ok[mode][regno]
-+ = riscv_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
-+
-+ /* Function to allocate machine-dependent function status. */
-+ init_machine_status = &riscv_init_machine_status;
-+
-+ if (riscv_cmodel_string)
-+ {
-+ if (strcmp (riscv_cmodel_string, "medlow") == 0)
-+ riscv_cmodel = CM_MEDLOW;
-+ else if (strcmp (riscv_cmodel_string, "medany") == 0)
-+ riscv_cmodel = CM_MEDANY;
-+ else
-+ error ("unsupported code model: %s", riscv_cmodel_string);
-+ }
-+
-+ if (flag_pic)
-+ riscv_cmodel = CM_PIC;
-+
-+ riscv_init_relocs ();
-+}
-+
-+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
-+
-+static void
-+riscv_conditional_register_usage (void)
-+{
-+ int regno;
-+
-+ if (!TARGET_HARD_FLOAT)
-+ {
-+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+ fixed_regs[regno] = call_used_regs[regno] = 1;
-+ }
-+}
-+
-+/* Return a register priority for hard reg REGNO. */
-+static int
-+riscv_register_priority (int regno)
-+{
-+ /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
-+ if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
-+ || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/* Implement TARGET_TRAMPOLINE_INIT. */
-+
-+static void
-+riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
-+{
-+ rtx addr, end_addr, mem;
-+ uint32_t trampoline[4];
-+ unsigned int i;
-+ HOST_WIDE_INT static_chain_offset, target_function_offset;
-+
-+ /* Work out the offsets of the pointers from the start of the
-+ trampoline code. */
-+ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
-+ static_chain_offset = TRAMPOLINE_CODE_SIZE;
-+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
-+
-+ /* Get pointers to the beginning and end of the code block. */
-+ addr = force_reg (Pmode, XEXP (m_tramp, 0));
-+ end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
-+
-+ /* auipc t0, 0
-+ l[wd] t1, target_function_offset(t0)
-+ l[wd] t0, static_chain_offset(t0)
-+ jr t1
-+ */
-+ trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
-+ trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
-+ | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
-+ | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
-+ | (target_function_offset << SHIFT_IMM);
-+ trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
-+ | (STATIC_CHAIN_REGNUM << SHIFT_RD)
-+ | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
-+ | (static_chain_offset << SHIFT_IMM);
-+ trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
-+
-+ /* Copy the trampoline code. */
-+ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
-+ {
-+ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
-+ riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
-+ }
-+
-+ /* Set up the static chain pointer field. */
-+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
-+ riscv_emit_move (mem, chain_value);
-+
-+ /* Set up the target function field. */
-+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
-+ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
-+
-+ /* Flush the code part of the trampoline. */
-+ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
-+ emit_insn (gen_clear_cache (addr, end_addr));
-+}
-+
-+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
-+
-+static bool
-+riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
-+ tree exp ATTRIBUTE_UNUSED)
-+{
-+ if (TARGET_SAVE_RESTORE)
-+ {
-+ /* When optimizing for size, don't use sibcalls in non-leaf routines. */
-+ if (cfun->machine->is_leaf == 0)
-+ cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
-+
-+ return cfun->machine->is_leaf > 0;
-+ }
-+
-+ return true;
-+}
-+
-+/* Initialize the GCC target structure. */
-+#undef TARGET_ASM_ALIGNED_HI_OP
-+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
-+#undef TARGET_ASM_ALIGNED_SI_OP
-+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
-+#undef TARGET_ASM_ALIGNED_DI_OP
-+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
-+
-+#undef TARGET_OPTION_OVERRIDE
-+#define TARGET_OPTION_OVERRIDE riscv_option_override
-+
-+#undef TARGET_LEGITIMIZE_ADDRESS
-+#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
-+
-+#undef TARGET_SCHED_ISSUE_RATE
-+#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
-+
-+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
-+#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
-+
-+#undef TARGET_REGISTER_MOVE_COST
-+#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
-+#undef TARGET_MEMORY_MOVE_COST
-+#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
-+#undef TARGET_RTX_COSTS
-+#define TARGET_RTX_COSTS riscv_rtx_costs
-+#undef TARGET_ADDRESS_COST
-+#define TARGET_ADDRESS_COST riscv_address_cost
-+
-+#undef TARGET_PREFERRED_RELOAD_CLASS
-+#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
-+
-+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
-+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
-+
-+#undef TARGET_EXPAND_BUILTIN_VA_START
-+#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
-+
-+#undef TARGET_PROMOTE_FUNCTION_MODE
-+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
-+
-+#undef TARGET_RETURN_IN_MEMORY
-+#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
-+
-+#undef TARGET_ASM_OUTPUT_MI_THUNK
-+#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
-+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
-+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
-+
-+#undef TARGET_PRINT_OPERAND
-+#define TARGET_PRINT_OPERAND riscv_print_operand
-+#undef TARGET_PRINT_OPERAND_ADDRESS
-+#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
-+
-+#undef TARGET_SETUP_INCOMING_VARARGS
-+#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
-+#undef TARGET_STRICT_ARGUMENT_NAMING
-+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
-+#undef TARGET_MUST_PASS_IN_STACK
-+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
-+#undef TARGET_PASS_BY_REFERENCE
-+#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
-+#undef TARGET_ARG_PARTIAL_BYTES
-+#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
-+#undef TARGET_FUNCTION_ARG
-+#define TARGET_FUNCTION_ARG riscv_function_arg
-+#undef TARGET_FUNCTION_ARG_ADVANCE
-+#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
-+#undef TARGET_FUNCTION_ARG_BOUNDARY
-+#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
-+
-+#undef TARGET_MODE_REP_EXTENDED
-+#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
-+
-+#undef TARGET_SCALAR_MODE_SUPPORTED_P
-+#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
-+
-+#undef TARGET_INIT_BUILTINS
-+#define TARGET_INIT_BUILTINS riscv_init_builtins
-+#undef TARGET_BUILTIN_DECL
-+#define TARGET_BUILTIN_DECL riscv_builtin_decl
-+#undef TARGET_EXPAND_BUILTIN
-+#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
-+
-+#undef TARGET_HAVE_TLS
-+#define TARGET_HAVE_TLS HAVE_AS_TLS
-+
-+#undef TARGET_CANNOT_FORCE_CONST_MEM
-+#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
-+
-+#undef TARGET_LEGITIMATE_CONSTANT_P
-+#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
-+
-+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
-+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
-+
-+#ifdef HAVE_AS_DTPRELWORD
-+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
-+#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
-+#endif
-+
-+#undef TARGET_LEGITIMATE_ADDRESS_P
-+#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
-+
-+#undef TARGET_CAN_ELIMINATE
-+#define TARGET_CAN_ELIMINATE riscv_can_eliminate
-+
-+#undef TARGET_CONDITIONAL_REGISTER_USAGE
-+#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
-+
-+#undef TARGET_CLASS_MAX_NREGS
-+#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
-+
-+#undef TARGET_TRAMPOLINE_INIT
-+#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
-+
-+#undef TARGET_IN_SMALL_DATA_P
-+#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
-+
-+#undef TARGET_ASM_SELECT_RTX_SECTION
-+#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
-+
-+#undef TARGET_MIN_ANCHOR_OFFSET
-+#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
-+
-+#undef TARGET_MAX_ANCHOR_OFFSET
-+#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
-+
-+#undef TARGET_LRA_P
-+#define TARGET_LRA_P hook_bool_void_true
-+
-+#undef TARGET_REGISTER_PRIORITY
-+#define TARGET_REGISTER_PRIORITY riscv_register_priority
-+
-+struct gcc_target targetm = TARGET_INITIALIZER;
-+
-+#include "gt-riscv.h"
-diff -urN empty/gcc/config/riscv/riscv-ftypes.def gcc-5.3.0/gcc/config/riscv/riscv-ftypes.def
---- empty/gcc/config/riscv/riscv-ftypes.def 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv-ftypes.def 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,39 @@
-+/* Definitions of prototypes for RISC-V built-in functions.
-+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/>. */
-+
-+/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
-+ RISC-V built-in functions, where:
-+
-+ NARGS is the number of arguments.
-+ LIST contains the return-type code followed by the codes for each
-+ argument type.
-+
-+ Argument- and return-type codes are either modes or one of the following:
-+
-+ VOID for void_type_node
-+ INT for integer_type_node
-+ POINTER for ptr_type_node
-+
-+ (we don't use PTR because that's an ANSI-compatibility macro).
-+
-+ Please keep this list lexicographically sorted by the LIST argument. */
-+
-+DEF_RISCV_FTYPE (1, (VOID, VOID))
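
[Editorial aside, not part of the patch.] Per the comment above, the first code in LIST is the return type and the remaining codes are argument types. For illustration only, a hypothetical two-argument built-in returning int and taking an int and a pointer would be declared as:

    DEF_RISCV_FTYPE (2, (INT, INT, POINTER))

The patch itself only defines the single (VOID, VOID) prototype shown above.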
-diff -urN empty/gcc/config/riscv/riscv.h gcc-5.3.0/gcc/config/riscv/riscv.h
---- empty/gcc/config/riscv/riscv.h 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.h 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,1079 @@
-+/* Definition of RISC-V target for GNU compiler.
-+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/>. */
-+
-+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
-+ directly accessible, while the command-line options select
-+ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
-+ in use. */
-+#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI
-+#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI
-+
-+/* Target CPU builtins. */
-+#define TARGET_CPU_CPP_BUILTINS() \
-+ do \
-+ { \
-+ builtin_assert ("machine=riscv"); \
-+ \
-+ builtin_assert ("cpu=riscv"); \
-+ builtin_define ("__riscv__"); \
-+ builtin_define ("__riscv"); \
-+ builtin_define ("_riscv"); \
-+ builtin_define ("__riscv"); \
-+ \
-+ if (TARGET_64BIT) \
-+ { \
-+ builtin_define ("__riscv64"); \
-+ builtin_define ("_RISCV_SIM=_ABI64"); \
-+ } \
-+ else \
-+ { \
-+ builtin_define ("__riscv32"); \
-+ builtin_define ("_RISCV_SIM=_ABI32"); \
-+ } \
-+ \
-+ builtin_define ("_ABI32=1"); \
-+ builtin_define ("_ABI64=3"); \
-+ \
-+ \
-+ builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE); \
-+ builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE); \
-+ builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE); \
-+ \
-+ if (TARGET_RVC) \
-+ builtin_define ("__riscv_compressed"); \
-+ \
-+ if (TARGET_ATOMIC) \
-+ builtin_define ("__riscv_atomic"); \
-+ \
-+ if (TARGET_MULDIV) \
-+ builtin_define ("__riscv_muldiv"); \
-+ \
-+ if (TARGET_HARD_FLOAT_ABI) \
-+ { \
-+ builtin_define ("__riscv_hard_float"); \
-+ if (TARGET_FDIV) \
-+ { \
-+ builtin_define ("__riscv_fdiv"); \
-+ builtin_define ("__riscv_fsqrt"); \
-+ } \
-+ } \
-+ else \
-+ builtin_define ("__riscv_soft_float"); \
-+ \
-+ /* The base RISC-V ISA is always little-endian. */ \
-+ builtin_define_std ("RISCVEL"); \
-+ \
-+ if (riscv_cmodel == CM_MEDANY) \
-+ builtin_define ("_RISCV_CMODEL_MEDANY"); \
-+ } \
-+ while (0)
-+
-+/* Default target_flags if no switches are specified */
-+
-+#ifndef TARGET_DEFAULT
-+#define TARGET_DEFAULT 0
-+#endif
-+
-+#ifndef RISCV_ARCH_STRING_DEFAULT
-+#define RISCV_ARCH_STRING_DEFAULT "IMAFD"
-+#endif
-+
-+#ifndef RISCV_TUNE_STRING_DEFAULT
-+#define RISCV_TUNE_STRING_DEFAULT "rocket"
-+#endif
-+
-+#ifndef TARGET_64BIT_DEFAULT
-+#define TARGET_64BIT_DEFAULT 1
-+#endif
-+
-+#if TARGET_64BIT_DEFAULT
-+# define MULTILIB_ARCH_DEFAULT "m64"
-+# define OPT_ARCH64 "!m32"
-+# define OPT_ARCH32 "m32"
-+#else
-+# define MULTILIB_ARCH_DEFAULT "m32"
-+# define OPT_ARCH64 "m64"
-+# define OPT_ARCH32 "!m64"
-+#endif
-+
-+#ifndef MULTILIB_DEFAULTS
-+#define MULTILIB_DEFAULTS \
-+ { MULTILIB_ARCH_DEFAULT }
-+#endif
-+
-+
-+/* Support for a compile-time default CPU, et cetera. The rules are:
-+ --with-arch is ignored if -march is specified.
-+ --with-tune is ignored if -mtune is specified.
-+ --with-float is ignored if -mhard-float or -msoft-float are specified. */
-+#define OPTION_DEFAULT_SPECS \
-+ {"arch", "%{!march=*:-march=%(VALUE)}"}, \
-+ {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
-+ {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
-+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
-+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
-+
-+#define DRIVER_SELF_SPECS ""
-+
-+#ifdef IN_LIBGCC2
-+#undef TARGET_64BIT
-+/* Make this compile time constant for libgcc2 */
-+#ifdef __riscv64
-+#define TARGET_64BIT 1
-+#else
-+#define TARGET_64BIT 0
-+#endif
-+#endif /* IN_LIBGCC2 */
-+
-+/* Tell collect what flags to pass to nm. */
-+#ifndef NM_FLAGS
-+#define NM_FLAGS "-Bn"
-+#endif
-+
-+#undef ASM_SPEC
-+#define ASM_SPEC "\
-+%(subtarget_asm_debugging_spec) \
-+%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
-+%{mrvc} %{mno-rvc} \
-+%{msoft-float} %{mhard-float} \
-+%{fPIC|fpic|fPIE|fpie:-fpic} \
-+%{march=*} \
-+%(subtarget_asm_spec)"
-+
-+/* Extra switches sometimes passed to the linker. */
-+
-+#ifndef LINK_SPEC
-+#define LINK_SPEC "\
-+%{!T:-dT riscv.ld} \
-+%{m64:-melf64lriscv} \
-+%{m32:-melf32lriscv} \
-+%{shared}"
-+#endif /* LINK_SPEC defined */
-+
-+/* This macro defines names of additional specifications to put in the specs
-+ that can be used in various specifications like CC1_SPEC. Its definition
-+ is an initializer with a subgrouping for each command option.
-+
-+ Each subgrouping contains a string constant, that defines the
-+ specification name, and a string constant that is used by the GCC driver
-+ program.
-+
-+ Do not define this macro if it does not need to do anything. */
-+
-+#define EXTRA_SPECS \
-+ { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT }, \
-+ SUBTARGET_EXTRA_SPECS
-+
-+#ifndef SUBTARGET_EXTRA_SPECS
-+#define SUBTARGET_EXTRA_SPECS
-+#endif
-+
-+#define TARGET_DEFAULT_CMODEL CM_MEDLOW
-+
-+/* By default, turn on GDB extensions. */
-+#define DEFAULT_GDB_EXTENSIONS 1
-+
-+#define LOCAL_LABEL_PREFIX "."
-+#define USER_LABEL_PREFIX ""
-+
-+#define DWARF2_DEBUGGING_INFO 1
-+#define DWARF2_ASM_LINE_DEBUG_INFO 0
-+
-+/* The mapping from gcc register number to DWARF 2 CFA column number. */
-+#define DWARF_FRAME_REGNUM(REGNO) \
-+ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
-+
-+/* The DWARF 2 CFA column which tracks the return address. */
-+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
-+
-+/* Don't emit .cfi_sections, as it does not work */
-+#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
-+#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
-+
-+/* Before the prologue, RA lives in x1. */
-+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
-+
-+/* Describe how we implement __builtin_eh_return. */
-+#define EH_RETURN_DATA_REGNO(N) \
-+ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
-+
-+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
-+
-+/* Target machine storage layout */
-+
-+#define BITS_BIG_ENDIAN 0
-+#define BYTES_BIG_ENDIAN 0
-+#define WORDS_BIG_ENDIAN 0
-+
-+#define MAX_BITS_PER_WORD 64
-+
-+/* Width of a word, in units (bytes). */
-+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
-+#ifndef IN_LIBGCC2
-+#define MIN_UNITS_PER_WORD 4
-+#endif
-+
-+/* We currently require both or neither of the `F' and `D' extensions. */
-+#define UNITS_PER_FPREG 8
-+
-+/* The largest size of value that can be held in floating-point
-+ registers and moved with a single instruction. */
-+#define UNITS_PER_HWFPVALUE \
-+ (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG)
-+
-+/* The largest size of value that can be held in floating-point
-+ registers. */
-+#define UNITS_PER_FPVALUE \
-+ (TARGET_SOFT_FLOAT_ABI ? 0 \
-+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
-+
-+/* The number of bytes in a double. */
-+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
-+
-+/* Set the sizes of the core types. */
-+#define SHORT_TYPE_SIZE 16
-+#define INT_TYPE_SIZE 32
-+#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
-+#define LONG_LONG_TYPE_SIZE 64
-+
-+#define FLOAT_TYPE_SIZE 32
-+#define DOUBLE_TYPE_SIZE 64
-+/* XXX The ABI says long doubles are IEEE-754-2008 float128s. */
-+#define LONG_DOUBLE_TYPE_SIZE 64
-+
-+#ifdef IN_LIBGCC2
-+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
-+#endif
-+
-+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
-+#define PARM_BOUNDARY BITS_PER_WORD
-+
-+/* Allocation boundary (in *bits*) for the code of a function. */
-+#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
-+
-+/* There is no point aligning anything to a rounder boundary than this. */
-+#define BIGGEST_ALIGNMENT 128
-+
-+/* All accesses must be aligned. */
-+#define STRICT_ALIGNMENT 1
-+
-+/* Define this if you wish to imitate the way many other C compilers
-+ handle alignment of bitfields and the structures that contain
-+ them.
-+
-+ The behavior is that the type written for a bit-field (`int',
-+ `short', or other integer type) imposes an alignment for the
-+ entire structure, as if the structure really did contain an
-+ ordinary field of that type. In addition, the bit-field is placed
-+ within the structure so that it would fit within such a field,
-+ not crossing a boundary for it.
-+
-+ Thus, on most machines, a bit-field whose type is written as `int'
-+ would not cross a four-byte boundary, and would force four-byte
-+ alignment for the whole structure. (The alignment used may not
-+ be four bytes; it is controlled by the other alignment
-+ parameters.)
-+
-+ If the macro is defined, its definition should be a C expression;
-+ a nonzero value for the expression enables this behavior. */
-+
-+#define PCC_BITFIELD_TYPE_MATTERS 1
-+
-+/* If defined, a C expression to compute the alignment given to a
-+ constant that is being placed in memory. CONSTANT is the constant
-+ and ALIGN is the alignment that the object would ordinarily have.
-+ The value of this macro is used instead of that alignment to align
-+ the object.
-+
-+ If this macro is not defined, then ALIGN is used.
-+
-+ The typical use of this macro is to increase alignment for string
-+ constants to be word aligned so that `strcpy' calls that copy
-+ constants can be done inline. */
-+
-+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
-+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
-+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
-+
-+/* If defined, a C expression to compute the alignment for a static
-+ variable. TYPE is the data type, and ALIGN is the alignment that
-+ the object would ordinarily have. The value of this macro is used
-+ instead of that alignment to align the object.
-+
-+ If this macro is not defined, then ALIGN is used.
-+
-+ One use of this macro is to increase alignment of medium-size
-+ data to make it all fit in fewer cache lines. Another is to
-+ cause character arrays to be word-aligned so that `strcpy' calls
-+ that copy constants to character arrays can be done inline. */
-+
-+#undef DATA_ALIGNMENT
-+#define DATA_ALIGNMENT(TYPE, ALIGN) \
-+ ((((ALIGN) < BITS_PER_WORD) \
-+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
-+ || TREE_CODE (TYPE) == UNION_TYPE \
-+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
-+
-+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
-+ character arrays to be word-aligned so that `strcpy' calls that copy
-+ constants to character arrays can be done inline, and 'strcmp' can be
-+ optimised to use word loads. */
-+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
-+ DATA_ALIGNMENT (TYPE, ALIGN)
-+
-+/* Define if operations between registers always perform the operation
-+ on the full register even if a narrower mode is specified. */
-+#define WORD_REGISTER_OPERATIONS
-+
-+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
-+ moves. All other references are zero extended. */
-+#define LOAD_EXTEND_OP(MODE) \
-+ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
-+ ? SIGN_EXTEND : ZERO_EXTEND)
-+
-+/* Define this macro if it is advisable to hold scalars in registers
-+ in a wider mode than that declared by the program. In such cases,
-+ the value is constrained to be within the bounds of the declared
-+ type, but kept valid in the wider mode. The signedness of the
-+ extension may differ from that of the type. */
-+
-+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
-+ if (GET_MODE_CLASS (MODE) == MODE_INT \
-+ && GET_MODE_SIZE (MODE) < 4) \
-+ { \
-+ (MODE) = Pmode; \
-+ }
-+
-+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
-+ Extensions of pointers to word_mode must be signed. */
-+#define POINTERS_EXTEND_UNSIGNED false
-+
-+/* When floating-point registers are wider than integer ones, moves between
-+ them must go through memory. */
-+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
-+ (GET_MODE_SIZE (MODE) > UNITS_PER_WORD \
-+ && ((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS))
-+
-+/* Define if loading short immediate values into registers sign extends. */
-+#define SHORT_IMMEDIATES_SIGN_EXTEND
-+
-+/* Standard register usage. */
-+
-+/* Number of hardware registers. We have:
-+
-+ - 32 integer registers
-+ - 32 floating point registers
-+ - 32 vector integer registers
-+ - 32 vector floating point registers
-+ - 2 fake registers:
-+ - ARG_POINTER_REGNUM
-+ - FRAME_POINTER_REGNUM */
-+
-+#define FIRST_PSEUDO_REGISTER 66
-+
-+/* x0, sp, gp, and tp are fixed. */
-+
-+#define FIXED_REGISTERS \
-+{ /* General registers. */ \
-+ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
-+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
-+ /* Floating-point registers. */ \
-+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
-+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
-+ /* Others. */ \
-+ 1, 1 \
-+}
-+
-+
-+/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
-+ The call RTLs themselves clobber ra. */
-+
-+#define CALL_USED_REGISTERS \
-+{ /* General registers. */ \
-+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
-+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
-+ /* Floating-point registers. */ \
-+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
-+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
-+ /* Others. */ \
-+ 1, 1 \
-+}
-+
-+#define CALL_REALLY_USED_REGISTERS \
-+{ /* General registers. */ \
-+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
-+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
-+ /* Floating-point registers. */ \
-+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
-+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
-+ /* Others. */ \
-+ 1, 1 \
-+}
-+
-+/* Internal macros to classify an ISA register's type. */
-+
-+#define GP_REG_FIRST 0
-+#define GP_REG_LAST 31
-+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
-+
-+#define FP_REG_FIRST 32
-+#define FP_REG_LAST 63
-+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
-+
-+/* The DWARF 2 CFA column which tracks the return address from a
-+ signal handler context. This means that to maintain backwards
-+ compatibility, no hard register can be assigned this column if it
-+ would need to be handled by the DWARF unwinder. */
-+#define DWARF_ALT_FRAME_RETURN_COLUMN 64
-+
-+#define GP_REG_P(REGNO) \
-+ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
-+#define FP_REG_P(REGNO) \
-+ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
-+
-+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
-+
-+/* Return coprocessor number from register number. */
-+
-+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
-+ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
-+ : COP3_REG_P (REGNO) ? '3' : '?')
-+
-+
-+#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
-+
-+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
-+ riscv_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
-+
-+#define MODES_TIEABLE_P(MODE1, MODE2) \
-+ ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT \
-+ && GET_MODE_CLASS (MODE2) == MODE_INT))
-+
-+/* Use s0 as the frame pointer if it is so requested. */
-+#define HARD_FRAME_POINTER_REGNUM 8
-+#define STACK_POINTER_REGNUM 2
-+#define THREAD_POINTER_REGNUM 4
-+
-+/* These two registers don't really exist: they get eliminated to either
-+ the stack or hard frame pointer. */
-+#define ARG_POINTER_REGNUM 64
-+#define FRAME_POINTER_REGNUM 65
-+
-+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
-+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
-+
-+/* Register in which static-chain is passed to a function. */
-+#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
-+
-+/* Registers used as temporaries in prologue/epilogue code.
-+
-+ The prologue registers mustn't conflict with any
-+ incoming arguments, the static chain pointer, or the frame pointer.
-+ The epilogue temporary mustn't conflict with the return registers,
-+ the frame pointer, the EH stack adjustment, or the EH data registers. */
-+
-+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
-+#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
-+
-+#define FUNCTION_PROFILER(STREAM, LABELNO) \
-+{ \
-+ sorry ("profiler support for RISC-V"); \
-+}
-+
-+/* Define this macro if it is as good or better to call a constant
-+ function address than to call an address kept in a register. */
-+#define NO_FUNCTION_CSE 1
-+
-+/* Define the classes of registers for register constraints in the
-+ machine description. Also define ranges of constants.
-+
-+ One of the classes must always be named ALL_REGS and include all hard regs.
-+ If there is more than one class, another class must be named NO_REGS
-+ and contain no registers.
-+
-+ The name GENERAL_REGS must be the name of a class (or an alias for
-+ another name such as ALL_REGS). This is the class of registers
-+ that is allowed by "g" or "r" in a register constraint.
-+ Also, registers outside this class are allocated only when
-+ instructions express preferences for them.
-+
-+ The classes must be numbered in nondecreasing order; that is,
-+ a larger-numbered class must never be contained completely
-+ in a smaller-numbered class.
-+
-+ For any two classes, it is very desirable that there be another
-+ class that represents their union. */
-+
-+enum reg_class
-+{
-+ NO_REGS, /* no registers in set */
-+ T_REGS, /* registers used by indirect sibcalls */
-+ JALR_REGS, /* registers used by indirect calls */
-+ GR_REGS, /* integer registers */
-+ FP_REGS, /* floating point registers */
-+ FRAME_REGS, /* $arg and $frame */
-+ ALL_REGS, /* all registers */
-+ LIM_REG_CLASSES /* max value + 1 */
-+};
-+
-+#define N_REG_CLASSES (int) LIM_REG_CLASSES
-+
-+#define GENERAL_REGS GR_REGS
-+
-+/* An initializer containing the names of the register classes as C
-+ string constants. These names are used in writing some of the
-+ debugging dumps. */
-+
-+#define REG_CLASS_NAMES \
-+{ \
-+ "NO_REGS", \
-+ "T_REGS", \
-+ "JALR_REGS", \
-+ "GR_REGS", \
-+ "FP_REGS", \
-+ "FRAME_REGS", \
-+ "ALL_REGS" \
-+}
-+
-+/* An initializer containing the contents of the register classes,
-+ as integers which are bit masks. The Nth integer specifies the
-+ contents of class N. The way the integer MASK is interpreted is
-+ that register R is in the class if `MASK & (1 << R)' is 1.
-+
-+ When the machine has more than 32 registers, an integer does not
-+ suffice. Then the integers are replaced by sub-initializers,
-+ braced groupings containing several integers. Each
-+ sub-initializer must be suitable as an initializer for the type
-+ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
-+
-+#define REG_CLASS_CONTENTS \
-+{ \
-+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
-+ { 0xf0000040, 0x00000000, 0x00000000 }, /* T_REGS */ \
-+ { 0xffffff40, 0x00000000, 0x00000000 }, /* JALR_REGS */ \
-+ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
-+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
-+ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
-+ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
-+}
-+
-+/* A C expression whose value is a register class containing hard
-+ register REGNO. In general there is more than one such class;
-+ choose a class which is "minimal", meaning that no smaller class
-+ also contains the register. */
-+
-+#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
-+
-+/* A macro whose definition is the name of the class to which a
-+ valid base register must belong. A base register is one used in
-+ an address which is the register value plus a displacement. */
-+
-+#define BASE_REG_CLASS GR_REGS
-+
-+/* A macro whose definition is the name of the class to which a
-+ valid index register must belong. An index register is one used
-+ in an address where its value is either multiplied by a scale
-+ factor or added to another register (as well as added to a
-+ displacement). */
-+
-+#define INDEX_REG_CLASS NO_REGS
-+
-+/* We generally want to put call-clobbered registers ahead of
-+ call-saved ones. (IRA expects this.) */
-+
-+#define REG_ALLOC_ORDER \
-+{ \
-+ /* Call-clobbered GPRs. */ \
-+ 15, 14, 13, 12, 11, 10, 16, 17, 6, 28, 29, 30, 31, 5, 7, 1, \
-+ /* Call-saved GPRs. */ \
-+ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
-+ /* GPRs that can never be exposed to the register allocator. */ \
-+ 0, 2, 3, 4, \
-+ /* Call-clobbered FPRs. */ \
-+ 47, 46, 45, 44, 43, 42, 32, 33, 34, 35, 36, 37, 38, 39, 48, 49, \
-+ 60, 61, 62, 63, \
-+ /* Call-saved FPRs. */ \
-+ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
-+ /* None of the remaining classes have defined call-saved \
-+ registers. */ \
-+ 64, 65 \
-+}
-+
-+/* True if VALUE is a signed 12-bit number. */
-+
-+#define SMALL_OPERAND(VALUE) \
-+ ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
-+
-+/* True if VALUE can be loaded into a register using LUI. */
-+
-+#define LUI_OPERAND(VALUE) \
-+ (((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH) \
-+ || ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
-+
-+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
-+ reg_classes_intersect_p (FP_REGS, CLASS)
-+
-+/* Stack layout; function entry, exit and calling. */
-+
-+#define STACK_GROWS_DOWNWARD
-+
-+#define FRAME_GROWS_DOWNWARD 1
-+
-+#define STARTING_FRAME_OFFSET 0
-+
-+#define RETURN_ADDR_RTX riscv_return_addr
-+
-+#define ELIMINABLE_REGS \
-+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
-+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
-+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
-+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
-+
-+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
-+ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
-+
-+/* Allocate stack space for arguments at the beginning of each function. */
-+#define ACCUMULATE_OUTGOING_ARGS 1
-+
-+/* The argument pointer always points to the first argument. */
-+#define FIRST_PARM_OFFSET(FNDECL) 0
-+
-+#define REG_PARM_STACK_SPACE(FNDECL) 0
-+
-+/* Define this if it is the responsibility of the caller to
-+ allocate the area reserved for arguments passed in registers.
-+ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
-+ of this macro is to determine whether the space is included in
-+ `crtl->outgoing_args_size'. */
-+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
-+
-+#define STACK_BOUNDARY 128
-+
-+/* Symbolic macros for the registers used to return integer and floating
-+ point values. */
-+
-+#define GP_RETURN GP_ARG_FIRST
-+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : FP_ARG_FIRST)
-+
-+#define MAX_ARGS_IN_REGISTERS 8
-+
-+/* Symbolic macros for the first/last argument registers. */
-+
-+#define GP_ARG_FIRST (GP_REG_FIRST + 10)
-+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
-+#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
-+#define FP_ARG_FIRST (FP_REG_FIRST + 10)
-+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
-+
-+#define CALLEE_SAVED_REG_NUMBER(REGNO) \
-+ ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 : \
-+ (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
-+
-+#define LIBCALL_VALUE(MODE) \
-+ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
-+
-+#define FUNCTION_VALUE(VALTYPE, FUNC) \
-+ riscv_function_value (VALTYPE, FUNC, VOIDmode)
-+
-+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
-+
-+/* 1 if N is a possible register number for function argument passing.
-+ We have no FP argument registers when soft-float. When FP registers
-+ are 32 bits, we can't directly reference the odd numbered ones. */
-+
-+/* Accept arguments in a0-a7 and/or fa0-fa7. */
-+#define FUNCTION_ARG_REGNO_P(N) \
-+ (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
-+ || IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))
-+
-+/* The ABI views the arguments as a structure, of which the first 8
-+ words go in registers and the rest go on the stack. If I < 8, the Ith
-+ word might go in the Ith integer argument register or the Ith
-+ floating-point argument register. */
-+
-+typedef struct {
-+ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
-+ unsigned int num_gprs;
-+
-+ /* Number of words passed on the stack. */
-+ unsigned int stack_words;
-+} CUMULATIVE_ARGS;
-+
-+/* Initialize a variable CUM of type CUMULATIVE_ARGS
-+ for a call to a function whose data type is FNTYPE.
-+ For a library call, FNTYPE is 0. */
-+
-+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
-+ memset (&(CUM), 0, sizeof (CUM))
-+
-+#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
-+
-+/* ABI requires 16-byte alignment, even on RV32. */
-+#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
-+
-+#define NO_PROFILE_COUNTERS 1
-+
-+/* Define this macro if the code for function profiling should come
-+ before the function prologue. Normally, the profiling code comes
-+ after. */
-+
-+/* #define PROFILE_BEFORE_PROLOGUE */
-+
-+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
-+ the stack pointer does not matter. The value is tested only in
-+ functions that have frame pointers.
-+ No definition is equivalent to always zero. */
-+
-+#define EXIT_IGNORE_STACK 1
-+
-+
-+/* Trampolines are a block of code followed by two pointers. */
-+
-+#define TRAMPOLINE_CODE_SIZE 16
-+#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
-+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
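
[Editorial aside, not part of the patch.] riscv_trampoline_init in riscv.c above lays the trampoline out as 16 bytes of code followed by the static-chain and target-function pointers. The struct below only illustrates that layout; it is not code from the patch:

    /* Layout written by riscv_trampoline_init: four 32-bit instructions
       (auipc, two loads, jr), then the static chain and target address.  */
    struct riscv_trampoline_layout
    {
      unsigned int insn[4];     /* TRAMPOLINE_CODE_SIZE == 16 bytes        */
      void *static_chain;       /* loaded into STATIC_CHAIN_REGNUM         */
      void *target_function;    /* loaded into the temp register, jumped   */
    };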
-+
-+/* Addressing modes, and classification of registers for them. */
-+
-+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
-+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
-+ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
-+
-+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
-+ and check its validity for a certain class.
-+ We have two alternate definitions for each of them.
-+ The usual definition accepts all pseudo regs; the other rejects them all.
-+ The symbol REG_OK_STRICT causes the latter definition to be used.
-+
-+ Most source files want to accept pseudo regs in the hope that
-+ they will get allocated to the class that the insn wants them to be in.
-+ Some source files that are used after register allocation
-+ need to be strict. */
-+
-+#ifndef REG_OK_STRICT
-+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
-+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
-+#else
-+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
-+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
-+#endif
-+
-+#define REG_OK_FOR_INDEX_P(X) 0
-+
-+
-+/* Maximum number of registers that can appear in a valid memory address. */
-+
-+#define MAX_REGS_PER_ADDRESS 1
-+
-+#define CONSTANT_ADDRESS_P(X) \
-+ (CONSTANT_P (X) && memory_address_p (SImode, X))
-+
-+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
-+ 'the start of the function that this code is output in'. */
-+
-+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
-+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
-+ asm_fprintf ((FILE), "%U%s", \
-+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
-+ else \
-+ asm_fprintf ((FILE), "%U%s", (NAME))
-+
-+/* This flag marks functions that cannot be lazily bound. */
-+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
-+#define SYMBOL_REF_BIND_NOW_P(RTX) \
-+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
-+
-+#define JUMP_TABLES_IN_TEXT_SECTION 0
-+#define CASE_VECTOR_MODE SImode
-+#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
-+
-+/* Define this as 1 if `char' should by default be signed; else as 0. */
-+#define DEFAULT_SIGNED_CHAR 0
-+
-+/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
-+#define MOVE_MAX UNITS_PER_WORD
-+#define MAX_MOVE_MAX 8
-+
-+#define SLOW_BYTE_ACCESS 0
-+
-+#define SHIFT_COUNT_TRUNCATED 1
-+
-+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
-+ is done just by pretending it is already truncated. */
-+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
-+ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
-+
-+/* Specify the machine mode that pointers have.
-+ After generation of rtl, the compiler makes no further distinction
-+ between pointers and any other objects of this machine mode. */
-+
-+#ifndef Pmode
-+#define Pmode (TARGET_64BIT ? DImode : SImode)
-+#endif
-+
-+/* Give call MEMs SImode since it is the "most permissive" mode
-+ for both 32-bit and 64-bit targets. */
-+
-+#define FUNCTION_MODE SImode
-+
-+/* A C expression for the cost of a branch instruction. A value of 2
-+ seems to minimize code size. */
-+
-+#define BRANCH_COST(speed_p, predictable_p) \
-+ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
-+
-+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
-+
-+/* Control the assembler format that we output. */
-+
-+/* Output to assembler file text saying following lines
-+ may contain character constants, extra white space, comments, etc. */
-+
-+#ifndef ASM_APP_ON
-+#define ASM_APP_ON " #APP\n"
-+#endif
-+
-+/* Output to assembler file text saying following lines
-+ no longer contain unusual constructs. */
-+
-+#ifndef ASM_APP_OFF
-+#define ASM_APP_OFF " #NO_APP\n"
-+#endif
-+
-+#define REGISTER_NAMES \
-+{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
-+ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
-+ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
-+ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
-+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
-+ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
-+ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
-+ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
-+ "arg", "frame", }
-+
-+#define ADDITIONAL_REGISTER_NAMES \
-+{ \
-+ { "x0", 0 + GP_REG_FIRST }, \
-+ { "x1", 1 + GP_REG_FIRST }, \
-+ { "x2", 2 + GP_REG_FIRST }, \
-+ { "x3", 3 + GP_REG_FIRST }, \
-+ { "x4", 4 + GP_REG_FIRST }, \
-+ { "x5", 5 + GP_REG_FIRST }, \
-+ { "x6", 6 + GP_REG_FIRST }, \
-+ { "x7", 7 + GP_REG_FIRST }, \
-+ { "x8", 8 + GP_REG_FIRST }, \
-+ { "x9", 9 + GP_REG_FIRST }, \
-+ { "x10", 10 + GP_REG_FIRST }, \
-+ { "x11", 11 + GP_REG_FIRST }, \
-+ { "x12", 12 + GP_REG_FIRST }, \
-+ { "x13", 13 + GP_REG_FIRST }, \
-+ { "x14", 14 + GP_REG_FIRST }, \
-+ { "x15", 15 + GP_REG_FIRST }, \
-+ { "x16", 16 + GP_REG_FIRST }, \
-+ { "x17", 17 + GP_REG_FIRST }, \
-+ { "x18", 18 + GP_REG_FIRST }, \
-+ { "x19", 19 + GP_REG_FIRST }, \
-+ { "x20", 20 + GP_REG_FIRST }, \
-+ { "x21", 21 + GP_REG_FIRST }, \
-+ { "x22", 22 + GP_REG_FIRST }, \
-+ { "x23", 23 + GP_REG_FIRST }, \
-+ { "x24", 24 + GP_REG_FIRST }, \
-+ { "x25", 25 + GP_REG_FIRST }, \
-+ { "x26", 26 + GP_REG_FIRST }, \
-+ { "x27", 27 + GP_REG_FIRST }, \
-+ { "x28", 28 + GP_REG_FIRST }, \
-+ { "x29", 29 + GP_REG_FIRST }, \
-+ { "x30", 30 + GP_REG_FIRST }, \
-+ { "x31", 31 + GP_REG_FIRST }, \
-+ { "f0", 0 + FP_REG_FIRST }, \
-+ { "f1", 1 + FP_REG_FIRST }, \
-+ { "f2", 2 + FP_REG_FIRST }, \
-+ { "f3", 3 + FP_REG_FIRST }, \
-+ { "f4", 4 + FP_REG_FIRST }, \
-+ { "f5", 5 + FP_REG_FIRST }, \
-+ { "f6", 6 + FP_REG_FIRST }, \
-+ { "f7", 7 + FP_REG_FIRST }, \
-+ { "f8", 8 + FP_REG_FIRST }, \
-+ { "f9", 9 + FP_REG_FIRST }, \
-+ { "f10", 10 + FP_REG_FIRST }, \
-+ { "f11", 11 + FP_REG_FIRST }, \
-+ { "f12", 12 + FP_REG_FIRST }, \
-+ { "f13", 13 + FP_REG_FIRST }, \
-+ { "f14", 14 + FP_REG_FIRST }, \
-+ { "f15", 15 + FP_REG_FIRST }, \
-+ { "f16", 16 + FP_REG_FIRST }, \
-+ { "f17", 17 + FP_REG_FIRST }, \
-+ { "f18", 18 + FP_REG_FIRST }, \
-+ { "f19", 19 + FP_REG_FIRST }, \
-+ { "f20", 20 + FP_REG_FIRST }, \
-+ { "f21", 21 + FP_REG_FIRST }, \
-+ { "f22", 22 + FP_REG_FIRST }, \
-+ { "f23", 23 + FP_REG_FIRST }, \
-+ { "f24", 24 + FP_REG_FIRST }, \
-+ { "f25", 25 + FP_REG_FIRST }, \
-+ { "f26", 26 + FP_REG_FIRST }, \
-+ { "f27", 27 + FP_REG_FIRST }, \
-+ { "f28", 28 + FP_REG_FIRST }, \
-+ { "f29", 29 + FP_REG_FIRST }, \
-+ { "f30", 30 + FP_REG_FIRST }, \
-+ { "f31", 31 + FP_REG_FIRST }, \
-+}
-+
-+/* Globalizing directive for a label. */
-+#define GLOBAL_ASM_OP "\t.globl\t"
-+
-+/* This is how to store into the string LABEL
-+ the symbol_ref name of an internal numbered label where
-+ PREFIX is the class of label and NUM is the number within the class.
-+ This is suitable for output with `assemble_name'. */
-+
-+#undef ASM_GENERATE_INTERNAL_LABEL
-+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
-+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
-+
-+/* This is how to output an element of a case-vector that is absolute. */
-+
-+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
-+ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
-+
-+/* This is how to output an element of a PIC case-vector. */
-+
-+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
-+ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
-+ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
-+
-+/* This is how to output an assembler line
-+ that says to advance the location counter
-+ to a multiple of 2**LOG bytes. */
-+
-+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
-+ fprintf (STREAM, "\t.align\t%d\n", (LOG))
-+
-+/* Define the strings to put out for each section in the object file. */
-+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
-+#define DATA_SECTION_ASM_OP "\t.data" /* large data */
-+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
-+#define BSS_SECTION_ASM_OP "\t.bss"
-+#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
-+#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
-+
-+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
-+do \
-+ { \
-+ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
-+ reg_names[STACK_POINTER_REGNUM], \
-+ reg_names[STACK_POINTER_REGNUM], \
-+ TARGET_64BIT ? "sd" : "sw", \
-+ reg_names[REGNO], \
-+ reg_names[STACK_POINTER_REGNUM]); \
-+ } \
-+while (0)
-+
-+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
-+do \
-+ { \
-+ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
-+ TARGET_64BIT ? "ld" : "lw", \
-+ reg_names[REGNO], \
-+ reg_names[STACK_POINTER_REGNUM], \
-+ reg_names[STACK_POINTER_REGNUM], \
-+ reg_names[STACK_POINTER_REGNUM]); \
-+ } \
-+while (0)
-+
-+#define ASM_COMMENT_START "#"
-+
-+#undef SIZE_TYPE
-+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
-+
-+#undef PTRDIFF_TYPE
-+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
-+
-+/* The maximum number of bytes that can be copied by one iteration of
-+ a movmemsi loop; see riscv_block_move_loop. */
-+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
-+
-+/* The maximum number of bytes that can be copied by a straight-line
-+ implementation of movmemsi; see riscv_block_move_straight. We want
-+ to make sure that any loop-based implementation will iterate at
-+ least twice. */
-+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
-+
-+/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
-+
-+#define RISCV_CALL_RATIO 6
-+
-+/* Any loop-based implementation of movmemsi will have at least
-+ RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
-+ moves, so allow individual copies of fewer elements.
-+
-+ When movmemsi is not available, use a value approximating
-+ the length of a memcpy call sequence, so that move_by_pieces
-+ will generate inline code if it is shorter than a function call.
-+ Since move_by_pieces_ninsns counts memory-to-memory moves, but
-+ we'll have to generate a load/store pair for each, halve the
-+ value of RISCV_CALL_RATIO to take that into account. */
-+
-+#define MOVE_RATIO(speed) \
-+ (HAVE_movmemsi \
-+ ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
-+ : RISCV_CALL_RATIO / 2)
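
[Editorial aside, not part of the patch.] As a quick sanity check of the ratios above, assuming a 64-bit target where UNITS_PER_WORD and MOVE_MAX are both 8:

    /* RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER = 8 * 4  = 32 bytes
       RISCV_MAX_MOVE_BYTES_STRAIGHT      = 32 * 2 = 64 bytes
       MOVE_RATIO with movmemsi available = 64 / 8 = 8 moves
       MOVE_RATIO without movmemsi        = 6 / 2  = 3 moves  */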
-+
-+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
-+ of the length of a memset call, but use the default otherwise. */
-+
-+#define CLEAR_RATIO(speed)\
-+ ((speed) ? 15 : RISCV_CALL_RATIO)
-+
-+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
-+ optimizing for size adjust the ratio to account for the overhead of
-+ loading the constant and replicating it across the word. */
-+
-+#define SET_RATIO(speed) \
-+ ((speed) ? 15 : RISCV_CALL_RATIO - 2)
-+
-+#ifndef HAVE_AS_TLS
-+#define HAVE_AS_TLS 0
-+#endif
-+
-+#ifndef USED_FOR_TARGET
-+
-+extern const enum reg_class riscv_regno_to_class[];
-+extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
-+extern const char* riscv_hi_relocs[];
-+#endif
-+
-+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
-+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
-+
-+/* ISA constants needed for code generation. */
-+#define OPCODE_LW 0x2003
-+#define OPCODE_LD 0x3003
-+#define OPCODE_AUIPC 0x17
-+#define OPCODE_JALR 0x67
-+#define SHIFT_RD 7
-+#define SHIFT_RS1 15
-+#define SHIFT_IMM 20
-+#define IMM_BITS 12
-+
-+#define IMM_REACH (1LL << IMM_BITS)
-+#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
-+#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
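
[Editorial aside, not part of the patch.] CONST_HIGH_PART and CONST_LOW_PART split a constant into a LUI-compatible upper part and a signed 12-bit remainder. A small self-contained C program demonstrating the split on an arbitrary example value:

    #include <stdio.h>

    #define IMM_BITS 12
    #define IMM_REACH (1LL << IMM_BITS)
    #define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
    #define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))

    int main (void)
    {
      long long v = 0x12345678;
      /* lui materializes the upper 20 bits; addi adds the signed low 12.  */
      printf ("lui  t0, 0x%llx\n",
              (unsigned long long) (CONST_HIGH_PART (v) >> IMM_BITS));
      printf ("addi t0, t0, %lld\n", CONST_LOW_PART (v));
      /* By construction the two parts recombine to the original value.  */
      printf ("sum  = 0x%llx\n",
              (unsigned long long) (CONST_HIGH_PART (v) + CONST_LOW_PART (v)));
      return 0;
    }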
-diff -urN empty/gcc/config/riscv/riscv.md gcc-5.3.0/gcc/config/riscv/riscv.md
---- empty/gcc/config/riscv/riscv.md 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.md 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,2421 @@
-+;; Machine description for RISC-V for GNU compiler.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+
-+;; This file is part of GCC.
-+
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+;; GNU General Public License for more details.
-+
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+(define_c_enum "unspec" [
-+ ;; Floating-point moves.
-+ UNSPEC_LOAD_LOW
-+ UNSPEC_LOAD_HIGH
-+ UNSPEC_STORE_WORD
-+
-+ ;; GP manipulation.
-+ UNSPEC_EH_RETURN
-+
-+ ;; Symbolic accesses.
-+ UNSPEC_ADDRESS_FIRST
-+ UNSPEC_LOAD_GOT
-+ UNSPEC_TLS
-+ UNSPEC_TLS_LE
-+ UNSPEC_TLS_IE
-+ UNSPEC_TLS_GD
-+
-+ ;; Register save and restore.
-+ UNSPEC_GPR_SAVE
-+ UNSPEC_GPR_RESTORE
-+
-+ ;; Blockage and synchronisation.
-+ UNSPEC_BLOCKAGE
-+ UNSPEC_FENCE
-+ UNSPEC_FENCE_I
-+])
-+
-+(define_constants
-+ [(RETURN_ADDR_REGNUM 1)
-+ (T0_REGNUM 5)
-+ (T1_REGNUM 6)
-+])
-+
-+(include "predicates.md")
-+(include "constraints.md")
-+
-+;; ....................
-+;;
-+;; Attributes
-+;;
-+;; ....................
-+
-+(define_attr "got" "unset,xgot_high,load"
-+ (const_string "unset"))
-+
-+;; Classification of moves, extensions and truncations. Most values
-+;; are as for "type" (see below) but there are also the following
-+;; move-specific values:
-+;;
-+;; andi a single ANDI instruction
-+;; shift_shift a shift left followed by a shift right
-+;;
-+;; This attribute is used to determine the instruction's length and
-+;; scheduling type. For doubleword moves, the attribute always describes
-+;; the split instructions; in some cases, it is more appropriate for the
-+;; scheduling type to be "multi" instead.
-+(define_attr "move_type"
-+ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
-+ const,logical,arith,andi,shift_shift"
-+ (const_string "unknown"))
-+
-+;; Main data type used by the insn
-+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
-+ (const_string "unknown"))
-+
-+;; True if the main data type is twice the size of a word.
-+(define_attr "dword_mode" "no,yes"
-+ (cond [(and (eq_attr "mode" "DI,DF")
-+ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
-+ (const_string "yes")
-+
-+ (and (eq_attr "mode" "TI,TF")
-+ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
-+ (const_string "yes")]
-+ (const_string "no")))
-+
-+;; Classification of each insn.
-+;; branch conditional branch
-+;; jump unconditional jump
-+;; call unconditional call
-+;; load load instruction(s)
-+;; fpload floating point load
-+;; store store instruction(s)
-+;; fpstore floating point store
-+;; mtc transfer to coprocessor
-+;; mfc transfer from coprocessor
-+;; const load constant
-+;; arith integer arithmetic instructions
-+;; logical integer logical instructions
-+;; shift integer shift instructions
-+;; slt set less than instructions
-+;; imul integer multiply
-+;; idiv integer divide
-+;; move integer register move (addi rd, rs1, 0)
-+;; fmove floating point register move
-+;; fadd floating point add/subtract
-+;; fmul floating point multiply
-+;; fmadd floating point multiply-add
-+;; fdiv floating point divide
-+;; fcmp floating point compare
-+;; fcvt floating point convert
-+;; fsqrt floating point square root
-+;; multi multiword sequence (or user asm statements)
-+;; nop no operation
-+;; ghost an instruction that produces no real code
-+(define_attr "type"
-+ "unknown,branch,jump,call,load,fpload,store,fpstore,
-+ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
-+ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
-+ (cond [(eq_attr "got" "load") (const_string "load")
-+
-+ ;; If a doubleword move uses these expensive instructions,
-+ ;; it is usually better to schedule them in the same way
-+ ;; as the singleword form, rather than as "multi".
-+ (eq_attr "move_type" "load") (const_string "load")
-+ (eq_attr "move_type" "fpload") (const_string "fpload")
-+ (eq_attr "move_type" "store") (const_string "store")
-+ (eq_attr "move_type" "fpstore") (const_string "fpstore")
-+ (eq_attr "move_type" "mtc") (const_string "mtc")
-+ (eq_attr "move_type" "mfc") (const_string "mfc")
-+
-+ ;; These types of move are always single insns.
-+ (eq_attr "move_type" "fmove") (const_string "fmove")
-+ (eq_attr "move_type" "arith") (const_string "arith")
-+ (eq_attr "move_type" "logical") (const_string "logical")
-+ (eq_attr "move_type" "andi") (const_string "logical")
-+
-+ ;; These types of move are always split.
-+ (eq_attr "move_type" "shift_shift")
-+ (const_string "multi")
-+
-+ ;; These types of move are split for doubleword modes only.
-+ (and (eq_attr "move_type" "move,const")
-+ (eq_attr "dword_mode" "yes"))
-+ (const_string "multi")
-+ (eq_attr "move_type" "move") (const_string "move")
-+ (eq_attr "move_type" "const") (const_string "const")]
-+ (const_string "unknown")))
-+
-+;; Mode for conversion types (fcvt)
-+;; I2S integer to float single (SI/DI to SF)
-+;; I2D integer to float double (SI/DI to DF)
-+;; S2I float to integer (SF to SI/DI)
-+;; D2I float to integer (DF to SI/DI)
-+;; D2S double to float single
-+;; S2D float single to double
-+
-+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
-+ (const_string "unknown"))
-+
-+;; Length of instruction in bytes.
-+(define_attr "length" ""
-+ (cond [
-+ ;; Direct branch instructions have a range of [-0x1000,0xffc],
-+ ;; relative to the address of the delay slot. If a branch is
-+ ;; outside this range, convert a branch like:
-+ ;;
-+ ;; bne r1,r2,target
-+ ;;
-+ ;; to:
-+ ;;
-+ ;; beq r1,r2,1f
-+ ;; j target
-+ ;; 1:
-+ ;;
-+ (eq_attr "type" "branch")
-+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
-+ (le (minus (pc) (match_dup 0)) (const_int 4092)))
-+ (const_int 4)
-+ (const_int 8))
-+
-+ ;; Conservatively assume calls take two instructions, as in:
-+ ;; auipc t0, %pcrel_hi(target)
-+ ;; jalr ra, t0, %lo(target)
-+ ;; The linker will relax these into JAL when appropriate.
-+ (eq_attr "type" "call") (const_int 8)
-+
-+ ;; "Ghost" instructions occupy no space.
-+ (eq_attr "type" "ghost") (const_int 0)
-+
-+ (eq_attr "got" "load") (const_int 8)
-+
-+ (eq_attr "type" "fcmp") (const_int 8)
-+
-+ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
-+ (eq_attr "move_type" "shift_shift")
-+ (const_int 8)
-+
-+ ;; Check for doubleword moves that are decomposed into two
-+ ;; instructions.
-+ (and (eq_attr "move_type" "mtc,mfc,move")
-+ (eq_attr "dword_mode" "yes"))
-+ (const_int 8)
-+
-+ ;; Doubleword CONST{,N} moves are split into two word
-+ ;; CONST{,N} moves.
-+ (and (eq_attr "move_type" "const")
-+ (eq_attr "dword_mode" "yes"))
-+ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
-+
-+ ;; Otherwise, constants, loads and stores are handled by external
-+ ;; routines.
-+ (eq_attr "move_type" "load,fpload")
-+ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
-+ (eq_attr "move_type" "store,fpstore")
-+ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
-+ ] (const_int 4)))
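
As a rough illustration (not part of this patch), the branch-range test that the "length" attribute above encodes can be written as the following C; branch_length and its arguments are made-up names:

    static int branch_length (long target_addr, long branch_addr)
    {
      /* The branch stays a single 4-byte instruction when the target is
         no more than 4088 bytes ahead and no more than 4092 bytes behind;
         anything farther is emitted as an inverted branch plus a jump. */
      if (target_addr - branch_addr <= 4088
          && branch_addr - target_addr <= 4092)
        return 4;
      return 8;
    }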
-+
-+;; Describe a user's asm statement.
-+(define_asm_attributes
-+ [(set_attr "type" "multi")])
-+
-+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
-+;; from the same template.
-+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
-+(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
-+
-+;; A copy of GPR that can be used when a pattern has two independent
-+;; modes.
-+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
-+
-+;; This mode iterator allows :P to be used for patterns that operate on
-+;; pointer-sized quantities. Exactly one of the two alternatives will match.
-+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
-+
-+;; 32-bit integer moves for which we provide move patterns.
-+(define_mode_iterator IMOVE32 [SI])
-+
-+;; 64-bit modes for which we provide move patterns.
-+(define_mode_iterator MOVE64 [DI DF])
-+
-+;; 128-bit modes for which we provide move patterns on 64-bit targets.
-+(define_mode_iterator MOVE128 [TI TF])
-+
-+;; This mode iterator allows the QI and HI extension patterns to be
-+;; defined from the same template.
-+(define_mode_iterator SHORT [QI HI])
-+
-+;; Likewise the 64-bit truncate-and-shift patterns.
-+(define_mode_iterator SUBDI [QI HI SI])
-+(define_mode_iterator HISI [HI SI])
-+(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
-+
-+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
-+;; floating-point mode is allowed.
-+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
-+ (DF "TARGET_HARD_FLOAT")])
-+(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
-+ (SF "TARGET_HARD_FLOAT")
-+ (DF "TARGET_HARD_FLOAT")])
-+
-+;; Like ANYF, but only applies to scalar modes.
-+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
-+ (DF "TARGET_HARD_FLOAT")])
-+
-+;; A floating-point mode for which moves involving FPRs may need to be split.
-+(define_mode_iterator SPLITF
-+ [(DF "!TARGET_64BIT")
-+ (DI "!TARGET_64BIT")
-+ (TF "TARGET_64BIT")])
-+
-+;; This attribute gives the length suffix for a sign- or zero-extension
-+;; instruction.
-+(define_mode_attr size [(QI "b") (HI "h")])
-+
-+;; Mode attributes for loads.
-+(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
-+
-+;; Instruction names for stores.
-+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
-+
-+;; This attribute gives the best constraint to use for registers of
-+;; a given mode.
-+(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
-+
-+;; This attribute gives the format suffix for floating-point operations.
-+(define_mode_attr fmt [(SF "s") (DF "d")])
-+
-+;; This attribute gives the format suffix for atomic memory operations.
-+(define_mode_attr amo [(SI "w") (DI "d")])
-+
-+;; This attribute gives the upper-case mode name for one unit of a
-+;; floating-point mode.
-+(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
-+
-+;; This attribute gives the integer mode that has half the size of
-+;; the controlling mode.
-+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
-+
-+;; This code iterator allows signed and unsigned widening multiplications
-+;; to use the same template.
-+(define_code_iterator any_extend [sign_extend zero_extend])
-+
-+;; This code iterator allows the two right shift instructions to be
-+;; generated from the same template.
-+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
-+
-+;; This code iterator allows the three shift instructions to be generated
-+;; from the same template.
-+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
-+
-+;; This code iterator allows unsigned and signed division to be generated
-+;; from the same template.
-+(define_code_iterator any_div [div udiv])
-+
-+;; This code iterator allows unsigned and signed modulus to be generated
-+;; from the same template.
-+(define_code_iterator any_mod [mod umod])
-+
-+;; These code iterators allow the signed and unsigned scc operations to use
-+;; the same template.
-+(define_code_iterator any_gt [gt gtu])
-+(define_code_iterator any_ge [ge geu])
-+(define_code_iterator any_lt [lt ltu])
-+(define_code_iterator any_le [le leu])
-+
-+;; <u> expands to an empty string when doing a signed operation and
-+;; "u" when doing an unsigned operation.
-+(define_code_attr u [(sign_extend "") (zero_extend "u")
-+ (div "") (udiv "u")
-+ (mod "") (umod "u")
-+ (gt "") (gtu "u")
-+ (ge "") (geu "u")
-+ (lt "") (ltu "u")
-+ (le "") (leu "u")])
-+
-+;; <su> is like <u>, but the signed form expands to "s" rather than "".
-+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
-+
-+;; <optab> expands to the name of the optab for a particular code.
-+(define_code_attr optab [(ashift "ashl")
-+ (ashiftrt "ashr")
-+ (lshiftrt "lshr")
-+ (ior "ior")
-+ (xor "xor")
-+ (and "and")
-+ (plus "add")
-+ (minus "sub")])
-+
-+;; <insn> expands to the name of the insn that implements a particular code.
-+(define_code_attr insn [(ashift "sll")
-+ (ashiftrt "sra")
-+ (lshiftrt "srl")
-+ (ior "or")
-+ (xor "xor")
-+ (and "and")
-+ (plus "add")
-+ (minus "sub")])
-+
-+;; Ghost instructions produce no real code and introduce no hazards.
-+;; They exist purely to express an effect on dataflow.
-+(define_insn_reservation "ghost" 0
-+ (eq_attr "type" "ghost")
-+ "nothing")
-+
-+;;
-+;; ....................
-+;;
-+;; ADDITION
-+;;
-+;; ....................
-+;;
-+
-+(define_insn "add<mode>3"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")))]
-+ ""
-+ "fadd.<fmt>\t%0,%1,%2"
-+ [(set_attr "type" "fadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "add<mode>3"
-+ [(set (match_operand:GPR 0 "register_operand")
-+ (plus:GPR (match_operand:GPR 1 "register_operand")
-+ (match_operand:GPR 2 "arith_operand")))]
-+ "")
-+
-+(define_insn "*addsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r")
-+ (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
-+ (match_operand:GPR2 2 "arith_operand" "r,Q")))]
-+ ""
-+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*adddi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
-+ (match_operand:DI 2 "arith_operand" "r,Q")))]
-+ "TARGET_64BIT"
-+ "add\t%0,%1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "*addsi3_extended"
-+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (sign_extend:DI
-+ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
-+ (match_operand:SI 2 "arith_operand" "r,Q"))))]
-+ "TARGET_64BIT"
-+ "addw\t%0,%1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*adddisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r")
-+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
-+ (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
-+ "TARGET_64BIT"
-+ "addw\t%0,%1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*adddisisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r")
-+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
-+ (match_operand:SI 2 "arith_operand" "r,Q")))]
-+ "TARGET_64BIT"
-+ "addw\t%0,%1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*adddi3_truncsi"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r")
-+ (truncate:SI
-+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
-+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
-+ "TARGET_64BIT"
-+ "addw\t%0,%1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+;;
-+;; ....................
-+;;
-+;; SUBTRACTION
-+;;
-+;; ....................
-+;;
-+
-+(define_insn "sub<mode>3"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")))]
-+ ""
-+ "fsub.<fmt>\t%0,%1,%2"
-+ [(set_attr "type" "fadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "sub<mode>3"
-+ [(set (match_operand:GPR 0 "register_operand")
-+ (minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
-+ (match_operand:GPR 2 "register_operand")))]
-+ "")
-+
-+(define_insn "*subdi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
-+ (match_operand:DI 2 "register_operand" "r")))]
-+ "TARGET_64BIT"
-+ "sub\t%0,%z1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "*subsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
-+ (match_operand:GPR2 2 "register_operand" "r")))]
-+ ""
-+ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*subsi3_extended"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (sign_extend:DI
-+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
-+ (match_operand:SI 2 "register_operand" "r"))))]
-+ "TARGET_64BIT"
-+ "subw\t%0,%z1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "*subdisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
-+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
-+ "TARGET_64BIT"
-+ "subw\t%0,%z1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*subdisisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
-+ (match_operand:SI 2 "register_operand" "r")))]
-+ "TARGET_64BIT"
-+ "subw\t%0,%z1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*subsidisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
-+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
-+ "TARGET_64BIT"
-+ "subw\t%0,%z1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*subdi3_truncsi"
-+ [(set (match_operand:SI 0 "register_operand" "=r,r")
-+ (truncate:SI
-+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
-+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
-+ "TARGET_64BIT"
-+ "subw\t%0,%z1,%2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "SI")])
-+
-+;;
-+;; ....................
-+;;
-+;; MULTIPLICATION
-+;;
-+;; ....................
-+;;
-+
-+(define_insn "mul<mode>3"
-+ [(set (match_operand:SCALARF 0 "register_operand" "=f")
-+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
-+ (match_operand:SCALARF 2 "register_operand" "f")))]
-+ ""
-+ "fmul.<fmt>\t%0,%1,%2"
-+ [(set_attr "type" "fmul")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "mul<mode>3"
-+ [(set (match_operand:GPR 0 "register_operand")
-+ (mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
-+ (match_operand:GPR 2 "register_operand")))]
-+ "TARGET_MULDIV")
-+
-+(define_insn "*mulsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (mult:SI (match_operand:GPR 1 "register_operand" "r")
-+ (match_operand:GPR2 2 "register_operand" "r")))]
-+ "TARGET_MULDIV"
-+ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*muldisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
-+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "mulw\t%0,%1,%2"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*muldi3_truncsi"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (truncate:SI
-+ (mult:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "register_operand" "r"))))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "mulw\t%0,%1,%2"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*muldi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (mult:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "register_operand" "r")))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "mul\t%0,%1,%2"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "DI")])
-+
-+;;
-+;; ........................
-+;;
-+;; MULTIPLICATION HIGH-PART
-+;;
-+;; ........................
-+;;
-+
-+
-+;; Using a clobber here is ghetto, but I'm not smart enough to do better.

-+(define_insn_and_split "<u>mulditi3"
-+ [(set (match_operand:TI 0 "register_operand" "=r")
-+ (mult:TI (any_extend:TI
-+ (match_operand:DI 1 "register_operand" "r"))
-+ (any_extend:TI
-+ (match_operand:DI 2 "register_operand" "r"))))
-+ (clobber (match_scratch:DI 3 "=r"))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "#"
-+ "reload_completed"
-+ [
-+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
-+ (set (match_dup 4) (truncate:DI
-+ (lshiftrt:TI
-+ (mult:TI (any_extend:TI (match_dup 1))
-+ (any_extend:TI (match_dup 2)))
-+ (const_int 64))))
-+ (set (match_dup 5) (match_dup 3))
-+ ]
-+{
-+ operands[4] = riscv_subword (operands[0], true);
-+ operands[5] = riscv_subword (operands[0], false);
-+}
-+ )
-+
-+(define_insn "<u>muldi3_highpart"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (truncate:DI
-+ (lshiftrt:TI
-+ (mult:TI (any_extend:TI
-+ (match_operand:DI 1 "register_operand" "r"))
-+ (any_extend:TI
-+ (match_operand:DI 2 "register_operand" "r")))
-+ (const_int 64))))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "mulh<u>\t%0,%1,%2"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "DI")])
-+
-+
-+(define_insn_and_split "usmulditi3"
-+ [(set (match_operand:TI 0 "register_operand" "=r")
-+ (mult:TI (zero_extend:TI
-+ (match_operand:DI 1 "register_operand" "r"))
-+ (sign_extend:TI
-+ (match_operand:DI 2 "register_operand" "r"))))
-+ (clobber (match_scratch:DI 3 "=r"))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "#"
-+ "reload_completed"
-+ [
-+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
-+ (set (match_dup 4) (truncate:DI
-+ (lshiftrt:TI
-+ (mult:TI (zero_extend:TI (match_dup 1))
-+ (sign_extend:TI (match_dup 2)))
-+ (const_int 64))))
-+ (set (match_dup 5) (match_dup 3))
-+ ]
-+{
-+ operands[4] = riscv_subword (operands[0], true);
-+ operands[5] = riscv_subword (operands[0], false);
-+}
-+ )
-+
-+(define_insn "usmuldi3_highpart"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (truncate:DI
-+ (lshiftrt:TI
-+ (mult:TI (zero_extend:TI
-+ (match_operand:DI 1 "register_operand" "r"))
-+ (sign_extend:TI
-+ (match_operand:DI 2 "register_operand" "r")))
-+ (const_int 64))))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "mulhsu\t%0,%2,%1"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "DI")])
-+
-+(define_expand "<u>mulsidi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (mult:DI (any_extend:DI
-+ (match_operand:SI 1 "register_operand" "r"))
-+ (any_extend:DI
-+ (match_operand:SI 2 "register_operand" "r"))))
-+ (clobber (match_scratch:SI 3 "=r"))]
-+ "TARGET_MULDIV && !TARGET_64BIT"
-+{
-+ rtx temp = gen_reg_rtx (SImode);
-+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
-+ emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
-+ operands[1], operands[2]));
-+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
-+ DONE;
-+}
-+ )
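
A hedged sketch in C (illustrative only, not from the port) of what the <u>mulsidi3 expander above composes on RV32: the low word of the product comes from mul, the high word from mulh (mulhu in the unsigned variant), and the two words are packed into the DImode result:

    #include <stdint.h>

    static int64_t mulsidi3_sketch (int32_t a, int32_t b)
    {
      uint32_t lo = (uint32_t) a * (uint32_t) b;                    /* mul  */
      int32_t hi = (int32_t) (((int64_t) a * (int64_t) b) >> 32);   /* mulh */
      return (int64_t) (((uint64_t) (uint32_t) hi << 32) | lo);
    }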
-+
-+(define_insn "<u>mulsi3_highpart"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (truncate:SI
-+ (lshiftrt:DI
-+ (mult:DI (any_extend:DI
-+ (match_operand:SI 1 "register_operand" "r"))
-+ (any_extend:DI
-+ (match_operand:SI 2 "register_operand" "r")))
-+ (const_int 32))))]
-+ "TARGET_MULDIV && !TARGET_64BIT"
-+ "mulh<u>\t%0,%1,%2"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "SI")])
-+
-+
-+(define_expand "usmulsidi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (mult:DI (zero_extend:DI
-+ (match_operand:SI 1 "register_operand" "r"))
-+ (sign_extend:DI
-+ (match_operand:SI 2 "register_operand" "r"))))
-+ (clobber (match_scratch:SI 3 "=r"))]
-+ "TARGET_MULDIV && !TARGET_64BIT"
-+{
-+ rtx temp = gen_reg_rtx (SImode);
-+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
-+ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
-+ operands[1], operands[2]));
-+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
-+ DONE;
-+}
-+ )
-+
-+(define_insn "usmulsi3_highpart"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (truncate:SI
-+ (lshiftrt:DI
-+ (mult:DI (zero_extend:DI
-+ (match_operand:SI 1 "register_operand" "r"))
-+ (sign_extend:DI
-+ (match_operand:SI 2 "register_operand" "r")))
-+ (const_int 32))))]
-+ "TARGET_MULDIV && !TARGET_64BIT"
-+ "mulhsu\t%0,%2,%1"
-+ [(set_attr "type" "imul")
-+ (set_attr "mode" "SI")])
-+
-+;;
-+;; ....................
-+;;
-+;; DIVISION and REMAINDER
-+;;
-+;; ....................
-+;;
-+
-+(define_insn "<u>divsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (any_div:SI (match_operand:SI 1 "register_operand" "r")
-+ (match_operand:SI 2 "register_operand" "r")))]
-+ "TARGET_MULDIV"
-+ { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
-+ [(set_attr "type" "idiv")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "<u>divdi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (any_div:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "register_operand" "r")))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "div<u>\t%0,%1,%2"
-+ [(set_attr "type" "idiv")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "<u>modsi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (any_mod:SI (match_operand:SI 1 "register_operand" "r")
-+ (match_operand:SI 2 "register_operand" "r")))]
-+ "TARGET_MULDIV"
-+ { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
-+ [(set_attr "type" "idiv")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "<u>moddi3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (any_mod:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "register_operand" "r")))]
-+ "TARGET_MULDIV && TARGET_64BIT"
-+ "rem<u>\t%0,%1,%2"
-+ [(set_attr "type" "idiv")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "div<mode>3"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT && TARGET_FDIV"
-+ "fdiv.<fmt>\t%0,%1,%2"
-+ [(set_attr "type" "fdiv")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; SQUARE ROOT
-+;;
-+;; ....................
-+
-+(define_insn "sqrt<mode>2"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT && TARGET_FDIV"
-+{
-+ return "fsqrt.<fmt>\t%0,%1";
-+}
-+ [(set_attr "type" "fsqrt")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+;; Floating point multiply accumulate instructions.
-+
-+(define_insn "fma<mode>4"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (fma:ANYF
-+ (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")
-+ (match_operand:ANYF 3 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fmadd.<fmt>\t%0,%1,%2,%3"
-+ [(set_attr "type" "fmadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "fms<mode>4"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (fma:ANYF
-+ (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")
-+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
-+ "TARGET_HARD_FLOAT"
-+ "fmsub.<fmt>\t%0,%1,%2,%3"
-+ [(set_attr "type" "fmadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "nfma<mode>4"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (neg:ANYF
-+ (fma:ANYF
-+ (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")
-+ (match_operand:ANYF 3 "register_operand" "f"))))]
-+ "TARGET_HARD_FLOAT"
-+ "fnmadd.<fmt>\t%0,%1,%2,%3"
-+ [(set_attr "type" "fmadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "nfms<mode>4"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (neg:ANYF
-+ (fma:ANYF
-+ (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")
-+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
-+ "TARGET_HARD_FLOAT"
-+ "fnmsub.<fmt>\t%0,%1,%2,%3"
-+ [(set_attr "type" "fmadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+;; modulo signed zeros, -(a*b+c) == -c-a*b
-+(define_insn "*nfma<mode>4_fastmath"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (minus:ANYF
-+ (match_operand:ANYF 3 "register_operand" "f")
-+ (mult:ANYF
-+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
-+ (match_operand:ANYF 2 "register_operand" "f"))))]
-+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
-+ "fnmadd.<fmt>\t%0,%1,%2,%3"
-+ [(set_attr "type" "fmadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+;; modulo signed zeros, -(a*b-c) == c-a*b
-+(define_insn "*nfms<mode>4_fastmath"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (minus:ANYF
-+ (match_operand:ANYF 3 "register_operand" "f")
-+ (mult:ANYF
-+ (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f"))))]
-+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
-+ "fnmsub.<fmt>\t%0,%1,%2,%3"
-+ [(set_attr "type" "fmadd")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; ABSOLUTE VALUE
-+;;
-+;; ....................
-+
-+(define_insn "abs<mode>2"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fabs.<fmt>\t%0,%1"
-+ [(set_attr "type" "fmove")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+
-+;;
-+;; ....................
-+;;
-+;; MIN/MAX
-+;;
-+;; ....................
-+
-+(define_insn "smin<mode>3"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fmin.<fmt>\t%0,%1,%2"
-+ [(set_attr "type" "fmove")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "smax<mode>3"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+ (match_operand:ANYF 2 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fmax.<fmt>\t%0,%1,%2"
-+ [(set_attr "type" "fmove")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+
-+;;
-+;; ....................
-+;;
-+;; NEGATION and ONE'S COMPLEMENT
-+;;
-+;; ....................
-+
-+(define_insn "neg<mode>2"
-+ [(set (match_operand:ANYF 0 "register_operand" "=f")
-+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fneg.<fmt>\t%0,%1"
-+ [(set_attr "type" "fmove")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "one_cmpl<mode>2"
-+ [(set (match_operand:GPR 0 "register_operand" "=r")
-+ (not:GPR (match_operand:GPR 1 "register_operand" "r")))]
-+ ""
-+ "not\t%0,%1"
-+ [(set_attr "type" "logical")
-+ (set_attr "mode" "<MODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; LOGICAL
-+;;
-+;; ....................
-+;;
-+
-+(define_insn "and<mode>3"
-+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+ (and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
-+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+ ""
-+ "and\t%0,%1,%2"
-+ [(set_attr "type" "logical")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "ior<mode>3"
-+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+ (ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
-+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+ ""
-+ "or\t%0,%1,%2"
-+ [(set_attr "type" "logical")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "xor<mode>3"
-+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+ (xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
-+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+ ""
-+ "xor\t%0,%1,%2"
-+ [(set_attr "type" "logical")
-+ (set_attr "mode" "<MODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; TRUNCATION
-+;;
-+;; ....................
-+
-+(define_insn "truncdfsf2"
-+ [(set (match_operand:SF 0 "register_operand" "=f")
-+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.s.d\t%0,%1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "cnv_mode" "D2S")
-+ (set_attr "mode" "SF")])
-+
-+;; Integer truncation patterns. Truncating to HImode/QImode is a no-op.
-+;; Truncating from DImode to SImode is not, because we always keep SImode
-+;; values sign-extended in a register so we can safely use DImode branches
-+;; and comparisons on SImode values.
-+
-+(define_insn "truncdisi2"
-+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
-+ (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
-+ "TARGET_64BIT"
-+ "@
-+ sext.w\t%0,%1
-+ sw\t%1,%0"
-+ [(set_attr "move_type" "arith,store")
-+ (set_attr "mode" "SI")])
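
To make the invariant behind truncdisi2 concrete, here is an illustrative C equivalent (not part of the patch): truncating DImode to SImode means re-sign-extending the low 32 bits, which is what sext.w does, so SImode values stay sign-extended in their 64-bit registers:

    #include <stdint.h>

    static int64_t truncdisi2_sketch (int64_t x)
    {
      return (int64_t) (int32_t) x;   /* sext.w rd, rs */
    }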
-+
-+;; Combiner patterns to optimize shift/truncate combinations.
-+
-+(define_insn "*ashr_trunc<mode>"
-+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
-+ (truncate:SUBDI
-+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "const_arith_operand" ""))))]
-+ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
-+ "sra\t%0,%1,%2"
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "*lshr32_trunc<mode>"
-+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
-+ (truncate:SUBDI
-+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
-+ (const_int 32))))]
-+ "TARGET_64BIT"
-+ "sra\t%0,%1,32"
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "<MODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; ZERO EXTENSION
-+;;
-+;; ....................
-+
-+;; Extension insns.
-+
-+(define_insn_and_split "zero_extendsidi2"
-+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
-+ "TARGET_64BIT"
-+ "@
-+ #
-+ lwu\t%0,%1"
-+ "&& reload_completed && REG_P (operands[1])"
-+ [(set (match_dup 0)
-+ (ashift:DI (match_dup 1) (const_int 32)))
-+ (set (match_dup 0)
-+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
-+ { operands[1] = gen_lowpart (DImode, operands[1]); }
-+ [(set_attr "move_type" "shift_shift,load")
-+ (set_attr "mode" "DI")])
-+
-+;; Combine is not allowed to convert this insn into a zero_extendsidi2
-+;; because of TRULY_NOOP_TRUNCATION.
-+
-+(define_insn_and_split "*clear_upper32"
-+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
-+ (const_int 4294967295)))]
-+ "TARGET_64BIT"
-+{
-+ if (which_alternative == 0)
-+ return "#";
-+
-+ operands[1] = gen_lowpart (SImode, operands[1]);
-+ return "lwu\t%0,%1";
-+}
-+ "&& reload_completed && REG_P (operands[1])"
-+ [(set (match_dup 0)
-+ (ashift:DI (match_dup 1) (const_int 32)))
-+ (set (match_dup 0)
-+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
-+ ""
-+ [(set_attr "move_type" "shift_shift,load")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn_and_split "zero_extendhi<GPR:mode>2"
-+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
-+ ""
-+ "@
-+ #
-+ lhu\t%0,%1"
-+ "&& reload_completed && REG_P (operands[1])"
-+ [(set (match_dup 0)
-+ (ashift:GPR (match_dup 1) (match_dup 2)))
-+ (set (match_dup 0)
-+ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
-+ {
-+ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
-+ operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
-+ }
-+ [(set_attr "move_type" "shift_shift,load")
-+ (set_attr "mode" "<GPR:MODE>")])
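
For reference, a small C sketch (illustrative only) of the shift pair that the register alternative of zero_extendhi<GPR:mode>2 splits into; the shift count is the bit size of the wide mode minus 16, i.e. 48 for DImode as shown here:

    #include <stdint.h>

    static uint64_t zext_hi_sketch (uint64_t x)
    {
      return (x << 48) >> 48;   /* slli rd, rs, 48 ; srli rd, rd, 48 */
    }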
-+
-+(define_insn "zero_extendqi<SUPERQI:mode>2"
-+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
-+ (zero_extend:SUPERQI
-+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
-+ ""
-+ "@
-+ and\t%0,%1,0xff
-+ lbu\t%0,%1"
-+ [(set_attr "move_type" "andi,load")
-+ (set_attr "mode" "<SUPERQI:MODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; SIGN EXTENSION
-+;;
-+;; ....................
-+
-+;; Extension insns.
-+;; Those for integer source operand are ordered widest source type first.
-+
-+;; When TARGET_64BIT, all SImode integer registers should already be in
-+;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2). We can
-+;; therefore get rid of register->register instructions if we constrain
-+;; the source to be in the same register as the destination.
-+;;
-+;; The register alternative has type "arith" so that the pre-reload
-+;; scheduler will treat it as a move. This reflects what happens if
-+;; the register alternative needs a reload.
-+(define_insn_and_split "extendsidi2"
-+ [(set (match_operand:DI 0 "register_operand" "=r,r")
-+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
-+ "TARGET_64BIT"
-+ "@
-+ #
-+ lw\t%0,%1"
-+ "&& reload_completed && register_operand (operands[1], VOIDmode)"
-+ [(set (match_dup 0) (match_dup 1))]
-+{
-+ if (REGNO (operands[0]) == REGNO (operands[1]))
-+ {
-+ emit_note (NOTE_INSN_DELETED);
-+ DONE;
-+ }
-+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
-+}
-+ [(set_attr "move_type" "move,load")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
-+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
-+ (sign_extend:SUPERQI
-+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
-+ ""
-+ "@
-+ #
-+ l<SHORT:size>\t%0,%1"
-+ "&& reload_completed && REG_P (operands[1])"
-+ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
-+ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
-+{
-+ operands[0] = gen_lowpart (SImode, operands[0]);
-+ operands[1] = gen_lowpart (SImode, operands[1]);
-+ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
-+ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
-+}
-+ [(set_attr "move_type" "shift_shift,load")
-+ (set_attr "mode" "SI")])
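
Likewise, an illustrative C sketch (not from the port) of the arithmetic shift pair used by the register form of extend<SHORT:mode><SUPERQI:mode>2; for a QImode source the split emits shifts by 24 (16 for HImode), with the effect shown below:

    #include <stdint.h>

    static int32_t sext_qi_sketch (uint32_t x)
    {
      /* Equivalent to: slliw rd, rs, 24 ; sraiw rd, rd, 24. */
      return (int32_t) (int8_t) (x & 0xff);
    }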
-+
-+(define_insn "extendsfdf2"
-+ [(set (match_operand:DF 0 "register_operand" "=f")
-+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.d.s\t%0,%1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "cnv_mode" "S2D")
-+ (set_attr "mode" "DF")])
-+
-+;;
-+;; ....................
-+;;
-+;; CONVERSIONS
-+;;
-+;; ....................
-+
-+(define_insn "fix_truncdfsi2"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.w.d %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "D2I")])
-+
-+
-+(define_insn "fix_truncsfsi2"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.w.s %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "S2I")])
-+
-+
-+(define_insn "fix_truncdfdi2"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.l.d %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "D2I")])
-+
-+
-+(define_insn "fix_truncsfdi2"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.l.s %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "S2I")])
-+
-+
-+(define_insn "floatsidf2"
-+ [(set (match_operand:DF 0 "register_operand" "=f")
-+ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.d.w\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "I2D")])
-+
-+
-+(define_insn "floatdidf2"
-+ [(set (match_operand:DF 0 "register_operand" "=f")
-+ (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.d.l\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "I2D")])
-+
-+
-+(define_insn "floatsisf2"
-+ [(set (match_operand:SF 0 "register_operand" "=f")
-+ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.s.w\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "I2S")])
-+
-+
-+(define_insn "floatdisf2"
-+ [(set (match_operand:SF 0 "register_operand" "=f")
-+ (float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.s.l\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "I2S")])
-+
-+
-+(define_insn "floatunssidf2"
-+ [(set (match_operand:DF 0 "register_operand" "=f")
-+ (unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.d.wu\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "I2D")])
-+
-+
-+(define_insn "floatunsdidf2"
-+ [(set (match_operand:DF 0 "register_operand" "=f")
-+ (unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.d.lu\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "I2D")])
-+
-+
-+(define_insn "floatunssisf2"
-+ [(set (match_operand:SF 0 "register_operand" "=f")
-+ (unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.s.wu\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "I2S")])
-+
-+
-+(define_insn "floatunsdisf2"
-+ [(set (match_operand:SF 0 "register_operand" "=f")
-+ (unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.s.lu\t%0,%z1"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "I2S")])
-+
-+
-+(define_insn "fixuns_truncdfsi2"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.wu.d %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "D2I")])
-+
-+
-+(define_insn "fixuns_truncsfsi2"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT"
-+ "fcvt.wu.s %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "S2I")])
-+
-+
-+(define_insn "fixuns_truncdfdi2"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.lu.d %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "DF")
-+ (set_attr "cnv_mode" "D2I")])
-+
-+
-+(define_insn "fixuns_truncsfdi2"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
-+ "TARGET_HARD_FLOAT && TARGET_64BIT"
-+ "fcvt.lu.s %0,%1,rtz"
-+ [(set_attr "type" "fcvt")
-+ (set_attr "mode" "SF")
-+ (set_attr "cnv_mode" "S2I")])
-+
-+;;
-+;; ....................
-+;;
-+;; DATA MOVEMENT
-+;;
-+;; ....................
-+
-+;; Lower-level instructions for loading an address from the GOT.
-+;; We could use MEMs, but an unspec gives more optimization
-+;; opportunities.
-+
-+(define_insn "got_load<mode>"
-+ [(set (match_operand:P 0 "register_operand" "=r")
-+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
-+ UNSPEC_LOAD_GOT))]
-+ "flag_pic"
-+ "la\t%0,%1"
-+ [(set_attr "got" "load")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "tls_add_tp_le<mode>"
-+ [(set (match_operand:P 0 "register_operand" "=r")
-+ (unspec:P [(match_operand:P 1 "register_operand" "r")
-+ (match_operand:P 2 "register_operand" "r")
-+ (match_operand:P 3 "symbolic_operand" "")]
-+ UNSPEC_TLS_LE))]
-+ "!flag_pic || flag_pie"
-+ "add\t%0,%1,%2,%%tprel_add(%3)"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "got_load_tls_gd<mode>"
-+ [(set (match_operand:P 0 "register_operand" "=r")
-+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
-+ UNSPEC_TLS_GD))]
-+ "flag_pic"
-+ "la.tls.gd\t%0,%1"
-+ [(set_attr "got" "load")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "got_load_tls_ie<mode>"
-+ [(set (match_operand:P 0 "register_operand" "=r")
-+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
-+ UNSPEC_TLS_IE))]
-+ "flag_pic"
-+ "la.tls.ie\t%0,%1"
-+ [(set_attr "got" "load")
-+ (set_attr "mode" "<MODE>")])
-+
-+;; Instructions for adding the low 12 bits of an address to a register.
-+;; Operand 2 is the address: riscv_print_operand works out which relocation
-+;; should be applied.
-+
-+(define_insn "*low<mode>"
-+ [(set (match_operand:P 0 "register_operand" "=r")
-+ (lo_sum:P (match_operand:P 1 "register_operand" "r")
-+ (match_operand:P 2 "immediate_operand" "")))]
-+ ""
-+ "add\t%0,%1,%R2"
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "<MODE>")])
-+
-+;; Allow combine to split complex const_int load sequences, using operand 2
-+;; to store the intermediate results. See move_operand for details.
-+(define_split
-+ [(set (match_operand:GPR 0 "register_operand")
-+ (match_operand:GPR 1 "splittable_const_int_operand"))
-+ (clobber (match_operand:GPR 2 "register_operand"))]
-+ ""
-+ [(const_int 0)]
-+{
-+ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
-+ DONE;
-+})
-+
-+;; Likewise, for symbolic operands.
-+(define_split
-+ [(set (match_operand:P 0 "register_operand")
-+ (match_operand:P 1))
-+ (clobber (match_operand:P 2 "register_operand"))]
-+ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
-+ [(set (match_dup 0) (match_dup 3))]
-+{
-+ riscv_split_symbol (operands[2], operands[1],
-+ MAX_MACHINE_MODE, &operands[3]);
-+})
-+
-+;; 64-bit integer moves
-+
-+;; Unlike most other insns, the move insns can't be split with
-+;; different predicates, because register spilling and other parts of
-+;; the compiler have memoized the insn number already.
-+
-+(define_expand "movdi"
-+ [(set (match_operand:DI 0 "")
-+ (match_operand:DI 1 ""))]
-+ ""
-+{
-+ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+(define_insn "*movdi_32bit"
-+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
-+ (match_operand:DI 1 "move_operand" "r,i,m,r,*J*r,*m,*f,*f"))]
-+ "!TARGET_64BIT
-+ && (register_operand (operands[0], DImode)
-+ || reg_or_0_operand (operands[1], DImode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "*movdi_64bit"
-+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
-+ (match_operand:DI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
-+ "TARGET_64BIT
-+ && (register_operand (operands[0], DImode)
-+ || reg_or_0_operand (operands[1], DImode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
-+ (set_attr "mode" "DI")])
-+
-+;; 32-bit Integer moves
-+
-+;; Unlike most other insns, the move insns can't be split with
-+;; different predicates, because register spilling and other parts of
-+;; the compiler have memoized the insn number already.
-+
-+(define_expand "mov<mode>"
-+ [(set (match_operand:IMOVE32 0 "")
-+ (match_operand:IMOVE32 1 ""))]
-+ ""
-+{
-+ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+(define_insn "*mov<mode>_internal"
-+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
-+ (match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
-+ "(register_operand (operands[0], <MODE>mode)
-+ || reg_or_0_operand (operands[1], <MODE>mode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
-+ (set_attr "mode" "SI")])
-+
-+;; 16-bit Integer moves
-+
-+;; Unlike most other insns, the move insns can't be split with
-+;; different predicates, because register spilling and other parts of
-+;; the compiler have memoized the insn number already.
-+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
-+
-+(define_expand "movhi"
-+ [(set (match_operand:HI 0 "")
-+ (match_operand:HI 1 ""))]
-+ ""
-+{
-+ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+(define_insn "*movhi_internal"
-+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
-+ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
-+ "(register_operand (operands[0], HImode)
-+ || reg_or_0_operand (operands[1], HImode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
-+ (set_attr "mode" "HI")])
-+
-+;; HImode constant generation; see riscv_move_integer for details.
-+;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
-+
-+(define_insn "add<mode>hi3"
-+ [(set (match_operand:HI 0 "register_operand" "=r,r")
-+ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
-+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
-+ ""
-+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
-+ [(set_attr "type" "arith")
-+ (set_attr "mode" "HI")])
-+
-+(define_insn "xor<mode>hi3"
-+ [(set (match_operand:HI 0 "register_operand" "=r,r")
-+ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
-+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
-+ ""
-+ "xor\t%0,%1,%2"
-+ [(set_attr "type" "logical")
-+ (set_attr "mode" "HI")])
-+
-+;; 8-bit Integer moves
-+
-+(define_expand "movqi"
-+ [(set (match_operand:QI 0 "")
-+ (match_operand:QI 1 ""))]
-+ ""
-+{
-+ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+(define_insn "*movqi_internal"
-+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
-+ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
-+ "(register_operand (operands[0], QImode)
-+ || reg_or_0_operand (operands[1], QImode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
-+ (set_attr "mode" "QI")])
-+
-+;; 32-bit floating point moves
-+
-+(define_expand "movsf"
-+ [(set (match_operand:SF 0 "")
-+ (match_operand:SF 1 ""))]
-+ ""
-+{
-+ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+(define_insn "*movsf_hardfloat"
-+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
-+ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
-+ "TARGET_HARD_FLOAT
-+ && (register_operand (operands[0], SFmode)
-+ || reg_or_0_operand (operands[1], SFmode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
-+ (set_attr "mode" "SF")])
-+
-+(define_insn "*movsf_softfloat"
-+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
-+ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
-+ "TARGET_SOFT_FLOAT
-+ && (register_operand (operands[0], SFmode)
-+ || reg_or_0_operand (operands[1], SFmode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,load,store")
-+ (set_attr "mode" "SF")])
-+
-+;; 64-bit floating point moves
-+
-+(define_expand "movdf"
-+ [(set (match_operand:DF 0 "")
-+ (match_operand:DF 1 ""))]
-+ ""
-+{
-+ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+;; In RV32, we lack mtf.d/mff.d. Go through memory instead.
-+;; (except for moving a constant 0 to an FPR; for that we use fcvt.d.w.)
-+(define_insn "*movdf_hardfloat_rv32"
-+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
-+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
-+ "!TARGET_64BIT && TARGET_HARD_FLOAT
-+ && (register_operand (operands[0], DFmode)
-+ || reg_or_0_operand (operands[1], DFmode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
-+ (set_attr "mode" "DF")])
-+
-+(define_insn "*movdf_hardfloat_rv64"
-+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
-+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
-+ "TARGET_64BIT && TARGET_HARD_FLOAT
-+ && (register_operand (operands[0], DFmode)
-+ || reg_or_0_operand (operands[1], DFmode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
-+ (set_attr "mode" "DF")])
-+
-+(define_insn "*movdf_softfloat"
-+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
-+ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
-+ "TARGET_SOFT_FLOAT
-+ && (register_operand (operands[0], DFmode)
-+ || reg_or_0_operand (operands[1], DFmode))"
-+ { return riscv_output_move (operands[0], operands[1]); }
-+ [(set_attr "move_type" "move,load,store")
-+ (set_attr "mode" "DF")])
-+
-+;; 128-bit integer moves
-+
-+(define_expand "movti"
-+ [(set (match_operand:TI 0)
-+ (match_operand:TI 1))]
-+ "TARGET_64BIT"
-+{
-+ if (riscv_legitimize_move (TImode, operands[0], operands[1]))
-+ DONE;
-+})
-+
-+(define_insn "*movti"
-+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
-+ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
-+ "TARGET_64BIT
-+ && (register_operand (operands[0], TImode)
-+ || reg_or_0_operand (operands[1], TImode))"
-+ "#"
-+ [(set_attr "move_type" "move,const,load,store")
-+ (set_attr "mode" "TI")])
-+
-+(define_split
-+ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
-+ (match_operand:MOVE64 1 "move_operand"))]
-+ "reload_completed && !TARGET_64BIT
-+ && riscv_split_64bit_move_p (operands[0], operands[1])"
-+ [(const_int 0)]
-+{
-+ riscv_split_doubleword_move (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_split
-+ [(set (match_operand:MOVE128 0 "nonimmediate_operand")
-+ (match_operand:MOVE128 1 "move_operand"))]
-+ "TARGET_64BIT && reload_completed"
-+ [(const_int 0)]
-+{
-+ riscv_split_doubleword_move (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+;; 64-bit paired-single floating point moves
-+
-+;; Load the low word of operand 0 with operand 1.
-+(define_insn "load_low<mode>"
-+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
-+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
-+ UNSPEC_LOAD_LOW))]
-+ "TARGET_HARD_FLOAT"
-+{
-+ operands[0] = riscv_subword (operands[0], 0);
-+ return riscv_output_move (operands[0], operands[1]);
-+}
-+ [(set_attr "move_type" "mtc,fpload")
-+ (set_attr "mode" "<HALFMODE>")])
-+
-+;; Load the high word of operand 0 from operand 1, preserving the value
-+;; in the low word.
-+(define_insn "load_high<mode>"
-+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
-+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
-+ (match_operand:SPLITF 2 "register_operand" "0,0")]
-+ UNSPEC_LOAD_HIGH))]
-+ "TARGET_HARD_FLOAT"
-+{
-+ operands[0] = riscv_subword (operands[0], 1);
-+ return riscv_output_move (operands[0], operands[1]);
-+}
-+ [(set_attr "move_type" "mtc,fpload")
-+ (set_attr "mode" "<HALFMODE>")])
-+
-+;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
-+;; high word and 0 to store the low word.
-+(define_insn "store_word<mode>"
-+ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
-+ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
-+ (match_operand 2 "const_int_operand")]
-+ UNSPEC_STORE_WORD))]
-+ "TARGET_HARD_FLOAT"
-+{
-+ operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
-+ return riscv_output_move (operands[0], operands[1]);
-+}
-+ [(set_attr "move_type" "mfc,fpstore")
-+ (set_attr "mode" "<HALFMODE>")])
-+
-+;; Expand in-line code to clear the instruction cache between operand[0] and
-+;; operand[1].
-+(define_expand "clear_cache"
-+ [(match_operand 0 "pmode_register_operand")
-+ (match_operand 1 "pmode_register_operand")]
-+ ""
-+ "
-+{
-+ emit_insn(gen_fence_i());
-+ DONE;
-+}")
-+
-+(define_insn "fence"
-+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
-+ ""
-+ "%|fence%-")
-+
-+(define_insn "fence_i"
-+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
-+ ""
-+ "fence.i")
-+
-+;; Block moves, see riscv.c for more details.
-+;; Argument 0 is the destination
-+;; Argument 1 is the source
-+;; Argument 2 is the length
-+;; Argument 3 is the alignment
-+
-+(define_expand "movmemsi"
-+ [(parallel [(set (match_operand:BLK 0 "general_operand")
-+ (match_operand:BLK 1 "general_operand"))
-+ (use (match_operand:SI 2 ""))
-+ (use (match_operand:SI 3 "const_int_operand"))])]
-+ "!TARGET_MEMCPY"
-+{
-+ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
-+ DONE;
-+ else
-+ FAIL;
-+})
-+
-+;;
-+;; ....................
-+;;
-+;; SHIFTS
-+;;
-+;; ....................
-+
-+(define_insn "<optab>si3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
-+ (match_operand:SI 2 "arith_operand" "rI")))]
-+ ""
-+{
-+ if (GET_CODE (operands[2]) == CONST_INT)
-+ operands[2] = GEN_INT (INTVAL (operands[2])
-+ & (GET_MODE_BITSIZE (SImode) - 1));
-+
-+ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
-+}
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*<optab>disi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
-+ (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
-+ "TARGET_64BIT"
-+ "<insn>w\t%0,%1,%2"
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*ashldi3_truncsi"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (truncate:SI
-+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "const_arith_operand" "I"))))]
-+ "TARGET_64BIT && INTVAL (operands[2]) < 32"
-+ "sllw\t%0,%1,%2"
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "*ashldisi3"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (ashift:SI (match_operand:GPR 1 "register_operand" "r")
-+ (match_operand:GPR2 2 "arith_operand" "rI")))]
-+ "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
-+ "sllw\t%0,%1,%2"
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "SI")])
-+
-+(define_insn "<optab>di3"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:DI 2 "arith_operand" "rI")))]
-+ "TARGET_64BIT"
-+{
-+ if (GET_CODE (operands[2]) == CONST_INT)
-+ operands[2] = GEN_INT (INTVAL (operands[2])
-+ & (GET_MODE_BITSIZE (DImode) - 1));
-+
-+ return "<insn>\t%0,%1,%2";
-+}
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "DI")])
-+
-+(define_insn "<optab>si3_extend"
-+ [(set (match_operand:DI 0 "register_operand" "=r")
-+ (sign_extend:DI
-+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
-+ (match_operand:SI 2 "arith_operand" "rI"))))]
-+ "TARGET_64BIT"
-+{
-+ if (GET_CODE (operands[2]) == CONST_INT)
-+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
-+
-+ return "<insn>w\t%0,%1,%2";
-+}
-+ [(set_attr "type" "shift")
-+ (set_attr "mode" "SI")])
-+
-+;;
-+;; ....................
-+;;
-+;; CONDITIONAL BRANCHES
-+;;
-+;; ....................
-+
-+;; Conditional branches
-+
-+(define_insn "*branch_order<mode>"
-+ [(set (pc)
-+ (if_then_else
-+ (match_operator 1 "order_operator"
-+ [(match_operand:GPR 2 "register_operand" "r")
-+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
-+ (label_ref (match_operand 0 "" ""))
-+ (pc)))]
-+ ""
-+{
-+ if (GET_CODE (operands[3]) == CONST_INT)
-+ return "b%C1z\t%2,%0";
-+ return "b%C1\t%2,%3,%0";
-+}
-+ [(set_attr "type" "branch")
-+ (set_attr "mode" "none")])
-+
-+;; Used to implement built-in functions.
-+(define_expand "condjump"
-+ [(set (pc)
-+ (if_then_else (match_operand 0)
-+ (label_ref (match_operand 1))
-+ (pc)))])
-+
-+(define_expand "cbranch<mode>4"
-+ [(set (pc)
-+ (if_then_else (match_operator 0 "comparison_operator"
-+ [(match_operand:GPR 1 "register_operand")
-+ (match_operand:GPR 2 "nonmemory_operand")])
-+ (label_ref (match_operand 3 ""))
-+ (pc)))]
-+ ""
-+{
-+ riscv_expand_conditional_branch (operands);
-+ DONE;
-+})
-+
-+(define_expand "cbranch<mode>4"
-+ [(set (pc)
-+ (if_then_else (match_operator 0 "comparison_operator"
-+ [(match_operand:SCALARF 1 "register_operand")
-+ (match_operand:SCALARF 2 "register_operand")])
-+ (label_ref (match_operand 3 ""))
-+ (pc)))]
-+ ""
-+{
-+ riscv_expand_conditional_branch (operands);
-+ DONE;
-+})
-+
-+(define_insn_and_split "*branch_on_bit<GPR:mode>"
-+ [(set (pc)
-+ (if_then_else
-+ (match_operator 0 "equality_operator"
-+ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
-+ (const_int 1)
-+ (match_operand 3 "branch_on_bit_operand"))
-+ (const_int 0)])
-+ (label_ref (match_operand 1))
-+ (pc)))
-+ (clobber (match_scratch:GPR 4 "=&r"))]
-+ ""
-+ "#"
-+ "reload_completed"
-+ [(set (match_dup 4)
-+ (ashift:GPR (match_dup 2) (match_dup 3)))
-+ (set (pc)
-+ (if_then_else
-+ (match_op_dup 0 [(match_dup 4) (const_int 0)])
-+ (label_ref (match_operand 1))
-+ (pc)))]
-+{
-+ int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
-+ operands[3] = GEN_INT (shift);
-+
-+ if (GET_CODE (operands[0]) == EQ)
-+ operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
-+ else
-+ operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
-+})
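
As a hedged illustration (names are invented, not from the port), the rewrite performed by the *branch_on_bit split above amounts to shifting the tested bit into the sign position and branching on the sign, so "bit == 0" becomes "greater or equal to zero" and "bit != 0" becomes "less than zero":

    #include <stdbool.h>
    #include <stdint.h>

    static bool branch_on_bit_sketch (uint64_t x, unsigned pos)
    {
      /* slli scratch, x, 63 - pos ; then bltz/bgez on the scratch register
         (64-bit registers assumed; pos must be in the range 0..63). */
      int64_t shifted = (int64_t) (x << (63 - pos));
      return shifted < 0;   /* true when bit "pos" of x is set */
    }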
-+
-+(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
-+ [(set (pc)
-+ (if_then_else
-+ (match_operator 0 "equality_operator"
-+ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
-+ (match_operand 3 "branch_on_bit_operand")
-+ (const_int 0))
-+ (const_int 0)])
-+ (label_ref (match_operand 1))
-+ (pc)))
-+ (clobber (match_scratch:GPR 4 "=&r"))]
-+ ""
-+ "#"
-+ "reload_completed"
-+ [(set (match_dup 4)
-+ (ashift:GPR (match_dup 2) (match_dup 3)))
-+ (set (pc)
-+ (if_then_else
-+ (match_op_dup 0 [(match_dup 4) (const_int 0)])
-+ (label_ref (match_operand 1))
-+ (pc)))]
-+{
-+ operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
-+})
-+
-+;;
-+;; ....................
-+;;
-+;; SETTING A REGISTER FROM A COMPARISON
-+;;
-+;; ....................
-+
-+;; Destination is always set in SI mode.
-+
-+(define_expand "cstore<mode>4"
-+ [(set (match_operand:SI 0 "register_operand")
-+ (match_operator:SI 1 "order_operator"
-+ [(match_operand:GPR 2 "register_operand")
-+ (match_operand:GPR 3 "nonmemory_operand")]))]
-+ ""
-+{
-+ riscv_expand_scc (operands);
-+ DONE;
-+})
-+
-+(define_insn "cstore<mode>4"
-+ [(set (match_operand:SI 0 "register_operand" "=r")
-+ (match_operator:SI 1 "fp_order_operator"
-+ [(match_operand:SCALARF 2 "register_operand" "f")
-+ (match_operand:SCALARF 3 "register_operand" "f")]))]
-+ "TARGET_HARD_FLOAT"
-+{
-+ if (GET_CODE (operands[1]) == NE)
-+ return "feq.<fmt>\t%0,%2,%3; seqz %0, %0";
-+ return "f%C1.<fmt>\t%0,%2,%3";
-+}
-+ [(set_attr "type" "fcmp")
-+ (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
-+ [(set (match_operand:GPR2 0 "register_operand" "=r")
-+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+ (const_int 0)))]
-+ ""
-+ "seqz\t%0,%1"
-+ [(set_attr "type" "slt")
-+ (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
-+ [(set (match_operand:GPR2 0 "register_operand" "=r")
-+ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+ (const_int 0)))]
-+ ""
-+ "snez\t%0,%1"
-+ [(set_attr "type" "slt")
-+ (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
-+ [(set (match_operand:GPR2 0 "register_operand" "=r")
-+ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
-+ ""
-+ "slt<u>\t%0,%z2,%1"
-+ [(set_attr "type" "slt")
-+ (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
-+ [(set (match_operand:GPR2 0 "register_operand" "=r")
-+ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+ (const_int 1)))]
-+ ""
-+ "slt<u>\t%0,zero,%1"
-+ [(set_attr "type" "slt")
-+ (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
-+ [(set (match_operand:GPR2 0 "register_operand" "=r")
-+ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+ (match_operand:GPR 2 "arith_operand" "rI")))]
-+ ""
-+ "slt<u>\t%0,%1,%2"
-+ [(set_attr "type" "slt")
-+ (set_attr "mode" "<GPR:MODE>")])
-+
-+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
-+ [(set (match_operand:GPR2 0 "register_operand" "=r")
-+ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
-+ (match_operand:GPR 2 "sle_operand" "")))]
-+ ""
-+{
-+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
-+ return "slt<u>\t%0,%1,%2";
-+}
-+ [(set_attr "type" "slt")
-+ (set_attr "mode" "<GPR:MODE>")])
-+
-+;;
-+;; ....................
-+;;
-+;; UNCONDITIONAL BRANCHES
-+;;
-+;; ....................
-+
-+;; Unconditional branches.
-+
-+(define_insn "jump"
-+ [(set (pc)
-+ (label_ref (match_operand 0 "" "")))]
-+ ""
-+ "j\t%l0"
-+ [(set_attr "type" "jump")
-+ (set_attr "mode" "none")])
-+
-+(define_expand "indirect_jump"
-+ [(set (pc) (match_operand 0 "register_operand"))]
-+ ""
-+{
-+ operands[0] = force_reg (Pmode, operands[0]);
-+ if (Pmode == SImode)
-+ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
-+ else
-+ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
-+ DONE;
-+})
-+
-+(define_insn "indirect_jump<mode>"
-+ [(set (pc) (match_operand:P 0 "register_operand" "l"))]
-+ ""
-+ "jr\t%0"
-+ [(set_attr "type" "jump")
-+ (set_attr "mode" "none")])
-+
-+(define_expand "tablejump"
-+ [(set (pc) (match_operand 0 "register_operand" ""))
-+ (use (label_ref (match_operand 1 "" "")))]
-+ ""
-+{
-+ if (CASE_VECTOR_PC_RELATIVE)
-+ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
-+ gen_rtx_LABEL_REF (Pmode, operands[1]),
-+ NULL_RTX, 0, OPTAB_DIRECT);
-+
-+ if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
-+ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
-+ else
-+ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
-+ DONE;
-+})
-+
-+(define_insn "tablejump<mode>"
-+ [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
-+ (use (label_ref (match_operand 1 "" "")))]
-+ ""
-+ "jr\t%0"
-+ [(set_attr "type" "jump")
-+ (set_attr "mode" "none")])
-+
-+;;
-+;; ....................
-+;;
-+;; Function prologue/epilogue
-+;;
-+;; ....................
-+;;
-+
-+(define_expand "prologue"
-+ [(const_int 1)]
-+ ""
-+{
-+ riscv_expand_prologue ();
-+ DONE;
-+})
-+
-+;; Block any insns from being moved before this point, since the
-+;; profiling call to mcount can use various registers that aren't
-+;; saved or used to pass arguments.
-+
-+(define_insn "blockage"
-+ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
-+ ""
-+ ""
-+ [(set_attr "type" "ghost")
-+ (set_attr "mode" "none")])
-+
-+(define_expand "epilogue"
-+ [(const_int 2)]
-+ ""
-+{
-+ riscv_expand_epilogue (false);
-+ DONE;
-+})
-+
-+(define_expand "sibcall_epilogue"
-+ [(const_int 2)]
-+ ""
-+{
-+ riscv_expand_epilogue (true);
-+ DONE;
-+})
-+
-+;; Trivial return. Make it look like a normal return insn as that
-+;; allows jump optimizations to work better.
-+
-+(define_expand "return"
-+ [(simple_return)]
-+ "riscv_can_use_return_insn ()"
-+ "")
-+
-+(define_insn "simple_return"
-+ [(simple_return)]
-+ ""
-+ "ret"
-+ [(set_attr "type" "jump")
-+ (set_attr "mode" "none")])
-+
-+;; Normal return.
-+
-+(define_insn "simple_return_internal"
-+ [(simple_return)
-+ (use (match_operand 0 "pmode_register_operand" ""))]
-+ ""
-+ "jr\t%0"
-+ [(set_attr "type" "jump")
-+ (set_attr "mode" "none")])
-+
-+;; This is used in compiling the unwind routines.
-+(define_expand "eh_return"
-+ [(use (match_operand 0 "general_operand"))]
-+ ""
-+{
-+ if (GET_MODE (operands[0]) != word_mode)
-+ operands[0] = convert_to_mode (word_mode, operands[0], 0);
-+ if (TARGET_64BIT)
-+ emit_insn (gen_eh_set_lr_di (operands[0]));
-+ else
-+ emit_insn (gen_eh_set_lr_si (operands[0]));
-+ DONE;
-+})
-+
-+;; Clobber the return address on the stack. We can't expand this
-+;; until we know where it will be put in the stack frame.
-+
-+(define_insn "eh_set_lr_si"
-+ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
-+ (clobber (match_scratch:SI 1 "=&r"))]
-+ "! TARGET_64BIT"
-+ "#")
-+
-+(define_insn "eh_set_lr_di"
-+ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
-+ (clobber (match_scratch:DI 1 "=&r"))]
-+ "TARGET_64BIT"
-+ "#")
-+
-+(define_split
-+ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
-+ (clobber (match_scratch 1))]
-+ "reload_completed"
-+ [(const_int 0)]
-+{
-+ riscv_set_return_address (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+;;
-+;; ....................
-+;;
-+;; FUNCTION CALLS
-+;;
-+;; ....................
-+
-+;; Sibling calls. All these patterns use jump instructions.
-+
-+;; call_insn_operand will only accept constant
-+;; addresses if a direct jump is acceptable. Since the 'S' constraint
-+;; is defined in terms of call_insn_operand, the same is true of the
-+;; constraints.
-+
-+;; When we use an indirect jump, we need a register that will be
-+;; preserved by the epilogue (constraint j).
-+
-+(define_expand "sibcall"
-+ [(parallel [(call (match_operand 0 "")
-+ (match_operand 1 ""))
-+ (use (match_operand 2 "")) ;; next_arg_reg
-+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
-+ ""
-+{
-+ riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
-+ DONE;
-+})
-+
-+(define_insn "sibcall_internal"
-+ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
-+ (match_operand 1 "" ""))]
-+ "SIBLING_CALL_P (insn)"
-+ { return REG_P (operands[0]) ? "jr\t%0"
-+ : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
-+ : "tail\t%0@"; }
-+ [(set_attr "type" "call")])
-+
-+(define_expand "sibcall_value"
-+ [(parallel [(set (match_operand 0 "")
-+ (call (match_operand 1 "")
-+ (match_operand 2 "")))
-+ (use (match_operand 3 ""))])] ;; next_arg_reg
-+ ""
-+{
-+ riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
-+ DONE;
-+})
-+
-+(define_insn "sibcall_value_internal"
-+ [(set (match_operand 0 "register_operand" "")
-+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
-+ (match_operand 2 "" "")))]
-+ "SIBLING_CALL_P (insn)"
-+ { return REG_P (operands[1]) ? "jr\t%1"
-+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
-+ : "tail\t%1@"; }
-+ [(set_attr "type" "call")])
-+
-+(define_insn "sibcall_value_multiple_internal"
-+ [(set (match_operand 0 "register_operand" "")
-+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
-+ (match_operand 2 "" "")))
-+ (set (match_operand 3 "register_operand" "")
-+ (call (mem:SI (match_dup 1))
-+ (match_dup 2)))]
-+ "SIBLING_CALL_P (insn)"
-+ { return REG_P (operands[1]) ? "jr\t%1"
-+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
-+ : "tail\t%1@"; }
-+ [(set_attr "type" "call")])
-+
-+(define_expand "call"
-+ [(parallel [(call (match_operand 0 "")
-+ (match_operand 1 ""))
-+ (use (match_operand 2 "")) ;; next_arg_reg
-+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
-+ ""
-+{
-+ riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
-+ DONE;
-+})
-+
-+(define_insn "call_internal"
-+ [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S"))
-+ (match_operand 1 "" ""))
-+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
-+ ""
-+ { return REG_P (operands[0]) ? "jalr\t%0"
-+ : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
-+ : "call\t%0@"; }
-+ [(set_attr "type" "call")])
-+
-+(define_expand "call_value"
-+ [(parallel [(set (match_operand 0 "")
-+ (call (match_operand 1 "")
-+ (match_operand 2 "")))
-+ (use (match_operand 3 ""))])] ;; next_arg_reg
-+ ""
-+{
-+ riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
-+ DONE;
-+})
-+
-+;; See comment for call_internal.
-+(define_insn "call_value_internal"
-+ [(set (match_operand 0 "register_operand" "")
-+ (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
-+ (match_operand 2 "" "")))
-+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
-+ ""
-+ { return REG_P (operands[1]) ? "jalr\t%1"
-+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
-+ : "call\t%1@"; }
-+ [(set_attr "type" "call")])
-+
-+;; See comment for call_internal.
-+(define_insn "call_value_multiple_internal"
-+ [(set (match_operand 0 "register_operand" "")
-+ (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
-+ (match_operand 2 "" "")))
-+ (set (match_operand 3 "register_operand" "")
-+ (call (mem:SI (match_dup 1))
-+ (match_dup 2)))
-+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
-+ ""
-+ { return REG_P (operands[1]) ? "jalr\t%1"
-+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
-+ : "call\t%1@"; }
-+ [(set_attr "type" "call")])
-+
-+;; Call subroutine returning any type.
-+
-+(define_expand "untyped_call"
-+ [(parallel [(call (match_operand 0 "")
-+ (const_int 0))
-+ (match_operand 1 "")
-+ (match_operand 2 "")])]
-+ ""
-+{
-+ int i;
-+
-+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
-+
-+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
-+ {
-+ rtx set = XVECEXP (operands[2], 0, i);
-+ riscv_emit_move (SET_DEST (set), SET_SRC (set));
-+ }
-+
-+ emit_insn (gen_blockage ());
-+ DONE;
-+})
-+
-+(define_insn "nop"
-+ [(const_int 0)]
-+ ""
-+ "nop"
-+ [(set_attr "type" "nop")
-+ (set_attr "mode" "none")])
-+
-+(define_insn "trap"
-+ [(trap_if (const_int 1) (const_int 0))]
-+ ""
-+ "sbreak")
-+
-+(define_insn "gpr_save"
-+ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_SAVE)
-+ (clobber (reg:SI T0_REGNUM))
-+ (clobber (reg:SI T1_REGNUM))]
-+ ""
-+ { return riscv_output_gpr_save (INTVAL (operands[0])); })
-+
-+(define_insn "gpr_restore"
-+ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_RESTORE)]
-+ ""
-+ "tail\t__riscv_restore_%0")
-+
-+(define_insn "gpr_restore_return"
-+ [(return)
-+ (use (match_operand 0 "pmode_register_operand" ""))
-+ (const_int 0)]
-+ ""
-+ "")
-+
-+(include "sync.md")
-+(include "peephole.md")
-+(include "generic.md")
-diff -urN empty/gcc/config/riscv/riscv-modes.def gcc-5.3.0/gcc/config/riscv/riscv-modes.def
---- empty/gcc/config/riscv/riscv-modes.def 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv-modes.def 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,26 @@
-+/* Extra machine modes for RISC-V target.
-+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/>. */
-+
-+FLOAT_MODE (TF, 16, ieee_quad_format);
-+
-+/* Vector modes. */
-+VECTOR_MODES (INT, 4); /* V8QI V4HI V2SI */
-+VECTOR_MODES (FLOAT, 4); /* V4HF V2SF */
-diff -urN empty/gcc/config/riscv/riscv.opt gcc-5.3.0/gcc/config/riscv/riscv.opt
---- empty/gcc/config/riscv/riscv.opt 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv.opt 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,87 @@
-+; Options for the MIPS port of the compiler
-+;
-+; Copyright (C) 2005, 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
-+;
-+; This file is part of GCC.
-+;
-+; GCC is free software; you can redistribute it and/or modify it under
-+; the terms of the GNU General Public License as published by the Free
-+; Software Foundation; either version 3, or (at your option) any later
-+; version.
-+;
-+; GCC is distributed in the hope that it will be useful, but WITHOUT
-+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
-+; License for more details.
-+;
-+; You should have received a copy of the GNU General Public License
-+; along with GCC; see the file COPYING3. If not see
-+; <http://www.gnu.org/licenses/>.
-+
-+m32
-+Target RejectNegative Mask(32BIT)
-+Generate RV32 code
-+
-+m64
-+Target RejectNegative InverseMask(32BIT, 64BIT)
-+Generate RV64 code
-+
-+mbranch-cost=
-+Target RejectNegative Joined UInteger Var(riscv_branch_cost)
-+-mbranch-cost=COST Set the cost of branches to roughly COST instructions
-+
-+mhard-float
-+Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
-+Allow the use of hardware floating-point ABI and instructions
-+
-+mmemcpy
-+Target Report Mask(MEMCPY)
-+Don't optimize block moves
-+
-+mplt
-+Target Report Var(TARGET_PLT) Init(1)
-+When generating -fpic code, allow the use of PLTs. Ignored for fno-pic.
-+
-+msoft-float
-+Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
-+Prevent the use of all hardware floating-point instructions
-+
-+mno-fdiv
-+Target Report RejectNegative Mask(NO_FDIV)
-+Don't use hardware floating-point divide and square root instructions
-+
-+mfdiv
-+Target Report RejectNegative InverseMask(NO_FDIV, FDIV)
-+Use hardware floating-point divide and square root instructions
-+
-+march=
-+Target RejectNegative Joined Var(riscv_arch_string)
-+-march= Generate code for given RISC-V ISA (e.g. RV64IM)
-+
-+mtune=
-+Target RejectNegative Joined Var(riscv_tune_string)
-+-mtune=PROCESSOR Optimize the output for PROCESSOR
-+
-+msmall-data-limit=
-+Target Joined Separate UInteger Var(g_switch_value) Init(8)
-+-msmall-data-limit=<number> Put global and static data smaller than <number> bytes into a special section (on some targets)
-+
-+matomic
-+Target Report Mask(ATOMIC)
-+Use hardware atomic memory instructions.
-+
-+mmuldiv
-+Target Report Mask(MULDIV)
-+Use hardware instructions for integer multiplication and division.
-+
-+mrvc
-+Target Report Mask(RVC)
-+Use compressed instruction encoding
-+
-+msave-restore
-+Target Report Mask(SAVE_RESTORE)
-+Use smaller but slower prologue and epilogue code
-+
-+mcmodel=
-+Target RejectNegative Joined Var(riscv_cmodel_string)
-+Use given RISC-V code model (medlow or medany)
-diff -urN empty/gcc/config/riscv/riscv-protos.h gcc-5.3.0/gcc/config/riscv/riscv-protos.h
---- empty/gcc/config/riscv/riscv-protos.h 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/riscv-protos.h 2016-04-02 14:07:12.469104719 +0800
-@@ -0,0 +1,94 @@
-+/* Definition of RISC-V target for GNU compiler.
-+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target for GNU compiler.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3. If not see
-+<http://www.gnu.org/licenses/>. */
-+
-+#ifndef GCC_RISCV_PROTOS_H
-+#define GCC_RISCV_PROTOS_H
-+
-+enum riscv_symbol_type {
-+ SYMBOL_ABSOLUTE,
-+ SYMBOL_GOT_DISP,
-+ SYMBOL_TLS,
-+ SYMBOL_TLS_LE,
-+ SYMBOL_TLS_IE,
-+ SYMBOL_TLS_GD
-+};
-+#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
-+
-+enum riscv_code_model {
-+ CM_MEDLOW,
-+ CM_MEDANY,
-+ CM_PIC
-+};
-+extern enum riscv_code_model riscv_cmodel;
-+
-+extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
-+extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
-+extern int riscv_address_insns (rtx, enum machine_mode, bool);
-+extern int riscv_const_insns (rtx);
-+extern int riscv_split_const_insns (rtx);
-+extern int riscv_load_store_insns (rtx, rtx_insn *);
-+extern rtx riscv_emit_move (rtx, rtx);
-+extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
-+extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
-+extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
-+extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
-+extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
-+
-+extern rtx riscv_subword (rtx, bool);
-+extern bool riscv_split_64bit_move_p (rtx, rtx);
-+extern void riscv_split_doubleword_move (rtx, rtx);
-+extern const char *riscv_output_move (rtx, rtx);
-+extern const char *riscv_output_gpr_save (unsigned);
-+#ifdef RTX_CODE
-+extern void riscv_expand_scc (rtx *);
-+extern void riscv_expand_conditional_branch (rtx *);
-+#endif
-+extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
-+extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
-+extern void riscv_set_return_address (rtx, rtx);
-+extern bool riscv_expand_block_move (rtx, rtx, rtx);
-+extern void riscv_expand_synci_loop (rtx, rtx);
-+
-+extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
-+ HOST_WIDE_INT);
-+extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
-+ HOST_WIDE_INT);
-+extern void riscv_order_regs_for_local_alloc (void);
-+
-+extern rtx riscv_return_addr (int, rtx);
-+extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
-+extern void riscv_expand_prologue (void);
-+extern void riscv_expand_epilogue (bool);
-+extern bool riscv_can_use_return_insn (void);
-+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
-+
-+extern enum reg_class riscv_secondary_reload_class (enum reg_class,
-+ enum machine_mode,
-+ rtx, bool);
-+extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
-+
-+extern void irix_asm_output_align (FILE *, unsigned);
-+extern const char *current_section_name (void);
-+extern unsigned int current_section_flags (void);
-+
-+extern void riscv_expand_vector_init (rtx, rtx);
-+
-+#endif /* ! GCC_RISCV_PROTOS_H */
-diff -urN empty/gcc/config/riscv/sync.md gcc-5.3.0/gcc/config/riscv/sync.md
---- empty/gcc/config/riscv/sync.md 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/sync.md 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,204 @@
-+;; Machine description for RISC-V atomic operations.
-+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
-+;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+;; Based on MIPS target for GNU compiler.
-+
-+;; This file is part of GCC.
-+
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+;; GNU General Public License for more details.
-+
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+(define_c_enum "unspec" [
-+ UNSPEC_COMPARE_AND_SWAP
-+ UNSPEC_SYNC_OLD_OP
-+ UNSPEC_SYNC_EXCHANGE
-+ UNSPEC_ATOMIC_STORE
-+ UNSPEC_MEMORY_BARRIER
-+])
-+
-+(define_code_iterator any_atomic [plus ior xor and])
-+(define_code_attr atomic_optab
-+ [(plus "add") (ior "or") (xor "xor") (and "and")])
-+
-+;; Memory barriers.
-+
-+(define_expand "mem_thread_fence"
-+ [(match_operand:SI 0 "const_int_operand" "")] ;; model
-+ ""
-+{
-+ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
-+ {
-+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
-+ MEM_VOLATILE_P (mem) = 1;
-+ emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
-+ }
-+ DONE;
-+})
-+
-+(define_insn "mem_thread_fence_1"
-+ [(set (match_operand:BLK 0 "" "")
-+ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
-+ (match_operand:SI 1 "const_int_operand" "")] ;; model
-+ ""
-+{
-+ long model = INTVAL (operands[1]);
-+
-+ switch (model)
-+ {
-+ case MEMMODEL_SEQ_CST:
-+ case MEMMODEL_SYNC_SEQ_CST:
-+ case MEMMODEL_ACQ_REL:
-+ return "fence rw,rw";
-+ case MEMMODEL_ACQUIRE:
-+ case MEMMODEL_SYNC_ACQUIRE:
-+ case MEMMODEL_CONSUME:
-+ return "fence r,rw";
-+ case MEMMODEL_RELEASE:
-+ case MEMMODEL_SYNC_RELEASE:
-+ return "fence rw,w";
-+ default:
-+ fprintf(stderr, "mem_thread_fence_1(%ld)\n", model);
-+ gcc_unreachable();
-+ }
-+})
-+
-+;; Atomic memory operations.
-+
-+;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
-+(define_insn "atomic_store<mode>"
-+ [(set (match_operand:GPR 0 "memory_operand" "=A")
-+ (unspec_volatile:GPR
-+ [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
-+ (match_operand:SI 2 "const_int_operand")] ;; model
-+ UNSPEC_ATOMIC_STORE))]
-+ "TARGET_ATOMIC"
-+ "amoswap.<amo>%A2 zero,%z1,%0")
-+
-+(define_insn "atomic_<atomic_optab><mode>"
-+ [(set (match_operand:GPR 0 "memory_operand" "+A")
-+ (unspec_volatile:GPR
-+ [(any_atomic:GPR (match_dup 0)
-+ (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
-+ (match_operand:SI 2 "const_int_operand")] ;; model
-+ UNSPEC_SYNC_OLD_OP))]
-+ "TARGET_ATOMIC"
-+ "amo<insn>.<amo>%A2 zero,%z1,%0")
-+
-+(define_insn "atomic_fetch_<atomic_optab><mode>"
-+ [(set (match_operand:GPR 0 "register_operand" "=&r")
-+ (match_operand:GPR 1 "memory_operand" "+A"))
-+ (set (match_dup 1)
-+ (unspec_volatile:GPR
-+ [(any_atomic:GPR (match_dup 1)
-+ (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
-+ (match_operand:SI 3 "const_int_operand")] ;; model
-+ UNSPEC_SYNC_OLD_OP))]
-+ "TARGET_ATOMIC"
-+ "amo<insn>.<amo>%A3 %0,%z2,%1")
-+
-+(define_insn "atomic_exchange<mode>"
-+ [(set (match_operand:GPR 0 "register_operand" "=&r")
-+ (unspec_volatile:GPR
-+ [(match_operand:GPR 1 "memory_operand" "+A")
-+ (match_operand:SI 3 "const_int_operand")] ;; model
-+ UNSPEC_SYNC_EXCHANGE))
-+ (set (match_dup 1)
-+ (match_operand:GPR 2 "register_operand" "0"))]
-+ "TARGET_ATOMIC"
-+ "amoswap.<amo>%A3 %0,%z2,%1")
-+
-+(define_insn "atomic_cas_value_strong<mode>"
-+ [(set (match_operand:GPR 0 "register_operand" "=&r")
-+ (match_operand:GPR 1 "memory_operand" "+A"))
-+ (set (match_dup 1)
-+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
-+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")
-+ (match_operand:SI 4 "const_int_operand") ;; mod_s
-+ (match_operand:SI 5 "const_int_operand")] ;; mod_f
-+ UNSPEC_COMPARE_AND_SWAP))
-+ (clobber (match_scratch:GPR 6 "=&r"))]
-+ "TARGET_ATOMIC"
-+ "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
-+ [(set (attr "length") (const_int 16))])
-+
-+(define_expand "atomic_compare_and_swap<mode>"
-+ [(match_operand:SI 0 "register_operand" "") ;; bool output
-+ (match_operand:GPR 1 "register_operand" "") ;; val output
-+ (match_operand:GPR 2 "memory_operand" "") ;; memory
-+ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value
-+ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
-+ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
-+ (match_operand:SI 6 "const_int_operand" "") ;; mod_s
-+ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
-+ "TARGET_ATOMIC"
-+{
-+ emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
-+ operands[3], operands[4],
-+ operands[6], operands[7]));
-+
-+ rtx compare = operands[1];
-+ if (operands[3] != const0_rtx)
-+ {
-+ rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
-+ compare = gen_reg_rtx (<MODE>mode);
-+ emit_insn (gen_rtx_SET (VOIDmode, compare, difference));
-+ }
-+
-+ rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
-+ rtx result = gen_reg_rtx (<MODE>mode);
-+ emit_insn (gen_rtx_SET (VOIDmode, result, eq));
-+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_lowpart (SImode, result)));
-+ DONE;
-+})
-+
-+(define_expand "atomic_test_and_set"
-+ [(match_operand:QI 0 "register_operand" "") ;; bool output
-+ (match_operand:QI 1 "memory_operand" "+A") ;; memory
-+ (match_operand:SI 2 "const_int_operand" "")] ;; model
-+ "TARGET_ATOMIC"
-+{
-+ /* We have no QImode atomics, so use the address LSBs to form a mask,
-+ then use an aligned SImode atomic. */
-+ rtx result = operands[0];
-+ rtx mem = operands[1];
-+ rtx model = operands[2];
-+ rtx addr = force_reg (Pmode, XEXP (mem, 0));
-+
-+ rtx aligned_addr = gen_reg_rtx (Pmode);
-+ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
-+
-+ rtx aligned_mem = change_address (mem, SImode, aligned_addr);
-+ set_mem_alias_set (aligned_mem, 0);
-+
-+ rtx offset = gen_reg_rtx (SImode);
-+ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
-+ GEN_INT (3)));
-+
-+ rtx tmp = gen_reg_rtx (SImode);
-+ emit_move_insn (tmp, GEN_INT (1));
-+
-+ rtx shmt = gen_reg_rtx (SImode);
-+ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
-+
-+ rtx word = gen_reg_rtx (SImode);
-+ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
-+
-+ tmp = gen_reg_rtx (SImode);
-+ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
-+
-+ emit_move_insn (gen_lowpart (SImode, result),
-+ gen_rtx_LSHIFTRT (SImode, tmp,
-+ gen_lowpart (SImode, shmt)));
-+ DONE;
-+})
-diff -urN empty/gcc/config/riscv/t-elf gcc-5.3.0/gcc/config/riscv/t-elf
---- empty/gcc/config/riscv/t-elf 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/t-elf 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4 @@
-+# Build the libraries for both hard and soft floating point
-+
-+MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
-+MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
-diff -urN empty/gcc/config/riscv/t-linux64 gcc-5.3.0/gcc/config/riscv/t-linux64
---- empty/gcc/config/riscv/t-linux64 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/gcc/config/riscv/t-linux64 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,5 @@
-+# Build the libraries for both hard and soft floating point
-+
-+MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
-+MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
-+MULTILIB_OSDIRNAMES = ../lib ../lib32 soft-float no-atomic
-diff -urN empty/libgcc/config/riscv/crti.S gcc-5.3.0/libgcc/config/riscv/crti.S
---- empty/libgcc/config/riscv/crti.S 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/crti.S 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1 @@
-+/* crti.S is empty because .init_array/.fini_array are used exclusively. */
-diff -urN empty/libgcc/config/riscv/crtn.S gcc-5.3.0/libgcc/config/riscv/crtn.S
---- empty/libgcc/config/riscv/crtn.S 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/crtn.S 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1 @@
-+/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
-diff -urN empty/libgcc/config/riscv/div.S gcc-5.3.0/libgcc/config/riscv/div.S
---- empty/libgcc/config/riscv/div.S 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/div.S 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,121 @@
-+ .text
-+ .align 2
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
-+# define __udivdi3 __udivsi3
-+# define __umoddi3 __umodsi3
-+# define __divdi3 __divsi3
-+# define __moddi3 __modsi3
-+#else
-+ .globl __udivsi3
-+__udivsi3:
-+ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
-+ sll a0, a0, 32
-+ sll a1, a1, 32
-+ move t0, ra
-+ jal __udivdi3
-+ sext.w a0, a0
-+ jr t0
-+
-+ .globl __umodsi3
-+__umodsi3:
-+ /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */
-+ sll a0, a0, 32
-+ sll a1, a1, 32
-+ srl a0, a0, 32
-+ srl a1, a1, 32
-+ move t0, ra
-+ jal __udivdi3
-+ sext.w a0, a1
-+ jr t0
-+
-+ .globl __modsi3
-+ __modsi3 = __moddi3
-+
-+ .globl __divsi3
-+__divsi3:
-+ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
-+ li t0, -1
-+ beq a1, t0, .L20
-+#endif
-+
-+ .globl __divdi3
-+__divdi3:
-+ bltz a0, .L10
-+ bltz a1, .L11
-+ /* Since the quotient is positive, fall into __udivdi3. */
-+
-+ .globl __udivdi3
-+__udivdi3:
-+ mv a2, a1
-+ mv a1, a0
-+ li a0, -1
-+ beqz a2, .L5
-+ li a3, 1
-+ bgeu a2, a1, .L2
-+.L1:
-+ blez a2, .L2
-+ slli a2, a2, 1
-+ slli a3, a3, 1
-+ bgtu a1, a2, .L1
-+.L2:
-+ li a0, 0
-+.L3:
-+ bltu a1, a2, .L4
-+ sub a1, a1, a2
-+ or a0, a0, a3
-+.L4:
-+ srli a3, a3, 1
-+ srli a2, a2, 1
-+ bnez a3, .L3
-+.L5:
-+ ret
-+
-+ .globl __umoddi3
-+__umoddi3:
-+ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
-+ move t0, ra
-+ jal __udivdi3
-+ move a0, a1
-+ jr t0
-+
-+ /* Handle negative arguments to __divdi3. */
-+.L10:
-+ neg a0, a0
-+ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
-+ neg a1, a1
-+ j __divdi3 /* Compute __udivdi3(-a0, -a1). */
-+.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
-+ neg a1, a1
-+.L12:
-+ move t0, ra
-+ jal __divdi3
-+ neg a0, a0
-+ jr t0
-+
-+ .globl __moddi3
-+__moddi3:
-+ move t0, ra
-+ bltz a1, .L31
-+ bltz a0, .L32
-+.L30:
-+ jal __udivdi3 /* The dividend is not negative. */
-+ move a0, a1
-+ jr t0
-+.L31:
-+ neg a1, a1
-+ bgez a0, .L30
-+.L32:
-+ neg a0, a0
-+ jal __udivdi3 /* The dividend is hella negative. */
-+ neg a0, a1
-+ jr t0
-+
-+#ifdef __riscv64
-+ /* continuation of __divsi3 */
-+.L20:
-+ sll t0, t0, 31
-+ bne a0, t0, __divdi3
-+ ret
-+#endif
-diff -urN empty/libgcc/config/riscv/muldi3.S gcc-5.3.0/libgcc/config/riscv/muldi3.S
---- empty/libgcc/config/riscv/muldi3.S 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/muldi3.S 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,21 @@
-+ .text
-+ .align 2
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
-+# define __muldi3 __mulsi3
-+#endif
-+
-+ .globl __muldi3
-+__muldi3:
-+ mv a2, a0
-+ li a0, 0
-+.L1:
-+ andi a3, a1, 1
-+ beqz a3, .L2
-+ add a0, a0, a2
-+.L2:
-+ srli a1, a1, 1
-+ slli a2, a2, 1
-+ bnez a1, .L1
-+ ret
-diff -urN empty/libgcc/config/riscv/multi3.S gcc-5.3.0/libgcc/config/riscv/multi3.S
---- empty/libgcc/config/riscv/multi3.S 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/multi3.S 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,56 @@
-+ .text
-+ .align 2
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
-+# define __multi3 __muldi3
-+#endif
-+
-+ .globl __multi3
-+__multi3:
-+
-+#ifndef __riscv64
-+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
-+# define __muldi3 __mulsi3
-+#endif
-+
-+/* We rely on the fact that __muldi3 doesn't clobber the t-registers. */
-+
-+ mv t0, ra
-+ mv t5, a0
-+ mv a0, a1
-+ mv t6, a3
-+ mv a1, t5
-+ mv a4, a2
-+ li a5, 0
-+ li t2, 0
-+ li t4, 0
-+.L1:
-+ add a6, t2, a1
-+ andi t3, a4, 1
-+ slli a7, a5, 1
-+ slti t1, a1, 0
-+ srli a4, a4, 1
-+ add a5, t4, a5
-+ beqz t3, .L2
-+ sltu t3, a6, t2
-+ mv t2, a6
-+ add t4, t3, a5
-+.L2:
-+ slli a1, a1, 1
-+ or a5, t1, a7
-+ bnez a4, .L1
-+ beqz a0, .L3
-+ mv a1, a2
-+ call __muldi3
-+ add t4, t4, a0
-+.L3:
-+ beqz t6, .L4
-+ mv a1, t6
-+ mv a0, t5
-+ call __muldi3
-+ add t4, t4, a0
-+.L4:
-+ mv a0, t2
-+ mv a1, t4
-+ jr t0
-diff -urN empty/libgcc/config/riscv/riscv-fp.c gcc-5.3.0/libgcc/config/riscv/riscv-fp.c
---- empty/libgcc/config/riscv/riscv-fp.c 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/riscv-fp.c 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,178 @@
-+/* Functions needed for soft-float on riscv-linux. Based on
-+ rs6000/ppc64-fp.c with TF types removed.
-+
-+ Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-+ 2000, 2001, 2002, 2003, 2004, 2006, 2009 Free Software Foundation,
-+ Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify it under
-+the terms of the GNU General Public License as published by the Free
-+Software Foundation; either version 3, or (at your option) any later
-+version.
-+
-+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-+WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-+for more details.
-+
-+Under Section 7 of GPL version 3, you are granted additional
-+permissions described in the GCC Runtime Library Exception, version
-+3.1, as published by the Free Software Foundation.
-+
-+You should have received a copy of the GNU General Public License and
-+a copy of the GCC Runtime Library Exception along with this program;
-+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
-+<http://www.gnu.org/licenses/>. */
-+
-+#if defined(__riscv64)
-+#include "fp-bit.h"
-+
-+extern DItype __fixdfdi (DFtype);
-+extern DItype __fixsfdi (SFtype);
-+extern USItype __fixunsdfsi (DFtype);
-+extern USItype __fixunssfsi (SFtype);
-+extern DFtype __floatdidf (DItype);
-+extern DFtype __floatundidf (UDItype);
-+extern SFtype __floatdisf (DItype);
-+extern SFtype __floatundisf (UDItype);
-+
-+static DItype local_fixunssfdi (SFtype);
-+static DItype local_fixunsdfdi (DFtype);
-+
-+DItype
-+__fixdfdi (DFtype a)
-+{
-+ if (a < 0)
-+ return - local_fixunsdfdi (-a);
-+ return local_fixunsdfdi (a);
-+}
-+
-+DItype
-+__fixsfdi (SFtype a)
-+{
-+ if (a < 0)
-+ return - local_fixunssfdi (-a);
-+ return local_fixunssfdi (a);
-+}
-+
-+USItype
-+__fixunsdfsi (DFtype a)
-+{
-+ if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
-+ return (SItype) a;
-+}
-+
-+USItype
-+__fixunssfsi (SFtype a)
-+{
-+ if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
-+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
-+ return (SItype) a;
-+}
-+
-+DFtype
-+__floatdidf (DItype u)
-+{
-+ DFtype d;
-+
-+ d = (SItype) (u >> (sizeof (SItype) * 8));
-+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+ return d;
-+}
-+
-+DFtype
-+__floatundidf (UDItype u)
-+{
-+ DFtype d;
-+
-+ d = (USItype) (u >> (sizeof (SItype) * 8));
-+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+ return d;
-+}
-+
-+SFtype
-+__floatdisf (DItype u)
-+{
-+ DFtype f;
-+
-+ if (53 < (sizeof (DItype) * 8)
-+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
-+ {
-+ if (! (- ((DItype) 1 << 53) < u
-+ && u < ((DItype) 1 << 53)))
-+ {
-+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
-+ {
-+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
-+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
-+ }
-+ }
-+ }
-+ f = (SItype) (u >> (sizeof (SItype) * 8));
-+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+ return (SFtype) f;
-+}
-+
-+SFtype
-+__floatundisf (UDItype u)
-+{
-+ DFtype f;
-+
-+ if (53 < (sizeof (DItype) * 8)
-+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
-+ {
-+ if (u >= ((UDItype) 1 << 53))
-+ {
-+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
-+ {
-+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
-+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
-+ }
-+ }
-+ }
-+ f = (USItype) (u >> (sizeof (SItype) * 8));
-+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
-+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
-+
-+ return (SFtype) f;
-+}
-+
-+/* This version is needed to prevent recursion; fixunsdfdi in libgcc
-+ calls fixdfdi, which in turn calls calls fixunsdfdi. */
-+
-+static DItype
-+local_fixunsdfdi (DFtype a)
-+{
-+ USItype hi, lo;
-+
-+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
-+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
-+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
-+}
-+
-+/* This version is needed to prevent recursion; fixunssfdi in libgcc
-+ calls fixsfdi, which in turn calls calls fixunssfdi. */
-+
-+static DItype
-+local_fixunssfdi (SFtype original_a)
-+{
-+ DFtype a = original_a;
-+ USItype hi, lo;
-+
-+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
-+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
-+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
-+}
-+
-+#endif
-diff -urN empty/libgcc/config/riscv/save-restore.S gcc-5.3.0/libgcc/config/riscv/save-restore.S
---- empty/libgcc/config/riscv/save-restore.S 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/save-restore.S 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,220 @@
-+ .text
-+
-+ .globl __riscv_save_12
-+ .globl __riscv_save_11
-+ .globl __riscv_save_10
-+ .globl __riscv_save_9
-+ .globl __riscv_save_8
-+ .globl __riscv_save_7
-+ .globl __riscv_save_6
-+ .globl __riscv_save_5
-+ .globl __riscv_save_4
-+ .globl __riscv_save_3
-+ .globl __riscv_save_2
-+ .globl __riscv_save_1
-+ .globl __riscv_save_0
-+
-+ .globl __riscv_restore_12
-+ .globl __riscv_restore_11
-+ .globl __riscv_restore_10
-+ .globl __riscv_restore_9
-+ .globl __riscv_restore_8
-+ .globl __riscv_restore_7
-+ .globl __riscv_restore_6
-+ .globl __riscv_restore_5
-+ .globl __riscv_restore_4
-+ .globl __riscv_restore_3
-+ .globl __riscv_restore_2
-+ .globl __riscv_restore_1
-+ .globl __riscv_restore_0
-+
-+#ifdef __riscv64
-+
-+__riscv_save_12:
-+ addi sp, sp, -112
-+ li t1, 0
-+ sd s11, 8(sp)
-+ j .Ls10
-+
-+__riscv_save_11:
-+__riscv_save_10:
-+ addi sp, sp, -112
-+ li t1, -16
-+.Ls10:
-+ sd s10, 16(sp)
-+ sd s9, 24(sp)
-+ j .Ls8
-+
-+__riscv_save_9:
-+__riscv_save_8:
-+ addi sp, sp, -112
-+ li t1, -32
-+.Ls8:
-+ sd s8, 32(sp)
-+ sd s7, 40(sp)
-+ j .Ls6
-+
-+__riscv_save_7:
-+__riscv_save_6:
-+ addi sp, sp, -112
-+ li t1, -48
-+.Ls6:
-+ sd s6, 48(sp)
-+ sd s5, 56(sp)
-+ j .Ls4
-+
-+__riscv_save_5:
-+__riscv_save_4:
-+ addi sp, sp, -112
-+ li t1, -64
-+.Ls4:
-+ sd s4, 64(sp)
-+ sd s3, 72(sp)
-+ j .Ls2
-+
-+__riscv_save_3:
-+__riscv_save_2:
-+ addi sp, sp, -112
-+ li t1, -80
-+.Ls2:
-+ sd s2, 80(sp)
-+ sd s1, 88(sp)
-+ sd s0, 96(sp)
-+ sd ra, 104(sp)
-+ sub sp, sp, t1
-+ jr t0
-+
-+__riscv_save_1:
-+__riscv_save_0:
-+ addi sp, sp, -16
-+ sd s0, 0(sp)
-+ sd ra, 8(sp)
-+ jr t0
-+
-+__riscv_restore_12:
-+ ld s11, 8(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_11:
-+__riscv_restore_10:
-+ ld s10, 0(sp)
-+ ld s9, 8(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_9:
-+__riscv_restore_8:
-+ ld s8, 0(sp)
-+ ld s7, 8(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_7:
-+__riscv_restore_6:
-+ ld s6, 0(sp)
-+ ld s5, 8(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_5:
-+__riscv_restore_4:
-+ ld s4, 0(sp)
-+ ld s3, 8(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_3:
-+__riscv_restore_2:
-+ ld s2, 0(sp)
-+ ld s1, 8(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_1:
-+__riscv_restore_0:
-+ ld s0, 0(sp)
-+ ld ra, 8(sp)
-+ addi sp, sp, 16
-+ ret
-+
-+#else
-+
-+__riscv_save_12:
-+ addi sp, sp, -64
-+ li t1, 0
-+ sw s11, 12(sp)
-+ j .Ls10
-+
-+__riscv_save_11:
-+__riscv_save_10:
-+__riscv_save_9:
-+__riscv_save_8:
-+ addi sp, sp, -64
-+ li t1, -16
-+.Ls10:
-+ sw s10, 16(sp)
-+ sw s9, 20(sp)
-+ sw s8, 24(sp)
-+ sw s7, 28(sp)
-+ j .Ls6
-+
-+__riscv_save_7:
-+__riscv_save_6:
-+__riscv_save_5:
-+__riscv_save_4:
-+ addi sp, sp, -64
-+ li t1, -32
-+.Ls6:
-+ sw s6, 32(sp)
-+ sw s5, 36(sp)
-+ sw s4, 40(sp)
-+ sw s3, 44(sp)
-+ sw s2, 48(sp)
-+ sw s1, 52(sp)
-+ sw s0, 56(sp)
-+ sw ra, 60(sp)
-+ sub sp, sp, t1
-+ jr t0
-+
-+__riscv_save_3:
-+__riscv_save_2:
-+__riscv_save_1:
-+__riscv_save_0:
-+ addi sp, sp, -16
-+ sw s2, 0(sp)
-+ sw s1, 4(sp)
-+ sw s0, 8(sp)
-+ sw ra, 12(sp)
-+ jr t0
-+
-+__riscv_restore_12:
-+ lw s11, 12(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_11:
-+__riscv_restore_10:
-+__riscv_restore_9:
-+__riscv_restore_8:
-+ lw s10, 0(sp)
-+ lw s9, 4(sp)
-+ lw s8, 8(sp)
-+ lw s7, 12(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_7:
-+__riscv_restore_6:
-+__riscv_restore_5:
-+__riscv_restore_4:
-+ lw s6, 0(sp)
-+ lw s5, 4(sp)
-+ lw s4, 8(sp)
-+ lw s3, 12(sp)
-+ addi sp, sp, 16
-+
-+__riscv_restore_3:
-+__riscv_restore_2:
-+__riscv_restore_1:
-+__riscv_restore_0:
-+ lw s2, 0(sp)
-+ lw s1, 4(sp)
-+ lw s0, 8(sp)
-+ lw ra, 12(sp)
-+ addi sp, sp, 16
-+ ret
-+
-+#endif
-diff -urN empty/libgcc/config/riscv/t-dpbit gcc-5.3.0/libgcc/config/riscv/t-dpbit
---- empty/libgcc/config/riscv/t-dpbit 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-dpbit 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4 @@
-+LIB2ADD += dp-bit.c
-+
-+dp-bit.c: $(srcdir)/fp-bit.c
-+ cat $(srcdir)/fp-bit.c > dp-bit.c
-diff -urN empty/libgcc/config/riscv/t-elf gcc-5.3.0/libgcc/config/riscv/t-elf
---- empty/libgcc/config/riscv/t-elf 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-elf 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,5 @@
-+LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
-+ $(srcdir)/config/riscv/save-restore.S \
-+ $(srcdir)/config/riscv/muldi3.S \
-+ $(srcdir)/config/riscv/multi3.S \
-+ $(srcdir)/config/riscv/div.S
-diff -urN empty/libgcc/config/riscv/t-elf32 gcc-5.3.0/libgcc/config/riscv/t-elf32
---- empty/libgcc/config/riscv/t-elf32 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-elf32 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,4 @@
-+LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
-+
-+HOST_LIBGCC2_CFLAGS += -m32
-+CRTSTUFF_CFLAGS += -m32
-diff -urN empty/libgcc/config/riscv/t-elf64 gcc-5.3.0/libgcc/config/riscv/t-elf64
---- empty/libgcc/config/riscv/t-elf64 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-elf64 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,2 @@
-+LIB2FUNCS_EXCLUDE += _divdi3 _moddi3 _udivdi3 _umoddi3 _muldi3 _multi3 \
-+ _divsi3 _modsi3 _udivsi3 _umodsi3 \
-diff -urN empty/libgcc/config/riscv/t-fpbit gcc-5.3.0/libgcc/config/riscv/t-fpbit
---- empty/libgcc/config/riscv/t-fpbit 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-fpbit 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,5 @@
-+LIB2ADD += fp-bit.c
-+
-+fp-bit.c: $(srcdir)/fp-bit.c
-+ echo '#define FLOAT' > fp-bit.c
-+ cat $(srcdir)/fp-bit.c >> fp-bit.c
-diff -urN empty/libgcc/config/riscv/t-tpbit gcc-5.3.0/libgcc/config/riscv/t-tpbit
---- empty/libgcc/config/riscv/t-tpbit 1970-01-01 08:00:00.000000000 +0800
-+++ gcc-5.3.0/libgcc/config/riscv/t-tpbit 2016-04-02 14:07:12.472438058 +0800
-@@ -0,0 +1,10 @@
-+LIB2ADD += tp-bit.c
-+
-+tp-bit.c: $(srcdir)/fp-bit.c
-+ echo '#ifdef _RISCVEL' > tp-bit.c
-+ echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c
-+ echo '#endif' >> tp-bit.c
-+ echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c
-+ echo '# define TFLOAT' >> tp-bit.c
-+ cat $(srcdir)/fp-bit.c >> tp-bit.c
-+ echo '#endif' >> tp-bit.c
diff --git a/util/crossgcc/patches/gcc-6.2.0_elf_biarch.patch b/util/crossgcc/patches/gcc-6.2.0_elf_biarch.patch
new file mode 100644
index 0000000..226aed9
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.2.0_elf_biarch.patch
@@ -0,0 +1,87 @@
+diff -urN gcc-4.9.2/gcc/config/i386/t-elf64 gcc-4.9.2/gcc/config/i386/t-elf64
+--- gcc-4.9.2/gcc/config/i386/t-elf64 1969-12-31 16:00:00.000000000 -0800
++++ gcc-6.1.0/gcc/config/i386/t-elf64 2015-06-17 11:20:08.032513005 -0700
+@@ -0,0 +1,38 @@
++# Copyright (C) 2002-2014 Free Software Foundation, Inc.
++#
++# This file is part of GCC.
++#
++# GCC is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3, or (at your option)
++# any later version.
++#
++# GCC is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3. If not see
++# <http://www.gnu.org/licenses/>.
++
++# On Debian, Ubuntu and other derivative distributions, the 32bit libraries
++# are found in /lib32 and /usr/lib32, /lib64 and /usr/lib64 are symlinks to
++# /lib and /usr/lib, while other distributions install libraries into /lib64
++# and /usr/lib64. The LSB does not enforce the use of /lib64 and /usr/lib64,
++# it doesn't tell anything about the 32bit libraries on those systems. Set
++# MULTILIB_OSDIRNAMES according to what is found on the target.
++
++# To support i386, x86-64 and x32 libraries, the directory structure
++# should be:
++#
++# /lib has i386 libraries.
++# /lib64 has x86-64 libraries.
++# /libx32 has x32 libraries.
++#
++comma=,
++MULTILIB_OPTIONS = $(subst $(comma),/,$(TM_MULTILIB_CONFIG))
++MULTILIB_DIRNAMES = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS)))
++MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-elf)
++MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-elf)
++MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-elf-x32)
+diff -urN gcc-4.9.2/gcc/config.gcc gcc-4.9.2/gcc/config.gcc
+--- gcc-4.9.2/gcc/config.gcc 2015-06-17 11:20:57.841008182 -0700
++++ gcc-6.1.0/gcc/config.gcc 2015-06-17 11:17:24.818890200 -0700
+@@ -1353,6 +1353,30 @@
+ ;;
+ x86_64-*-elf*)
+ tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h"
++ tmake_file="${tmake_file} i386/t-elf64"
++ x86_multilibs="${with_multilib_list}"
++ if test "$x86_multilibs" = "default"; then
++ case ${with_abi} in
++ x32 | mx32)
++ x86_multilibs="mx32"
++ ;;
++ *)
++ x86_multilibs="m64,m32"
++ ;;
++ esac
++ fi
++ x86_multilibs=`echo $x86_multilibs | sed -e 's/,/ /g'`
++ for x86_multilib in ${x86_multilibs}; do
++ case ${x86_multilib} in
++ m32 | m64 | mx32)
++ TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${x86_multilib}"
++ ;;
++ *)
++ echo "--with-multilib-list=${x86_with_multilib} not supported."
++ exit 1
++ esac
++ done
++ TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'`
+ ;;
+ i[34567]86-*-rdos*)
+ tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/rdos.h"
+--- gcc-6.1.0/gcc/config/i386/x86-64.h.orig 2015-08-20 17:17:34.555919593 +0200
++++ gcc-6.1.0/gcc/config/i386/x86-64.h 2015-08-20 17:17:42.615908670 +0200
+@@ -49,7 +49,7 @@
+ #define WCHAR_TYPE_SIZE 32
+
+ #undef ASM_SPEC
+-#define ASM_SPEC "%{m32:--32} %{m64:--64} %{mx32:--x32}"
++#define ASM_SPEC "%{m16|m32:--32} %{m64:--64} %{mx32:--x32}"
+
+ #undef ASM_OUTPUT_ALIGNED_BSS
+ #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
diff --git a/util/crossgcc/patches/gcc-6.2.0_gnat.patch b/util/crossgcc/patches/gcc-6.2.0_gnat.patch
new file mode 100644
index 0000000..ac1e26a
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.2.0_gnat.patch
@@ -0,0 +1,11 @@
+--- gcc-6.1.0/gcc/ada/gcc-interface/Make-lang.in.bak 2015-08-24 16:23:25.004493665 +0200
++++ gcc-6.1.0/gcc/ada/gcc-interface/Make-lang.in 2015-08-24 17:53:52.496636113 +0200
+@@ -45,7 +45,7 @@
+
+
+ # Extra flags to pass to recursive makes.
+-COMMON_ADAFLAGS= -gnatpg
++COMMON_ADAFLAGS= -gnatpg -gnatwG
+ ifeq ($(TREECHECKING),)
+ CHECKING_ADAFLAGS=
+ else
diff --git a/util/crossgcc/patches/gcc-6.2.0_libgcc.patch b/util/crossgcc/patches/gcc-6.2.0_libgcc.patch
new file mode 100644
index 0000000..1b0b8a4
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.2.0_libgcc.patch
@@ -0,0 +1,57 @@
+diff -urN gcc-5.2.0.orig/libgcc/config/t-hardfp gcc-5.2.0/libgcc/config/t-hardfp
+--- gcc-5.2.0.orig/libgcc/config/t-hardfp 2015-01-05 04:33:28.000000000 -0800
++++ gcc-6.1.0/libgcc/config/t-hardfp 2016-04-06 12:04:51.000000000 -0700
+@@ -59,21 +59,52 @@
+
+ hardfp_func_list := $(filter-out $(hardfp_exclusions),$(hardfp_func_list))
+
++HOST_OS ?= $(shell uname)
++
+ # Regexp for matching a floating-point mode.
++ifeq ($(HOST_OS), Darwin)
++hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
++else
++ifeq ($(HOST_OS), FreeBSD)
++hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /|/g')
++else
+ hardfp_mode_regexp := $(shell echo $(hardfp_float_modes) | sed 's/ /\\|/g')
++endif
++endif
+
+ # Regexp for matching the end of a function name, after the last
+ # floating-point mode.
++ifeq ($(HOST_OS), Darwin)
++hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
++else
++ifeq ($(HOST_OS), FreeBSD)
++hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /|/g')
++else
+ hardfp_suffix_regexp := $(shell echo $(hardfp_int_modes) 2 3 | sed 's/ /\\|/g')
++endif
++endif
+
+ # Add -D options to define:
+ # FUNC: the function name (e.g. __addsf3)
+ # OP: the function name without the leading __ and with the last
+ # floating-point mode removed (e.g. add3)
+ # TYPE: the last floating-point mode (e.g. sf)
++
++ifeq ($(HOST_OS), Darwin)
+ hardfp_defines_for = \
+ $(shell echo $1 | \
+- sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++ sed -E 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++else
++ifeq ($(HOST_OS), FreeBSD)
++hardfp_defines_for = \
++ $(shell echo $1 | \
++ sed -r 's/(.*)($(hardfp_mode_regexp))($(hardfp_suffix_regexp)|.*)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++else
++hardfp_defines_for = \
++ $(shell echo $1 | \
++ sed 's/\(.*\)\($(hardfp_mode_regexp)\)\($(hardfp_suffix_regexp)\|\)$$/-DFUNC=__& -DOP_\1\3 -DTYPE=\2/')
++endif
++endif
+
+ hardfp-o = $(patsubst %,%$(objext),$(hardfp_func_list))
+ $(hardfp-o): %$(objext): $(srcdir)/config/hardfp.c
diff --git a/util/crossgcc/patches/gcc-6.2.0_nds32.patch b/util/crossgcc/patches/gcc-6.2.0_nds32.patch
new file mode 100644
index 0000000..cdfb02f
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.2.0_nds32.patch
@@ -0,0 +1,17 @@
+diff -urN gcc-6.1.0.orig/gcc/config/nds32/nds32.md gcc-6.1.0/gcc/config/nds32/nds32.md
+--- gcc-6.1.0.orig/gcc/config/nds32/nds32.md 2015-01-15 22:45:09.000000000 -0800
++++ gcc-6.1.0/gcc/config/nds32/nds32.md 2016-04-14 22:09:09.000000000 -0700
+@@ -2289,11 +2289,11 @@
+ emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2],
+ operands[4]));
+
+- operands[5] = gen_reg_rtx (SImode);
++ rtx tmp = gen_reg_rtx (SImode);
+ /* Step C, D, E, and F, using another temporary register operands[5]. */
+ emit_jump_insn (gen_casesi_internal (operands[0],
+ operands[3],
+- operands[5]));
++ tmp));
+ DONE;
+ })
+
diff --git a/util/crossgcc/patches/gcc-6.2.0_riscv.patch b/util/crossgcc/patches/gcc-6.2.0_riscv.patch
new file mode 100644
index 0000000..b0e44b0
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.2.0_riscv.patch
@@ -0,0 +1,10428 @@
+diff --git original-gcc/gcc/common/config/riscv/riscv-common.c gcc-6.2.0/gcc/common/config/riscv/riscv-common.c
+new file mode 100644
+index 0000000..bb3b5c8
+--- /dev/null
++++ gcc-6.2.0/gcc/common/config/riscv/riscv-common.c
+@@ -0,0 +1,172 @@
++/* Common hooks for RISC-V.
++ Copyright (C) 1989-2014 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "common/common-target.h"
++#include "common/common-target-def.h"
++#include "opts.h"
++#include "flags.h"
++#include "errors.h"
++
++/* Parse a RISC-V ISA string into an option mask. */
++
++static void
++riscv_parse_arch_string (const char *isa, int *flags)
++{
++ const char *p = isa;
++
++ if (strncasecmp (p, "RV32", 4) == 0)
++ *flags |= MASK_32BIT, p += 4;
++ else if (strncasecmp (p, "RV64", 4) == 0)
++ *flags &= ~MASK_32BIT, p += 4;
++ else if (strncasecmp (p, "RV", 2) == 0)
++ p += 2;
++
++ if (TOUPPER (*p) == 'G')
++ {
++ p++;
++
++ *flags |= MASK_MUL | MASK_DIV;
++ *flags |= MASK_ATOMIC;
++ *flags |= MASK_HARD_FLOAT;
++ *flags |= MASK_DOUBLE_FLOAT;
++ }
++ else if (TOUPPER (*p) == 'I')
++ {
++ p++;
++
++ *flags &= ~(MASK_MUL | MASK_DIV);
++ if (TOUPPER (*p) == 'M')
++ *flags |= (MASK_MUL | MASK_DIV), p++;
++
++ *flags &= ~MASK_ATOMIC;
++ if (TOUPPER (*p) == 'A')
++ *flags |= MASK_ATOMIC, p++;
++
++ *flags &= ~MASK_HARD_FLOAT;
++ if (TOUPPER (*p) == 'F')
++ {
++ *flags |= MASK_HARD_FLOAT, p++;
++
++ *flags &= ~MASK_DOUBLE_FLOAT;
++ if (TOUPPER (*p) == 'D')
++ {
++ *flags |= MASK_DOUBLE_FLOAT;
++ p++;
++ }
++ }
++ }
++ else
++ {
++ error ("-march=%s: invalid ISA string", isa);
++ return;
++ }
++
++ *flags &= ~MASK_RVC;
++ if (TOUPPER (*p) == 'C')
++ *flags |= MASK_RVC, p++;
++
++ /* FIXME: For now we just stop parsing when faced with a
++ non-standard RISC-V ISA extension. We might consider
++ ignoring it and passing it through to the assembler. */
++ if (TOUPPER (*p) == 'X')
++ return;
++
++ if (*p)
++ {
++ error ("-march=%s: unsupported ISA substring %s", isa, p);
++ return;
++ }
++}
++
++static int
++riscv_flags_from_arch_string (const char *isa)
++{
++ int flags = 0;
++ riscv_parse_arch_string (isa, &flags);
++ return flags;
++}
++
++/* Implement TARGET_HANDLE_OPTION. */
++
++static bool
++riscv_handle_option (struct gcc_options *opts,
++ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
++ const struct cl_decoded_option *decoded,
++ location_t loc ATTRIBUTE_UNUSED)
++{
++ switch (decoded->opt_index)
++ {
++ case OPT_march_:
++ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
++ return true;
++
++ case OPT_mmuldiv:
++ if (decoded->value)
++ opts->x_target_flags |= (MASK_MUL | MASK_DIV);
++ else
++ opts->x_target_flags &= ~(MASK_MUL | MASK_DIV);
++ return true;
++
++ case OPT_mno_float:
++ opts->x_target_flags &= ~(MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT);
++ return true;
++
++ case OPT_msingle_float:
++ /* In addition to enabling the F extension, disable the D extension. */
++ opts->x_target_flags &= ~MASK_DOUBLE_FLOAT;
++ return true;
++
++ case OPT_mdouble_float:
++ opts->x_target_flags |= MASK_HARD_FLOAT;
++ return true;
++
++ default:
++ return true;
++ }
++}
++
++/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
++static const struct default_options riscv_option_optimization_table[] =
++ {
++ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
++ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
++ { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
++ { OPT_LEVELS_NONE, 0, NULL, 0 }
++ };
++
++#undef TARGET_OPTION_OPTIMIZATION_TABLE
++#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
++
++#define STR(x) #x
++#define XSTR(x) STR (x)
++
++#undef TARGET_DEFAULT_TARGET_FLAGS
++#define TARGET_DEFAULT_TARGET_FLAGS \
++ (TARGET_DEFAULT \
++ | riscv_flags_from_arch_string (XSTR (TARGET_ARCH_STRING_DEFAULT)) \
++ | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
++
++#undef TARGET_HANDLE_OPTION
++#define TARGET_HANDLE_OPTION riscv_handle_option
++
++struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
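For orientation, here is a minimal standalone sketch of the -march string handling implemented above. The M_* bit values are illustrative placeholders standing in for the real MASK_* flags from riscv.opt (not part of this hunk), and parse_isa is a hypothetical helper, not code from the patch.

/* Illustrative sketch only; the real masks and option plumbing live in
   riscv.opt and in riscv_parse_arch_string above.  */
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

#define M_32BIT  (1 << 0)   /* stands in for MASK_32BIT */
#define M_MULDIV (1 << 1)   /* MASK_MUL | MASK_DIV */
#define M_ATOMIC (1 << 2)   /* MASK_ATOMIC */
#define M_FLOAT  (1 << 3)   /* MASK_HARD_FLOAT */
#define M_DOUBLE (1 << 4)   /* MASK_DOUBLE_FLOAT */
#define M_RVC    (1 << 5)   /* MASK_RVC */

static int up (char c) { return toupper ((unsigned char) c); }

static int
parse_isa (const char *p)
{
  int flags = 0;

  if (strncasecmp (p, "RV32", 4) == 0)
    flags |= M_32BIT, p += 4;
  else if (strncasecmp (p, "RV64", 4) == 0)
    p += 4;
  else if (strncasecmp (p, "RV", 2) == 0)
    p += 2;

  if (up (*p) == 'G')               /* G is shorthand for IMAFD */
    flags |= M_MULDIV | M_ATOMIC | M_FLOAT | M_DOUBLE, p++;
  else if (up (*p) == 'I')
    {
      p++;
      if (up (*p) == 'M') flags |= M_MULDIV, p++;
      if (up (*p) == 'A') flags |= M_ATOMIC, p++;
      if (up (*p) == 'F')
        {
          flags |= M_FLOAT, p++;
          if (up (*p) == 'D') flags |= M_DOUBLE, p++;
        }
    }

  if (up (*p) == 'C')               /* optional compressed extension */
    flags |= M_RVC;
  return flags;
}

int
main (void)
{
  printf ("RV64G   -> %#x\n", (unsigned) parse_isa ("RV64G"));   /* MULDIV|ATOMIC|FLOAT|DOUBLE */
  printf ("RV32IMC -> %#x\n", (unsigned) parse_isa ("RV32IMC")); /* 32BIT|MULDIV|RVC */
  return 0;
}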
+diff --git original-gcc/gcc/config.gcc gcc-6.2.0/gcc/config.gcc
+index 82cc9a9..b797239 100644
+--- original-gcc/gcc/config.gcc
++++ gcc-6.2.0/gcc/config.gcc
+@@ -453,6 +453,9 @@ powerpc*-*-*)
+ esac
+ extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ ;;
++riscv*)
++ cpu_type=riscv
++ ;;
+ rs6000*-*-*)
+ extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ ;;
+@@ -2028,6 +2031,20 @@ microblaze*-*-elf)
+ cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+ tmake_file="${tmake_file} microblaze/t-microblaze"
+ ;;
++riscv*-*-linux*)
++ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h"
++ tmake_file="${tmake_file} riscv/t-linux"
++ gnu_ld=yes
++ gas=yes
++ gcc_cv_initfini_array=yes
++ ;;
++riscv*-*-elf*)
++ tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
++ tmake_file="${tmake_file} riscv/t-elf"
++ gnu_ld=yes
++ gas=yes
++ gcc_cv_initfini_array=yes
++ ;;
+ mips*-*-netbsd*) # NetBSD/mips, either endian.
+ target_cpu_default="MASK_ABICALLS"
+ tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
+@@ -3959,6 +3976,81 @@ case "${target}" in
+ done
+ ;;
+
++ riscv*-*-*)
++ supported_defaults="arch float tune"
++
++ case "${with_arch}" in
++ "")
++ with_arch="G"
++ ;;
++ *)
++ ;;
++ esac
++
++ # Handle --with-float, or default to soft-float ABI unless the
++ # D extension is present; then, default to double-float ABI.
++ case "${with_float}" in
++ "")
++ case ${with_arch} in
++ "" | *g* | *G* | *d* | *D*)
++ with_float=double
++ ;;
++ *)
++ with_float=soft
++ ;;
++ esac
++ ;;
++ soft | single | double)
++ # OK
++ ;;
++ *)
++ echo "Unknown floating point type used in --with-float-abi=$with_float" 1>&2
++ exit 1
++ ;;
++ esac
++
++ # Set TARGET_64BIT_DEFAULT from --target.
++ case "${target}" in
++ riscv32*)
++ rv64=0
++ ;;
++ riscv64*)
++ rv64=1
++ ;;
++ *)
++ rv64=""
++ ;;
++ esac
++
++ # Or set TARGET_64BIT_DEFAULT from --with-arch.
++ case "`echo $with_arch | tr A-Z_ a-z-`" in
++ rv32)
++ if test "$rv64" = 1; then
++ echo "--with-arch and --target specify conflicting XLEN"
++ exit 1
++ fi
++ rv64=0
++ ;;
++ rv64)
++ if test "$rv64" = 0; then
++ echo "--with-arch and --target specify conflicting XLEN"
++ exit 1
++ fi
++ rv64=1
++ ;;
++ *)
++ ;;
++ esac
++
++ # Or set TARGET_64BIT_DEFAULT to 1.
++ if test "$rv64" = ""; then
++ rv64=1
++ fi
++
++ tm_defines="${tm_defines} TARGET_64BIT_DEFAULT=${rv64}"
++ tm_defines="${tm_defines} TARGET_ARCH_STRING_DEFAULT=${with_arch}"
++ ;;
++
+ mips*-*-*)
+ supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
+
+diff --git original-gcc/gcc/config/riscv/constraints.md gcc-6.2.0/gcc/config/riscv/constraints.md
+new file mode 100644
+index 0000000..19dbbd7
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/constraints.md
+@@ -0,0 +1,93 @@
++;; Constraint definitions for RISC-V target.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Register constraints
++
++(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
++ "A floating-point register (if available).")
++
++(define_register_constraint "b" "ALL_REGS"
++ "@internal")
++
++(define_register_constraint "j" "T_REGS"
++ "@internal")
++
++(define_register_constraint "l" "JALR_REGS"
++ "@internal")
++
++;; Integer constraints
++
++(define_constraint "Z"
++ "@internal"
++ (and (match_code "const_int")
++ (match_test "1")))
++
++(define_constraint "I"
++ "An I-type 12-bit signed immediate."
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (ival)")))
++
++(define_constraint "J"
++ "Integer zero."
++ (and (match_code "const_int")
++ (match_test "ival == 0")))
++
++;; Floating-point constraints
++
++(define_constraint "G"
++ "Floating-point zero."
++ (and (match_code "const_double")
++ (match_test "op == CONST0_RTX (mode)")))
++
++;; General constraints
++
++(define_constraint "Q"
++ "@internal"
++ (match_operand 0 "const_arith_operand"))
++
++(define_memory_constraint "A"
++ "An address that is held in a general-purpose register."
++ (and (match_code "mem")
++ (match_test "GET_CODE(XEXP(op,0)) == REG")))
++
++(define_constraint "S"
++ "@internal
++ A constant call address."
++ (and (match_operand 0 "call_insn_operand")
++ (match_test "CONSTANT_P (op)")))
++
++(define_constraint "T"
++ "@internal
++ A constant @code{move_operand}."
++ (and (match_operand 0 "move_operand")
++ (match_test "CONSTANT_P (op)")))
++
++(define_memory_constraint "W"
++ "@internal
++ A memory address based on a member of @code{BASE_REG_CLASS}."
++ (and (match_code "mem")
++ (match_operand 0 "memory_operand")))
++
++(define_constraint "YG"
++ "@internal
++ A vector zero."
++ (and (match_code "const_vector")
++ (match_test "op == CONST0_RTX (mode)")))
+diff --git original-gcc/gcc/config/riscv/elf.h gcc-6.2.0/gcc/config/riscv/elf.h
+new file mode 100644
+index 0000000..491ec8b
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/elf.h
+@@ -0,0 +1,31 @@
++/* Target macros for riscv*-elf targets.
++ Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
++ Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* Leave the linker script to choose the appropriate libraries. */
++#undef LIB_SPEC
++#define LIB_SPEC ""
++
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC "crtend%O%s"
++
++#define NO_IMPLICIT_EXTERN_C 1
+diff --git original-gcc/gcc/config/riscv/generic.md gcc-6.2.0/gcc/config/riscv/generic.md
+new file mode 100644
+index 0000000..b2b0a42
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/generic.md
+@@ -0,0 +1,78 @@
++;; Generic DFA-based pipeline description for RISC-V targets.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++;; License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++
++(define_automaton "pipe0")
++(define_cpu_unit "alu" "pipe0")
++(define_cpu_unit "imuldiv" "pipe0")
++(define_cpu_unit "fdivsqrt" "pipe0")
++
++(define_insn_reservation "generic_alu" 1
++ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
++ "alu")
++
++(define_insn_reservation "generic_load" 3
++ (eq_attr "type" "load,fpload")
++ "alu")
++
++(define_insn_reservation "generic_store" 1
++ (eq_attr "type" "store,fpstore")
++ "alu")
++
++(define_insn_reservation "generic_xfer" 3
++ (eq_attr "type" "mfc,mtc,fcvt,fmove,fcmp")
++ "alu")
++
++(define_insn_reservation "generic_branch" 1
++ (eq_attr "type" "branch,jump,call")
++ "alu")
++
++(define_insn_reservation "generic_imul" 10
++ (eq_attr "type" "imul")
++ "imuldiv*10")
++
++(define_insn_reservation "generic_idivsi" 34
++ (and (eq_attr "type" "idiv")
++ (eq_attr "mode" "SI"))
++ "imuldiv*34")
++
++(define_insn_reservation "generic_idivdi" 66
++ (and (eq_attr "type" "idiv")
++ (eq_attr "mode" "DI"))
++ "imuldiv*66")
++
++(define_insn_reservation "generic_fmul_single" 5
++ (and (eq_attr "type" "fadd,fmul,fmadd")
++ (eq_attr "mode" "SF"))
++ "alu")
++
++(define_insn_reservation "generic_fmul_double" 7
++ (and (eq_attr "type" "fadd,fmul,fmadd")
++ (eq_attr "mode" "DF"))
++ "alu")
++
++(define_insn_reservation "generic_fdiv" 20
++ (eq_attr "type" "fdiv")
++ "fdivsqrt*20")
++
++(define_insn_reservation "generic_fsqrt" 25
++ (eq_attr "type" "fsqrt")
++ "fdivsqrt*25")
+diff --git original-gcc/gcc/config/riscv/linux.h gcc-6.2.0/gcc/config/riscv/linux.h
+new file mode 100644
+index 0000000..4231212
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/linux.h
+@@ -0,0 +1,63 @@
++/* Definitions for RISC-V GNU/Linux systems with ELF format.
++ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
++ 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#undef WCHAR_TYPE
++#define WCHAR_TYPE "int"
++
++#undef WCHAR_TYPE_SIZE
++#define WCHAR_TYPE_SIZE 32
++
++#define TARGET_OS_CPP_BUILTINS() \
++ do { \
++ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
++ /* The GNU C++ standard library requires this. */ \
++ if (c_dialect_cxx ()) \
++ builtin_define ("_GNU_SOURCE"); \
++ } while (0)
++
++#undef SUBTARGET_CPP_SPEC
++#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
++
++#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
++#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
++
++#undef LINK_SPEC
++#define LINK_SPEC "\
++%{shared} \
++ %{!shared: \
++ %{!static: \
++ %{rdynamic:-export-dynamic} \
++ %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
++ %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
++ %{static:-static}} \
++%{" OPT_ARCH64 ":-melf64lriscv} \
++%{" OPT_ARCH32 ":-melf32lriscv}"
++
++#undef LIB_SPEC
++#define LIB_SPEC "\
++%{pthread:-lpthread} \
++%{shared:-lc} \
++%{!shared: \
++ %{profile:-lc_p} %{!profile:-lc}}"
++
++/* Similar to standard Linux, but adding -ffast-math support. */
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC \
++ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+diff --git original-gcc/gcc/config/riscv/peephole.md gcc-6.2.0/gcc/config/riscv/peephole.md
+new file mode 100644
+index 0000000..898cbbd
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/peephole.md
+@@ -0,0 +1,121 @@
++;;........................
++;; DI -> SI optimizations
++;;........................
++
++;; Simplify (int)(a + 1), etc.
++(define_peephole2
++ [(set (match_operand:DI 0 "register_operand")
++ (match_operator:DI 4 "modular_operator"
++ [(match_operand:DI 1 "register_operand")
++ (match_operand:DI 2 "arith_operand")]))
++ (set (match_operand:SI 3 "register_operand")
++ (truncate:SI (match_dup 0)))]
++ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
++ && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
++ [(set (match_dup 3)
++ (truncate:SI
++ (match_op_dup:DI 4
++ [(match_operand:DI 1 "register_operand")
++ (match_operand:DI 2 "arith_operand")])))])
++
++;; Simplify (int)a + 1, etc.
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand")
++ (truncate:SI (match_operand:DI 1 "register_operand")))
++ (set (match_operand:SI 3 "register_operand")
++ (match_operator:SI 4 "modular_operator"
++ [(match_dup 0)
++ (match_operand:SI 2 "arith_operand")]))]
++ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
++ [(set (match_dup 3)
++ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
++
++;; Simplify -(int)a, etc.
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand")
++ (truncate:SI (match_operand:DI 2 "register_operand")))
++ (set (match_operand:SI 3 "register_operand")
++ (match_operator:SI 4 "modular_operator"
++ [(match_operand:SI 1 "reg_or_0_operand")
++ (match_dup 0)]))]
++ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
++ [(set (match_dup 3)
++ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
++
++;; Simplify (unsigned long)(unsigned int)a << const
++(define_peephole2
++ [(set (match_operand:DI 0 "register_operand")
++ (ashift:DI (match_operand:DI 1 "register_operand")
++ (match_operand 2 "const_int_operand")))
++ (set (match_operand:DI 3 "register_operand")
++ (lshiftrt:DI (match_dup 0) (match_dup 2)))
++ (set (match_operand:DI 4 "register_operand")
++ (ashift:DI (match_dup 3) (match_operand 5 "const_int_operand")))]
++ "TARGET_64BIT
++ && INTVAL (operands[5]) < INTVAL (operands[2])
++ && (REGNO (operands[3]) == REGNO (operands[4])
++ || peep2_reg_dead_p (3, operands[3]))"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (match_dup 2)))
++ (set (match_dup 4)
++ (lshiftrt:DI (match_dup 0) (match_operand 5)))]
++{
++ operands[5] = GEN_INT (INTVAL (operands[2]) - INTVAL (operands[5]));
++})
++
++;; Simplify PIC loads to static variables.
++;; These will go away once we figure out how to emit auipc discretely.
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYI 0 "register_operand" "=r")
++ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
++ "USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>\t%0,%1"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>\t%0,%1,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>\t%0,%1,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_loadu<mode>"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
++ (zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
++ "USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>u\t%0,%1"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storedi<mode>"
++ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%z1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storesi<mode>"
++ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "!TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%z1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storedi<mode>"
++ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYF 1 "register_operand" "f"))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storesi<mode>"
++ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYF 1 "register_operand" "f"))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%1,%0,%2"
++ [(set (attr "length") (const_int 8))])
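To make the DI -> SI peepholes above concrete: the first pattern matches 64-bit arithmetic whose result is immediately truncated to 32 bits, as in the C fragment below. This is a schematic illustration; the RTL in the comments is paraphrased, not a real dump. The peep2_reg_dead_p guard ensures the wide intermediate is rewritten only when nothing else needs it.

/* On RV64, the add below is a DImode operation and the cast a truncate;
   the peephole folds them so the result can come from a single word op.  */
int
narrow_add (long a)
{
  long t = a + 1;   /* (set (reg:DI t) (plus:DI (reg:DI a) (const_int 1))) */
  return (int) t;   /* (set (reg:SI r) (truncate:SI (reg:DI t)))           */
}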
+diff --git original-gcc/gcc/config/riscv/predicates.md gcc-6.2.0/gcc/config/riscv/predicates.md
+new file mode 100644
+index 0000000..0ed8a4a
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/predicates.md
+@@ -0,0 +1,186 @@
++;; Predicate description for RISC-V target.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_predicate "const_arith_operand"
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (INTVAL (op))")))
++
++(define_predicate "arith_operand"
++ (ior (match_operand 0 "const_arith_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "sle_operand"
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
++
++(define_predicate "sleu_operand"
++ (and (match_operand 0 "sle_operand")
++ (match_test "INTVAL (op) + 1 != 0")))
++
++(define_predicate "const_0_operand"
++ (and (match_code "const_int,const_double,const_vector")
++ (match_test "op == CONST0_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_0_operand"
++ (ior (match_operand 0 "const_0_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "const_1_operand"
++ (and (match_code "const_int,const_double,const_vector")
++ (match_test "op == CONST1_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_1_operand"
++ (ior (match_operand 0 "const_1_operand")
++ (match_operand 0 "register_operand")))
++
++;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
++(define_predicate "branch_on_bit_operand"
++ (and (match_code "const_int")
++ (match_test "INTVAL (op) >= IMM_BITS - 1")))
++
++;; This is used for indexing into vectors, and hence only accepts const_int.
++(define_predicate "const_0_or_1_operand"
++ (and (match_code "const_int")
++ (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
++ (match_test "op == CONST1_RTX (GET_MODE (op))"))))
++
++(define_special_predicate "pc_or_label_operand"
++ (match_code "pc,label_ref"))
++
++;; A legitimate CONST_INT operand that takes more than one instruction
++;; to load.
++(define_predicate "splittable_const_int_operand"
++ (match_code "const_int")
++{
++ /* Don't handle multi-word moves this way; we don't want to introduce
++ the individual word-mode moves until after reload. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ return false;
++
++ /* Otherwise check whether the constant can be loaded in a single
++ instruction. */
++ return !LUI_OPERAND (INTVAL (op)) && !SMALL_OPERAND (INTVAL (op));
++})
++
++(define_predicate "move_operand"
++ (match_operand 0 "general_operand")
++{
++ enum riscv_symbol_type symbol_type;
++
++ /* The thinking here is as follows:
++
++ (1) The move expanders should split complex load sequences into
++ individual instructions. Those individual instructions can
++ then be optimized by all rtl passes.
++
++ (2) The target of pre-reload load sequences should not be used
++ to store temporary results. If the target register is only
++ assigned one value, reload can rematerialize that value
++ on demand, rather than spill it to the stack.
++
++ (3) If we allowed pre-reload passes like combine and cse to recreate
++ complex load sequences, we would want to be able to split the
++ sequences before reload as well, so that the pre-reload scheduler
++ can see the individual instructions. This falls foul of (2);
++ the splitter would be forced to reuse the target register for
++ intermediate results.
++
++ (4) We want to define complex load splitters for combine. These
++ splitters can request a temporary scratch register, which avoids
++ the problem in (2). They allow things like:
++
++ (set (reg T1) (high SYM))
++ (set (reg T2) (low (reg T1) SYM))
++ (set (reg X) (plus (reg T2) (const_int OFFSET)))
++
++ to be combined into:
++
++ (set (reg T3) (high SYM+OFFSET))
++ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
++
++ if T2 is only used this once. */
++ switch (GET_CODE (op))
++ {
++ case CONST_INT:
++ return !splittable_const_int_operand (op, mode);
++
++ case CONST:
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return riscv_symbolic_constant_p (op, &symbol_type)
++ && !riscv_split_symbol_type (symbol_type);
++
++ case HIGH:
++ op = XEXP (op, 0);
++ return riscv_symbolic_constant_p (op, &symbol_type)
++ && riscv_split_symbol_type (symbol_type)
++ && symbol_type != SYMBOL_PCREL;
++
++ default:
++ return true;
++ }
++})
++
++(define_predicate "consttable_operand"
++ (match_test "CONSTANT_P (op)"))
++
++(define_predicate "symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return riscv_symbolic_constant_p (op, &type);
++})
++
++(define_predicate "absolute_symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return (riscv_symbolic_constant_p (op, &type)
++ && (type == SYMBOL_ABSOLUTE || type == SYMBOL_PCREL));
++})
++
++(define_predicate "plt_symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return (riscv_symbolic_constant_p (op, &type)
++ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
++})
++
++(define_predicate "call_insn_operand"
++ (ior (match_operand 0 "absolute_symbolic_operand")
++ (match_operand 0 "plt_symbolic_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "symbol_ref_operand"
++ (match_code "symbol_ref"))
++
++(define_predicate "modular_operator"
++ (match_code "plus,minus,mult,ashift"))
++
++(define_predicate "equality_operator"
++ (match_code "eq,ne"))
++
++(define_predicate "order_operator"
++ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
++
++(define_predicate "fp_order_operator"
++ (match_code "eq,ne,lt,le,gt,ge"))
+diff --git original-gcc/gcc/config/riscv/riscv-ftypes.def gcc-6.2.0/gcc/config/riscv/riscv-ftypes.def
+new file mode 100644
+index 0000000..96a38f1
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-ftypes.def
+@@ -0,0 +1,39 @@
++/* Definitions of prototypes for RISC-V built-in functions.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
++ MIPS built-in functions, where:
++
++ NARGS is the number of arguments.
++ LIST contains the return-type code followed by the codes for each
++ argument type.
++
++ Argument- and return-type codes are either modes or one of the following:
++
++ VOID for void_type_node
++ INT for integer_type_node
++ POINTER for ptr_type_node
++
++ (we don't use PTR because that's an ANSI-compatibility macro).
++
++ Please keep this list lexicographically sorted by the LIST argument. */
++
++DEF_RISCV_FTYPE (1, (VOID, VOID))
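How this single entry turns into an enumerator is visible in riscv.c later in the patch (the RISCV_FTYPE_NAME* macros and enum riscv_function_type); condensed into a self-contained snippet:

/* Condensed from riscv.c: each .def entry expands to one enum value. */
#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,

enum riscv_function_type {
  DEF_RISCV_FTYPE (1, (VOID, VOID))   /* expands to RISCV_VOID_FTYPE_VOID, */
  RISCV_MAX_FTYPE_MAX
};

int
main (void)
{
  return RISCV_VOID_FTYPE_VOID;   /* 0: the first (and only) prototype */
}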
+diff --git original-gcc/gcc/config/riscv/riscv-modes.def gcc-6.2.0/gcc/config/riscv/riscv-modes.def
+new file mode 100644
+index 0000000..bb42344
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-modes.def
+@@ -0,0 +1,26 @@
++/* Extra machine modes for RISC-V target.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++FLOAT_MODE (TF, 16, ieee_quad_format);
++
++/* Vector modes. */
++VECTOR_MODES (INT, 4); /* V8QI V4HI V2SI */
++VECTOR_MODES (FLOAT, 4); /* V4HF V2SF */
+diff --git original-gcc/gcc/config/riscv/riscv-opts.h gcc-6.2.0/gcc/config/riscv/riscv-opts.h
+new file mode 100644
+index 0000000..2636a46
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-opts.h
+@@ -0,0 +1,31 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2016 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_OPTS_H
++#define GCC_RISCV_OPTS_H
++
++enum riscv_float_abi_type {
++ FLOAT_ABI_SOFT,
++ FLOAT_ABI_SINGLE,
++ FLOAT_ABI_DOUBLE
++};
++extern enum riscv_float_abi_type riscv_float_abi;
++
++#endif /* ! GCC_RISCV_OPTS_H */
+diff --git original-gcc/gcc/config/riscv/riscv-protos.h gcc-6.2.0/gcc/config/riscv/riscv-protos.h
+new file mode 100644
+index 0000000..ef2ddca
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv-protos.h
+@@ -0,0 +1,98 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_PROTOS_H
++#define GCC_RISCV_PROTOS_H
++
++enum riscv_symbol_type {
++ SYMBOL_ABSOLUTE,
++ SYMBOL_PCREL,
++ SYMBOL_GOT_DISP,
++ SYMBOL_TLS,
++ SYMBOL_TLS_LE,
++ SYMBOL_TLS_IE,
++ SYMBOL_TLS_GD
++};
++#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
++
++enum riscv_code_model {
++ CM_MEDLOW,
++ CM_MEDANY,
++ CM_PIC
++};
++extern enum riscv_code_model riscv_cmodel;
++
++extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
++extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
++extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
++extern bool riscv_hard_regno_mode_ok_p (unsigned int, enum machine_mode);
++extern int riscv_address_insns (rtx, enum machine_mode, bool);
++extern int riscv_const_insns (rtx);
++extern int riscv_split_const_insns (rtx);
++extern int riscv_load_store_insns (rtx, rtx_insn *);
++extern rtx riscv_emit_move (rtx, rtx);
++extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
++extern bool riscv_split_symbol_type (enum riscv_symbol_type);
++extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
++extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
++extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
++extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
++
++extern rtx riscv_subword (rtx, bool);
++extern bool riscv_split_64bit_move_p (rtx, rtx);
++extern void riscv_split_doubleword_move (rtx, rtx);
++extern const char *riscv_output_move (rtx, rtx);
++extern const char *riscv_output_gpr_save (unsigned);
++#ifdef RTX_CODE
++extern void riscv_expand_scc (rtx *);
++extern void riscv_expand_conditional_branch (rtx *);
++#endif
++extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
++extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
++extern void riscv_set_return_address (rtx, rtx);
++extern bool riscv_expand_block_move (rtx, rtx, rtx);
++extern void riscv_expand_synci_loop (rtx, rtx);
++
++extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
++ HOST_WIDE_INT);
++extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
++ HOST_WIDE_INT);
++extern void riscv_order_regs_for_local_alloc (void);
++
++extern rtx riscv_return_addr (int, rtx);
++extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
++extern void riscv_expand_prologue (void);
++extern void riscv_expand_epilogue (bool);
++extern bool riscv_can_use_return_insn (void);
++extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
++
++extern enum reg_class riscv_secondary_reload_class (enum reg_class,
++ enum machine_mode,
++ rtx, bool);
++extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
++
++extern void irix_asm_output_align (FILE *, unsigned);
++extern const char *current_section_name (void);
++extern unsigned int current_section_flags (void);
++
++extern void riscv_expand_vector_init (rtx, rtx);
++
++#endif /* ! GCC_RISCV_PROTOS_H */
+diff --git original-gcc/gcc/config/riscv/riscv.c gcc-6.2.0/gcc/config/riscv/riscv.c
+new file mode 100644
+index 0000000..03c27cc
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.c
+@@ -0,0 +1,4427 @@
++/* Subroutines used for code generation for RISC-V.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "regs.h"
++#include "hard-reg-set.h"
++#include "insn-config.h"
++#include "conditions.h"
++#include "insn-attr.h"
++#include "recog.h"
++#include "output.h"
++#include "hash-set.h"
++#include "machmode.h"
++#include "vec.h"
++#include "double-int.h"
++#include "input.h"
++#include "alias.h"
++#include "symtab.h"
++#include "wide-int.h"
++#include "inchash.h"
++#include "tree.h"
++#include "fold-const.h"
++#include "varasm.h"
++#include "stringpool.h"
++#include "stor-layout.h"
++#include "calls.h"
++#include "function.h"
++#include "hashtab.h"
++#include "flags.h"
++#include "statistics.h"
++#include "real.h"
++#include "fixed-value.h"
++#include "expmed.h"
++#include "dojump.h"
++#include "explow.h"
++#include "emit-rtl.h"
++#include "stmt.h"
++#include "expr.h"
++#include "insn-codes.h"
++#include "optabs.h"
++#include "libfuncs.h"
++#include "reload.h"
++#include "tm_p.h"
++#include "ggc.h"
++#include "gstab.h"
++#include "hash-table.h"
++#include "debug.h"
++#include "target.h"
++#include "target-def.h"
++#include "common/common-target.h"
++#include "langhooks.h"
++#include "dominance.h"
++#include "cfg.h"
++#include "cfgrtl.h"
++#include "cfganal.h"
++#include "lcm.h"
++#include "cfgbuild.h"
++#include "cfgcleanup.h"
++#include "predict.h"
++#include "basic-block.h"
++#include "bitmap.h"
++#include "regset.h"
++#include "df.h"
++#include "sched-int.h"
++#include "tree-ssa-alias.h"
++#include "internal-fn.h"
++#include "gimple-fold.h"
++#include "tree-eh.h"
++#include "gimple-expr.h"
++#include "is-a.h"
++#include "gimple.h"
++#include "gimplify.h"
++#include "diagnostic.h"
++#include "target-globals.h"
++#include "opts.h"
++#include "tree-pass.h"
++#include "context.h"
++#include "hash-map.h"
++#include "plugin-api.h"
++#include "ipa-ref.h"
++#include "cgraph.h"
++#include "builtins.h"
++#include "rtl-iter.h"
++#include <stdint.h>
++
++/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
++#define UNSPEC_ADDRESS_P(X) \
++ (GET_CODE (X) == UNSPEC \
++ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
++ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
++
++/* Extract the symbol or label from UNSPEC wrapper X. */
++#define UNSPEC_ADDRESS(X) \
++ XVECEXP (X, 0, 0)
++
++/* Extract the symbol type from UNSPEC wrapper X. */
++#define UNSPEC_ADDRESS_TYPE(X) \
++ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
++
++/* The maximum distance between the top of the stack frame and the
++ value sp has when we save and restore registers. This is set by the
++ range of load/store offsets and must also preserve stack alignment. */
++#define RISCV_MAX_FIRST_STACK_STEP (IMM_REACH/2 - 16)
++
++/* True if INSN is a riscv.md pattern or asm statement. */
++#define USEFUL_INSN_P(INSN) \
++ (NONDEBUG_INSN_P (INSN) \
++ && GET_CODE (PATTERN (INSN)) != USE \
++ && GET_CODE (PATTERN (INSN)) != CLOBBER \
++ && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
++ && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
++
++/* True if bit BIT is set in VALUE. */
++#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
++
++/* Classifies an address.
++
++ ADDRESS_REG
++ A natural register + offset address. The register satisfies
++ riscv_valid_base_register_p and the offset is a const_arith_operand.
++
++ ADDRESS_LO_SUM
++ A LO_SUM rtx. The first operand is a valid base register and
++ the second operand is a symbolic address.
++
++ ADDRESS_CONST_INT
++ A signed 12-bit constant address.
++
++ ADDRESS_SYMBOLIC:
++ A constant symbolic address. */
++enum riscv_address_type {
++ ADDRESS_REG,
++ ADDRESS_LO_SUM,
++ ADDRESS_CONST_INT,
++ ADDRESS_SYMBOLIC
++};
++
++enum riscv_code_model riscv_cmodel = TARGET_DEFAULT_CMODEL;
++
++/* Macros to create an enumeration identifier for a function prototype. */
++#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
++#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
++#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
++#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
++
++/* Classifies the prototype of a built-in function. */
++enum riscv_function_type {
++#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++ RISCV_MAX_FTYPE_MAX
++};
++
++/* Specifies how a built-in function should be converted into rtl. */
++enum riscv_builtin_type {
++ /* The function corresponds directly to an .md pattern. The return
++ value is mapped to operand 0 and the arguments are mapped to
++ operands 1 and above. */
++ RISCV_BUILTIN_DIRECT,
++
++ /* The function corresponds directly to an .md pattern. There is no return
++ value and the arguments are mapped to operands 0 and above. */
++ RISCV_BUILTIN_DIRECT_NO_TARGET
++};
++
++/* Information about a function's frame layout. */
++struct GTY(()) riscv_frame_info {
++ /* The size of the frame in bytes. */
++ HOST_WIDE_INT total_size;
++
++ /* Bit X is set if the function saves or restores GPR X. */
++ unsigned int mask;
++
++ /* Likewise FPR X. */
++ unsigned int fmask;
++
++ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */
++ unsigned save_libcall_adjustment;
++
++ /* Offsets of fixed-point and floating-point save areas from frame bottom */
++ HOST_WIDE_INT gp_sp_offset;
++ HOST_WIDE_INT fp_sp_offset;
++
++ /* Offset of virtual frame pointer from stack pointer/frame bottom */
++ HOST_WIDE_INT frame_pointer_offset;
++
++ /* Offset of hard frame pointer from stack pointer/frame bottom */
++ HOST_WIDE_INT hard_frame_pointer_offset;
++
++ /* The offset of arg_pointer_rtx from the bottom of the frame. */
++ HOST_WIDE_INT arg_pointer_offset;
++};
++
++struct GTY(()) machine_function {
++ /* The number of extra stack bytes taken up by register varargs.
++ This area is allocated by the callee at the very top of the frame. */
++ int varargs_size;
++
++ /* Cached return value of leaf_function_p. <0 if false, >0 if true. */
++ int is_leaf;
++
++ /* The current frame information, calculated by riscv_compute_frame_info. */
++ struct riscv_frame_info frame;
++};
++
++/* Information about a single argument. */
++struct riscv_arg_info {
++ /* True if the argument is passed in a floating-point register, or
++ would have been if we hadn't run out of registers. */
++ bool fpr_p;
++
++ /* The number of words passed in registers, rounded up. */
++ unsigned int reg_words;
++
++ /* For EABI, the offset of the first register from GP_ARG_FIRST or
++ FP_ARG_FIRST. For other ABIs, the offset of the first register from
++ the start of the ABI's argument structure (see the CUMULATIVE_ARGS
++ comment for details).
++
++ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
++ on the stack. */
++ unsigned int reg_offset;
++
++ /* The number of words that must be passed on the stack, rounded up. */
++ unsigned int stack_words;
++
++ /* The offset from the start of the stack overflow area of the argument's
++ first stack word. Only meaningful when STACK_WORDS is nonzero. */
++ unsigned int stack_offset;
++};
++
++/* Information about an address described by riscv_address_type.
++
++ ADDRESS_CONST_INT
++ No fields are used.
++
++ ADDRESS_REG
++ REG is the base register and OFFSET is the constant offset.
++
++ ADDRESS_LO_SUM
++ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
++ is the type of symbol it references.
++
++ ADDRESS_SYMBOLIC
++ SYMBOL_TYPE is the type of symbol that the address references. */
++struct riscv_address_info {
++ enum riscv_address_type type;
++ rtx reg;
++ rtx offset;
++ enum riscv_symbol_type symbol_type;
++};
++
++/* One stage in a constant building sequence. These sequences have
++ the form:
++
++ A = VALUE[0]
++ A = A CODE[1] VALUE[1]
++ A = A CODE[2] VALUE[2]
++ ...
++
++ where A is an accumulator, each CODE[i] is a binary rtl operation
++ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
++struct riscv_integer_op {
++ enum rtx_code code;
++ unsigned HOST_WIDE_INT value;
++};
++
++/* The largest number of operations needed to load an integer constant.
++ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
++ but we may attempt and reject even worse sequences. */
++#define RISCV_MAX_INTEGER_OPS 32
++
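A worked instance of the CODE/VALUE sequences described above, using 0x12345678 as an illustrative constant and assuming the usual riscv.h definitions of CONST_LOW_PART and LUI_OPERAND (not quoted in this hunk): the builder ends up with a two-step LUI + ADDI form.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* Accumulator form for 0x12345678:
       A = 0x12345000           VALUE[0]           (emitted as LUI)
       A = A PLUS 0x678         CODE[1], VALUE[1]  (emitted as ADDI)
     i.e. codes[0] = { UNKNOWN, 0x12345000 }, codes[1] = { PLUS, 0x678 }.  */
  int64_t a = 0x12345000;
  a += 0x678;
  assert (a == 0x12345678);
  return 0;
}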
++/* Costs of various operations on the different architectures. */
++
++struct riscv_tune_info
++{
++ unsigned short fp_add[2];
++ unsigned short fp_mul[2];
++ unsigned short fp_div[2];
++ unsigned short int_mul[2];
++ unsigned short int_div[2];
++ unsigned short issue_rate;
++ unsigned short branch_cost;
++ unsigned short memory_cost;
++};
++
++/* Information about one CPU we know about. */
++struct riscv_cpu_info {
++ /* This CPU's canonical name. */
++ const char *name;
++
++ /* The RISC-V ISA and extensions supported by this CPU. */
++ const char *isa;
++
++ /* Tuning parameters for this CPU. */
++ const struct riscv_tune_info *tune_info;
++};
++
++/* Global variables for machine-dependent things. */
++
++/* Which tuning parameters to use. */
++static const struct riscv_tune_info *tune_info;
++
++/* Index R is the smallest register class that contains register R. */
++const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, T_REGS, T_REGS, T_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ T_REGS, T_REGS, T_REGS, T_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FRAME_REGS, FRAME_REGS,
++};
++
++/* Costs to use when tuning for Rocket. */
++static const struct riscv_tune_info rocket_tune_info = {
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
++ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
++ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
++ 1, /* issue_rate */
++ 3, /* branch_cost */
++ 5 /* memory_cost */
++};
++
++/* Costs to use when optimizing for size. */
++static const struct riscv_tune_info optimize_size_tune_info = {
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
++ 1, /* issue_rate */
++ 1, /* branch_cost */
++ 1 /* memory_cost */
++};
++
++/* A table describing all the processors GCC knows about. */
++static const struct riscv_cpu_info riscv_cpu_info_table[] = {
++ /* Entries for generic ISAs. */
++ { "rocket", "IMAFD", &rocket_tune_info },
++};
++
++/* Return the riscv_cpu_info entry for the given name string. */
++
++static const struct riscv_cpu_info *
++riscv_parse_cpu (const char *cpu_string)
++{
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
++ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
++ return riscv_cpu_info_table + i;
++
++ error ("unknown cpu `%s' for -mtune", cpu_string);
++ return riscv_cpu_info_table;
++}
++
++/* Fill CODES with a sequence of rtl operations to load VALUE.
++ Return the number of operations needed. */
++
++static int
++riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++ enum machine_mode mode)
++{
++ HOST_WIDE_INT low_part = CONST_LOW_PART (value);
++ int cost = INT_MAX, alt_cost;
++ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++
++ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
++ {
++ /* Simply ADDI or LUI */
++ codes[0].code = UNKNOWN;
++ codes[0].value = value;
++ return 1;
++ }
++
++ /* End with ADDI */
++ if (low_part != 0
++ && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
++ {
++ cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
++ codes[cost-1].code = PLUS;
++ codes[cost-1].value = low_part;
++ }
++
++ /* End with XORI */
++ if (cost > 2 && (low_part < 0 || mode == HImode))
++ {
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
++ alt_codes[alt_cost-1].code = XOR;
++ alt_codes[alt_cost-1].value = low_part;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++ }
++
++ /* Eliminate trailing zeros and end with SLLI */
++ if (cost > 2 && (value & 1) == 0)
++ {
++ int shift = 0;
++ while ((value & 1) == 0)
++ shift++, value >>= 1;
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
++ alt_codes[alt_cost-1].code = ASHIFT;
++ alt_codes[alt_cost-1].value = shift;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++ }
++
++ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
++ return cost;
++}
++
++static int
++riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++ enum machine_mode mode)
++{
++ int cost = riscv_build_integer_1 (codes, value, mode);
++
++ /* Eliminate leading zeros and end with SRLI */
++ if (value > 0 && cost > 2)
++ {
++ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++ int alt_cost, shift = 0;
++ HOST_WIDE_INT shifted_val;
++
++ /* Try filling trailing bits with 1s */
++ while ((value << shift) >= 0)
++ shift++;
++ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++ alt_codes[alt_cost-1].code = LSHIFTRT;
++ alt_codes[alt_cost-1].value = shift;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++
++ /* Try filling trailing bits with 0s */
++ shifted_val = value << shift;
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++ alt_codes[alt_cost-1].code = LSHIFTRT;
++ alt_codes[alt_cost-1].value = shift;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++ }
++
++ return cost;
++}
++
++static int
++riscv_split_integer_cost (HOST_WIDE_INT val)
++{
++ int cost;
++ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++
++ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
++ if (loval != hival)
++ cost += riscv_build_integer (codes, hival, VOIDmode);
++
++ return cost;
++}
++
++static int
++riscv_integer_cost (HOST_WIDE_INT val)
++{
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++ return MIN (riscv_build_integer (codes, val, VOIDmode),
++ riscv_split_integer_cost (val));
++}
++
++/* Try to split a 64b integer into 32b parts, then reassemble. */
++
++static rtx
++riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
++{
++ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
++ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
++
++ riscv_move_integer (hi, hi, hival);
++ riscv_move_integer (lo, lo, loval);
++
++ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
++ hi = force_reg (mode, hi);
++
++ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
++}
++
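The loval/hival split above is subtle: when the two halves are recombined, loval is sign-extended, so hival has to absorb the resulting borrow, which is what the (val - (int32_t) val) >> 32 expression does. A minimal self-check of that arithmetic, with an illustrative constant:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t val = 0x123456789abcdef0;
  int32_t loval = (int32_t) val;                            /* sign-extended when re-widened */
  int32_t hival = (int32_t) ((val - (int32_t) val) >> 32);  /* upper half plus the borrow    */

  /* Mirrors (hi << 32) + lo, the shape riscv_split_integer emits. */
  assert (((int64_t) hival << 32) + loval == val);
  return 0;
}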
++/* Return true if X is a thread-local symbol. */
++
++static bool
++riscv_tls_symbol_p (const_rtx x)
++{
++ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
++}
++
++static bool
++riscv_symbol_binds_local_p (const_rtx x)
++{
++ return (SYMBOL_REF_DECL (x)
++ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
++ : SYMBOL_REF_LOCAL_P (x));
++}
++
++/* Return the method that should be used to access SYMBOL_REF or
++ LABEL_REF X. */
++
++static enum riscv_symbol_type
++riscv_classify_symbol (const_rtx x)
++{
++ if (riscv_tls_symbol_p (x))
++ return SYMBOL_TLS;
++
++ switch (GET_CODE (x))
++ {
++ case LABEL_REF:
++ if (LABEL_REF_NONLOCAL_P (x))
++ return SYMBOL_GOT_DISP;
++ break;
++
++ case SYMBOL_REF:
++ if (flag_pic && !riscv_symbol_binds_local_p (x))
++ return SYMBOL_GOT_DISP;
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
++}
++
++/* Classify the base of symbolic expression X. */
++
++enum riscv_symbol_type
++riscv_classify_symbolic_expression (rtx x)
++{
++ rtx offset;
++
++ split_const (x, &x, &offset);
++ if (UNSPEC_ADDRESS_P (x))
++ return UNSPEC_ADDRESS_TYPE (x);
++
++ return riscv_classify_symbol (x);
++}
++
++/* Return true if X is a symbolic constant. If it is, store the type of
++ the symbol in *SYMBOL_TYPE. */
++
++bool
++riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
++{
++ rtx offset;
++
++ split_const (x, &x, &offset);
++ if (UNSPEC_ADDRESS_P (x))
++ {
++ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
++ x = UNSPEC_ADDRESS (x);
++ }
++ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
++ *symbol_type = riscv_classify_symbol (x);
++ else
++ return false;
++
++ if (offset == const0_rtx)
++ return true;
++
++ /* Check whether a nonzero offset is valid for the underlying
++ relocations. */
++ switch (*symbol_type)
++ {
++ case SYMBOL_ABSOLUTE:
++ case SYMBOL_PCREL:
++ case SYMBOL_TLS_LE:
++ return (int32_t) INTVAL (offset) == INTVAL (offset);
++
++ default:
++ return false;
++ }
++ gcc_unreachable ();
++}
++
++/* Returns the number of instructions necessary to reference a symbol. */
++
++static int riscv_symbol_insns (enum riscv_symbol_type type)
++{
++ switch (type)
++ {
++ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
++ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
++ case SYMBOL_PCREL: return 2; /* AUIPC + the reference itself */
++ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
++ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
++ default: gcc_unreachable();
++ }
++}
++
++/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
++
++static bool
++riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ return riscv_const_insns (x) > 0;
++}
++
++/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
++
++static bool
++riscv_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ enum riscv_symbol_type type;
++ rtx base, offset;
++
++ /* There is no assembler syntax for expressing an address-sized
++ high part. */
++ if (GET_CODE (x) == HIGH)
++ return true;
++
++ split_const (x, &base, &offset);
++ if (riscv_symbolic_constant_p (base, &type))
++ {
++ /* As an optimization, don't spill symbolic constants that are as
++ cheap to rematerialize as to access in the constant pool. */
++ if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
++ return true;
++
++ /* As an optimization, avoid needlessly generating dynamic relocations. */
++ if (flag_pic)
++ return true;
++ }
++
++ /* TLS symbols must be computed by riscv_legitimize_move. */
++ if (tls_referenced_p (x))
++ return true;
++
++ return false;
++}
++
++/* Return true if register REGNO is a valid base register for mode MODE.
++ STRICT_P is true if REG_OK_STRICT is in effect. */
++
++int
++riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
++ bool strict_p)
++{
++ if (!HARD_REGISTER_NUM_P (regno))
++ {
++ if (!strict_p)
++ return true;
++ regno = reg_renumber[regno];
++ }
++
++ /* These fake registers will be eliminated to either the stack or
++ hard frame pointer, both of which are usually valid base registers.
++ Reload deals with the cases where the eliminated form isn't valid. */
++ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
++ return true;
++
++ return GP_REG_P (regno);
++}
++
++/* Return true if X is a valid base register for mode MODE.
++ STRICT_P is true if REG_OK_STRICT is in effect. */
++
++static bool
++riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
++{
++ if (!strict_p && GET_CODE (x) == SUBREG)
++ x = SUBREG_REG (x);
++
++ return (REG_P (x)
++ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
++}
++
++/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
++ can address a value of mode MODE. */
++
++static bool
++riscv_valid_offset_p (rtx x, enum machine_mode mode)
++{
++ /* Check that X is a signed 12-bit number. */
++ if (!const_arith_operand (x, Pmode))
++ return false;
++
++ /* We may need to split multiword moves, so make sure that every word
++ is accessible. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
++ return false;
++
++ return true;
++}
++
++/* Should a symbol of type SYMBOL_TYPE be split in two? */
++
++bool
++riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
++{
++ if (symbol_type == SYMBOL_TLS_LE)
++ return true;
++
++ if (!TARGET_EXPLICIT_RELOCS)
++ return false;
++
++ return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
++}
++
++/* Return true if a LO_SUM can address a value of mode MODE when the
++ LO_SUM symbol has type SYMBOL_TYPE. */
++
++static bool
++riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
++{
++ /* Check that symbols of type SYMBOL_TYPE can be used to access values
++ of mode MODE. */
++ if (riscv_symbol_insns (symbol_type) == 0)
++ return false;
++
++ /* Check that there is a known low-part relocation. */
++ if (!riscv_split_symbol_type (symbol_type))
++ return false;
++
++ /* We may need to split multiword moves, so make sure that each word
++ can be accessed without inducing a carry. This is mainly needed
++ for o64, which has historically only guaranteed 64-bit alignment
++ for 128-bit types. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
++ return false;
++
++ return true;
++}
++
++/* Return true if X is a valid address for machine mode MODE. If it is,
++ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
++ effect. */
++
++static bool
++riscv_classify_address (struct riscv_address_info *info, rtx x,
++ enum machine_mode mode, bool strict_p)
++{
++ switch (GET_CODE (x))
++ {
++ case REG:
++ case SUBREG:
++ info->type = ADDRESS_REG;
++ info->reg = x;
++ info->offset = const0_rtx;
++ return riscv_valid_base_register_p (info->reg, mode, strict_p);
++
++ case PLUS:
++ info->type = ADDRESS_REG;
++ info->reg = XEXP (x, 0);
++ info->offset = XEXP (x, 1);
++ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++ && riscv_valid_offset_p (info->offset, mode));
++
++ case LO_SUM:
++ info->type = ADDRESS_LO_SUM;
++ info->reg = XEXP (x, 0);
++ info->offset = XEXP (x, 1);
++ /* We have to trust the creator of the LO_SUM to do something vaguely
++ sane. Target-independent code that creates a LO_SUM should also
++ create and verify the matching HIGH. Target-independent code that
++ adds an offset to a LO_SUM must prove that the offset will not
++ induce a carry. Failure to do either of these things would be
++ a bug, and we are not required to check for it here. The RISCV
++ backend itself should only create LO_SUMs for valid symbolic
++ constants, with the high part being either a HIGH or a copy
++ of _gp. */
++ info->symbol_type
++ = riscv_classify_symbolic_expression (info->offset);
++ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++ && riscv_valid_lo_sum_p (info->symbol_type, mode));
++
++ case CONST_INT:
++ /* Small-integer addresses don't occur very often, but they
++	 are legitimate if x0 is a valid base register. */
++ info->type = ADDRESS_CONST_INT;
++ return SMALL_OPERAND (INTVAL (x));
++
++ default:
++ return false;
++ }
++}
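++
++/* For example, with the checks above:
++     (reg a0)                          -> ADDRESS_REG, offset 0
++     (plus (reg a0) (const_int 16))    -> ADDRESS_REG, offset 16
++     (lo_sum (reg a0) (symbol_ref x))  -> ADDRESS_LO_SUM
++     (const_int 12)                    -> ADDRESS_CONST_INT
++   Any other form, such as an auto-modified address, is rejected. */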
++
++/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
++
++static bool
++riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
++{
++ struct riscv_address_info addr;
++
++ return riscv_classify_address (&addr, x, mode, strict_p);
++}
++
++/* Return the number of instructions needed to load or store a value
++ of mode MODE at address X. Return 0 if X isn't valid for MODE.
++ Assume that multiword moves may need to be split into word moves
++ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
++ enough. */
++
++int
++riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
++{
++ struct riscv_address_info addr;
++ int n = 1;
++
++ if (!riscv_classify_address (&addr, x, mode, false))
++ return 0;
++
++ /* BLKmode is used for single unaligned loads and stores and should
++ not count as a multiword mode. */
++ if (mode != BLKmode && might_split_p)
++ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++ if (addr.type == ADDRESS_LO_SUM)
++ n += riscv_symbol_insns (addr.symbol_type) - 1;
++
++ return n;
++}
++
++/* Return the number of instructions needed to load constant X.
++ Return 0 if X isn't a valid constant. */
++
++int
++riscv_const_insns (rtx x)
++{
++ enum riscv_symbol_type symbol_type;
++ rtx offset;
++
++ switch (GET_CODE (x))
++ {
++ case HIGH:
++ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
++ || !riscv_split_symbol_type (symbol_type))
++ return 0;
++
++ /* This is simply an LUI. */
++ return 1;
++
++ case CONST_INT:
++ {
++ int cost = riscv_integer_cost (INTVAL (x));
++ /* Force complicated constants to memory. */
++ return cost < 4 ? cost : 0;
++ }
++
++ case CONST_DOUBLE:
++ case CONST_VECTOR:
++ /* Allow zeros for normal mode, where we can use x0. */
++ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
++
++ case CONST:
++ /* See if we can refer to X directly. */
++ if (riscv_symbolic_constant_p (x, &symbol_type))
++ return riscv_symbol_insns (symbol_type);
++
++ /* Otherwise try splitting the constant into a base and offset. */
++ split_const (x, &x, &offset);
++ if (offset != 0)
++ {
++ int n = riscv_const_insns (x);
++ if (n != 0)
++ return n + riscv_integer_cost (INTVAL (offset));
++ }
++ return 0;
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return riscv_symbol_insns (riscv_classify_symbol (x));
++
++ default:
++ return 0;
++ }
++}
++
++/* X is a doubleword constant that can be handled by splitting it into
++ two words and loading each word separately. Return the number of
++ instructions required to do this. */
++
++int
++riscv_split_const_insns (rtx x)
++{
++ unsigned int low, high;
++
++ low = riscv_const_insns (riscv_subword (x, false));
++ high = riscv_const_insns (riscv_subword (x, true));
++ gcc_assert (low > 0 && high > 0);
++ return low + high;
++}
++
++/* Return the number of instructions needed to implement INSN,
++ given that it loads from or stores to MEM. */
++
++int
++riscv_load_store_insns (rtx mem, rtx_insn *insn)
++{
++ enum machine_mode mode;
++ bool might_split_p;
++ rtx set;
++
++ gcc_assert (MEM_P (mem));
++ mode = GET_MODE (mem);
++
++ /* Try to prove that INSN does not need to be split. */
++ might_split_p = true;
++ if (GET_MODE_BITSIZE (mode) == 64)
++ {
++ set = single_set (insn);
++ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
++ might_split_p = false;
++ }
++
++ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
++}
++
++/* Emit a move from SRC to DEST. Assume that the move expanders can
++ handle all moves if !can_create_pseudo_p (). The distinction is
++ important because, unlike emit_move_insn, the move expanders know
++ how to force Pmode objects into the constant pool even when the
++ constant pool address is not itself legitimate. */
++
++rtx
++riscv_emit_move (rtx dest, rtx src)
++{
++ return (can_create_pseudo_p ()
++ ? emit_move_insn (dest, src)
++ : emit_move_insn_1 (dest, src));
++}
++
++/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
++
++static void
++riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
++{
++ emit_insn (gen_rtx_SET (target,
++ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
++}
++
++/* Compute (CODE OP0 OP1) and store the result in a new register
++ of mode MODE. Return that new register. */
++
++static rtx
++riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
++{
++ rtx reg;
++
++ reg = gen_reg_rtx (mode);
++ riscv_emit_binary (code, reg, op0, op1);
++ return reg;
++}
++
++/* Copy VALUE to a register and return that register. If new pseudos
++ are allowed, copy it into a new register, otherwise use DEST. */
++
++static rtx
++riscv_force_temporary (rtx dest, rtx value)
++{
++ if (can_create_pseudo_p ())
++ return force_reg (Pmode, value);
++ else
++ {
++ riscv_emit_move (dest, value);
++ return dest;
++ }
++}
++
++/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
++ then add CONST_INT OFFSET to the result. */
++
++static rtx
++riscv_unspec_address_offset (rtx base, rtx offset,
++ enum riscv_symbol_type symbol_type)
++{
++ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
++ UNSPEC_ADDRESS_FIRST + symbol_type);
++ if (offset != const0_rtx)
++ base = gen_rtx_PLUS (Pmode, base, offset);
++ return gen_rtx_CONST (Pmode, base);
++}
++
++/* Return an UNSPEC address with underlying address ADDRESS and symbol
++ type SYMBOL_TYPE. */
++
++rtx
++riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
++{
++ rtx base, offset;
++
++ split_const (address, &base, &offset);
++ return riscv_unspec_address_offset (base, offset, symbol_type);
++}
++
++/* If OP is an UNSPEC address, return the address to which it refers,
++ otherwise return OP itself. */
++
++static rtx
++riscv_strip_unspec_address (rtx op)
++{
++ rtx base, offset;
++
++ split_const (op, &base, &offset);
++ if (UNSPEC_ADDRESS_P (base))
++ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
++ return op;
++}
++
++/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
++ high part to BASE and return the result. Just return BASE otherwise.
++ TEMP is as for riscv_force_temporary.
++
++ The returned expression can be used as the first operand to a LO_SUM. */
++
++static rtx
++riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
++{
++ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
++ return riscv_force_temporary (temp, addr);
++}
++
++/* Load an entry from the GOT. */
++static rtx riscv_got_load_tls_gd(rtx dest, rtx sym)
++{
++ return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym));
++}
++
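++/* Likewise, load an initial-exec TLS entry from the GOT. */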
++static rtx riscv_got_load_tls_ie(rtx dest, rtx sym)
++{
++ return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym));
++}
++
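++/* Add the thread pointer to BASE for local-exec TLS symbol SYM and
++   store the result in DEST. */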
++static rtx riscv_tls_add_tp_le(rtx dest, rtx base, rtx sym)
++{
++ rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++ return (Pmode == DImode ? gen_tls_add_tp_ledi(dest, base, tp, sym) : gen_tls_add_tp_lesi(dest, base, tp, sym));
++}
++
++/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
++ it appears in a MEM of that mode. Return true if ADDR is a legitimate
++ constant in that context and can be split into high and low parts.
++ If so, and if LOW_OUT is nonnull, emit the high part and store the
++ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
++
++ TEMP is as for riscv_force_temporary and is used to load the high
++ part into a register.
++
++ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
++   a legitimate SET_SRC for an .md pattern, otherwise the low part
++ is guaranteed to be a legitimate address for mode MODE. */
++
++bool
++riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
++{
++ enum riscv_symbol_type symbol_type;
++
++ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
++ || !riscv_symbolic_constant_p (addr, &symbol_type)
++ || riscv_symbol_insns (symbol_type) == 0
++ || !riscv_split_symbol_type (symbol_type))
++ return false;
++
++ if (low_out)
++ switch (symbol_type)
++ {
++ case SYMBOL_ABSOLUTE:
++ {
++ rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
++ high = riscv_force_temporary (temp, high);
++ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++ }
++ break;
++
++ case SYMBOL_PCREL:
++ {
++ static int seqno;
++ char buf[32];
++ rtx label;
++
++ sprintf (buf, ".LA%d", seqno);
++ label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
++ SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;
++
++ if (temp == NULL)
++ temp = gen_reg_rtx (Pmode);
++
++ if (Pmode == DImode)
++ emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
++ else
++ emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));
++
++ *low_out = gen_rtx_LO_SUM (Pmode, temp, label);
++
++ seqno++;
++ }
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ return true;
++}
++
++/* Return a legitimate address for REG + OFFSET. TEMP is as for
++ riscv_force_temporary; it is only needed when OFFSET is not a
++ SMALL_OPERAND. */
++
++static rtx
++riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
++{
++ if (!SMALL_OPERAND (offset))
++ {
++ rtx high;
++
++      /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
++ The addition inside the macro CONST_HIGH_PART may cause an
++ overflow, so we need to force a sign-extension check. */
++ high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
++ offset = CONST_LOW_PART (offset);
++ high = riscv_force_temporary (temp, high);
++ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
++ }
++ return plus_constant (Pmode, reg, offset);
++}
++
++/* The __tls_get_addr symbol. */
++static GTY(()) rtx riscv_tls_symbol;
++
++/* Return an instruction sequence that calls __tls_get_addr. SYM is
++ the TLS symbol we are referencing and TYPE is the symbol type to use
++ (either global dynamic or local dynamic). RESULT is an RTX for the
++ return value location. */
++
++static rtx
++riscv_call_tls_get_addr (rtx sym, rtx result)
++{
++ rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++ if (!riscv_tls_symbol)
++ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
++
++ start_sequence ();
++
++ emit_insn (riscv_got_load_tls_gd (a0, sym));
++ insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
++ RTL_CONST_CALL_P (insn) = 1;
++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
++ insn = get_insns ();
++
++ end_sequence ();
++
++ return insn;
++}
++
++/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
++ its address. The return value will be both a valid address and a valid
++ SET_SRC (either a REG or a LO_SUM). */
++
++static rtx
++riscv_legitimize_tls_address (rtx loc)
++{
++ rtx dest, insn, tp, tmp1;
++ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
++
++ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
++ if (!flag_pic)
++ model = TLS_MODEL_LOCAL_EXEC;
++
++ switch (model)
++ {
++ case TLS_MODEL_LOCAL_DYNAMIC:
++ /* Rely on section anchors for the optimization that LDM TLS
++ provides. The anchor's address is loaded with GD TLS. */
++ case TLS_MODEL_GLOBAL_DYNAMIC:
++ tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
++ insn = riscv_call_tls_get_addr (loc, tmp1);
++ dest = gen_reg_rtx (Pmode);
++ emit_libcall_block (insn, dest, tmp1, loc);
++ break;
++
++ case TLS_MODEL_INITIAL_EXEC:
++ /* la.tls.ie; tp-relative add */
++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++ tmp1 = gen_reg_rtx (Pmode);
++ emit_insn (riscv_got_load_tls_ie (tmp1, loc));
++ dest = gen_reg_rtx (Pmode);
++ emit_insn (gen_add3_insn (dest, tmp1, tp));
++ break;
++
++ case TLS_MODEL_LOCAL_EXEC:
++ tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
++ dest = gen_reg_rtx (Pmode);
++ emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
++ dest = gen_rtx_LO_SUM (Pmode, dest,
++ riscv_unspec_address (loc, SYMBOL_TLS_LE));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ return dest;
++}
++
++/* If X is not a valid address for mode MODE, force it into a register. */
++
++static rtx
++riscv_force_address (rtx x, enum machine_mode mode)
++{
++ if (!riscv_legitimate_address_p (mode, x, false))
++ x = force_reg (Pmode, x);
++ return x;
++}
++
++/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
++ be legitimized in a way that the generic machinery might not expect,
++ return a new address, otherwise return NULL. MODE is the mode of
++ the memory being accessed. */
++
++static rtx
++riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
++ enum machine_mode mode)
++{
++ rtx addr;
++
++ if (riscv_tls_symbol_p (x))
++ return riscv_legitimize_tls_address (x);
++
++ /* See if the address can split into a high part and a LO_SUM. */
++ if (riscv_split_symbol (NULL, x, mode, &addr))
++ return riscv_force_address (addr, mode);
++
++ /* Handle BASE + OFFSET using riscv_add_offset. */
++ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
++ && INTVAL (XEXP (x, 1)) != 0)
++ {
++ rtx base = XEXP (x, 0);
++ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
++
++ if (!riscv_valid_base_register_p (base, mode, false))
++ base = copy_to_mode_reg (Pmode, base);
++ addr = riscv_add_offset (NULL, base, offset);
++ return riscv_force_address (addr, mode);
++ }
++
++ return x;
++}
++
++/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
++
++void
++riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
++{
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++ enum machine_mode mode;
++ int i, num_ops;
++ rtx x;
++
++ mode = GET_MODE (dest);
++ num_ops = riscv_build_integer (codes, value, mode);
++
++ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
++ && num_ops >= riscv_split_integer_cost (value))
++ x = riscv_split_integer (value, mode);
++ else
++ {
++ /* Apply each binary operation to X. */
++ x = GEN_INT (codes[0].value);
++
++ for (i = 1; i < num_ops; i++)
++ {
++ if (!can_create_pseudo_p ())
++ {
++ emit_insn (gen_rtx_SET (temp, x));
++ x = temp;
++ }
++ else
++ x = force_reg (mode, x);
++
++ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
++ }
++ }
++
++ emit_insn (gen_rtx_SET (dest, x));
++}
++
++/* Subroutine of riscv_legitimize_move. Move constant SRC into register
++ DEST given that SRC satisfies immediate_operand but doesn't satisfy
++ move_operand. */
++
++static void
++riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
++{
++ rtx base, offset;
++
++ /* Split moves of big integers into smaller pieces. */
++ if (splittable_const_int_operand (src, mode))
++ {
++ riscv_move_integer (dest, dest, INTVAL (src));
++ return;
++ }
++
++ /* Split moves of symbolic constants into high/low pairs. */
++ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
++ {
++ emit_insn (gen_rtx_SET (dest, src));
++ return;
++ }
++
++ /* Generate the appropriate access sequences for TLS symbols. */
++ if (riscv_tls_symbol_p (src))
++ {
++ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
++ return;
++ }
++
++ /* If we have (const (plus symbol offset)), and that expression cannot
++ be forced into memory, load the symbol first and add in the offset. Also
++ prefer to do this even if the constant _can_ be forced into memory, as it
++ usually produces better code. */
++ split_const (src, &base, &offset);
++ if (offset != const0_rtx
++ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
++ {
++ base = riscv_force_temporary (dest, base);
++ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
++ return;
++ }
++
++ src = force_const_mem (mode, src);
++
++ /* When using explicit relocs, constant pool references are sometimes
++ not legitimate addresses. */
++ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
++ riscv_emit_move (dest, src);
++}
++
++/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
++ sequence that is valid. */
++
++bool
++riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
++{
++ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
++ {
++ riscv_emit_move (dest, force_reg (mode, src));
++ return true;
++ }
++
++ /* We need to deal with constants that would be legitimate
++ immediate_operands but aren't legitimate move_operands. */
++ if (CONSTANT_P (src) && !move_operand (src, mode))
++ {
++ riscv_legitimize_const_move (mode, dest, src);
++ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
++ return true;
++ }
++ return false;
++}
++
++/* Return true if there is an instruction that implements CODE and accepts
++ X as an immediate operand. */
++
++static int
++riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
++{
++ switch (code)
++ {
++ case ASHIFT:
++ case ASHIFTRT:
++ case LSHIFTRT:
++ /* All shift counts are truncated to a valid constant. */
++ return true;
++
++ case AND:
++ case IOR:
++ case XOR:
++ case PLUS:
++ case LT:
++ case LTU:
++ /* These instructions take 12-bit signed immediates. */
++ return SMALL_OPERAND (x);
++
++ case LE:
++ /* We add 1 to the immediate and use SLT. */
++ return SMALL_OPERAND (x + 1);
++
++ case LEU:
++ /* Likewise SLTU, but reject the always-true case. */
++ return SMALL_OPERAND (x + 1) && x + 1 != 0;
++
++ case GE:
++ case GEU:
++ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
++ return x == 1;
++
++ default:
++ /* By default assume that x0 can be used for 0. */
++ return x == 0;
++ }
++}
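++
++/* For example, PLUS with 2048 is rejected because 2048 is outside the
++   signed 12-bit range, while LE with 7 is accepted because it can be
++   implemented as a set-less-than against 8. */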
++
++/* Return the cost of binary operation X, given that the instruction
++   sequence for a word-sized or smaller operation takes SINGLE_INSNS
++ instructions and that the sequence of a double-word operation takes
++ DOUBLE_INSNS instructions. */
++
++static int
++riscv_binary_cost (rtx x, int single_insns, int double_insns)
++{
++ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
++ return COSTS_N_INSNS (double_insns);
++ return COSTS_N_INSNS (single_insns);
++}
++
++/* Return the cost of sign-extending OP to mode MODE, not including the
++ cost of OP itself. */
++
++static int
++riscv_sign_extend_cost (enum machine_mode mode, rtx op)
++{
++ if (MEM_P (op))
++ /* Extended loads are as cheap as unextended ones. */
++ return 0;
++
++ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
++ /* A sign extension from SImode to DImode in 64-bit mode is free. */
++ return 0;
++
++ /* We need to use a shift left and a shift right. */
++ return COSTS_N_INSNS (2);
++}
++
++/* Return the cost of zero-extending OP to mode MODE, not including the
++ cost of OP itself. */
++
++static int
++riscv_zero_extend_cost (enum machine_mode mode, rtx op)
++{
++ if (MEM_P (op))
++ /* Extended loads are as cheap as unextended ones. */
++ return 0;
++
++ if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
++ ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
++    /* We need a pair of shifts to clear the upper bits. */
++ return COSTS_N_INSNS (2);
++
++ /* We can use ANDI. */
++ return COSTS_N_INSNS (1);
++}
++
++/* Implement TARGET_RTX_COSTS. */
++
++static bool
++riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
++ int *total, bool speed)
++{
++ int code = GET_CODE(x);
++ bool float_mode_p = FLOAT_MODE_P (mode);
++ int cost;
++
++ switch (code)
++ {
++ case CONST_INT:
++ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
++ {
++ *total = 0;
++ return true;
++ }
++ /* Fall through. */
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ case CONST_DOUBLE:
++ case CONST:
++ if (speed)
++ *total = 1;
++ else if ((cost = riscv_const_insns (x)) > 0)
++ *total = COSTS_N_INSNS (cost);
++ else /* The instruction will be fetched from the constant pool. */
++ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
++ return true;
++
++ case MEM:
++ /* If the address is legitimate, return the number of
++ instructions it needs. */
++ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
++ {
++ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
++ return true;
++ }
++ /* Otherwise use the default handling. */
++ return false;
++
++ case NOT:
++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
++ return false;
++
++ case AND:
++ case IOR:
++ case XOR:
++ /* Double-word operations use two single-word operations. */
++ *total = riscv_binary_cost (x, 1, 2);
++ return false;
++
++ case ASHIFT:
++ case ASHIFTRT:
++ case LSHIFTRT:
++ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
++ return false;
++
++ case ABS:
++ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
++ return false;
++
++ case LO_SUM:
++ *total = set_src_cost (XEXP (x, 0), mode, speed);
++ return true;
++
++ case LT:
++ case LTU:
++ case LE:
++ case LEU:
++ case GT:
++ case GTU:
++ case GE:
++ case GEU:
++ case EQ:
++ case NE:
++ case UNORDERED:
++ case LTGT:
++ /* Branch comparisons have VOIDmode, so use the first operand's
++ mode instead. */
++ mode = GET_MODE (XEXP (x, 0));
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = riscv_binary_cost (x, 1, 3);
++ return false;
++
++ case MINUS:
++ if (float_mode_p
++ && !HONOR_NANS (mode)
++ && !HONOR_SIGNED_ZEROS (mode))
++ {
++ /* See if we can use NMADD or NMSUB. See riscv.md for the
++ associated patterns. */
++ rtx op0 = XEXP (x, 0);
++ rtx op1 = XEXP (x, 1);
++ if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (XEXP (XEXP (op0, 0), 0), mode, speed)
++ + set_src_cost (XEXP (op0, 1), mode, speed)
++ + set_src_cost (op1, mode, speed));
++ return true;
++ }
++ if (GET_CODE (op1) == MULT)
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (op0, mode, speed)
++ + set_src_cost (XEXP (op1, 0), mode, speed)
++ + set_src_cost (XEXP (op1, 1), mode, speed));
++ return true;
++ }
++ }
++ /* Fall through. */
++
++ case PLUS:
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = riscv_binary_cost (x, 1, 4);
++ return false;
++
++ case NEG:
++ if (float_mode_p
++ && !HONOR_NANS (mode)
++ && HONOR_SIGNED_ZEROS (mode))
++ {
++ /* See if we can use NMADD or NMSUB. See riscv.md for the
++ associated patterns. */
++ rtx op = XEXP (x, 0);
++ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
++ && GET_CODE (XEXP (op, 0)) == MULT)
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (XEXP (XEXP (op, 0), 0), mode, speed)
++ + set_src_cost (XEXP (XEXP (op, 0), 1), mode, speed)
++ + set_src_cost (XEXP (op, 1), mode, speed));
++ return true;
++ }
++ }
++
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
++ return false;
++
++ case MULT:
++ if (float_mode_p)
++ *total = tune_info->fp_mul[mode == DFmode];
++ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
++ else if (!speed)
++ *total = COSTS_N_INSNS (1);
++ else
++ *total = tune_info->int_mul[mode == DImode];
++ return false;
++
++ case DIV:
++ case SQRT:
++ case MOD:
++ if (float_mode_p)
++ {
++ *total = tune_info->fp_div[mode == DFmode];
++ return false;
++ }
++ /* Fall through. */
++
++ case UDIV:
++ case UMOD:
++ if (speed)
++ *total = tune_info->int_div[mode == DImode];
++ else
++ *total = COSTS_N_INSNS (1);
++ return false;
++
++ case SIGN_EXTEND:
++ *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
++ return false;
++
++ case ZERO_EXTEND:
++ *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
++ return false;
++
++ case FLOAT:
++ case UNSIGNED_FLOAT:
++ case FIX:
++ case FLOAT_EXTEND:
++ case FLOAT_TRUNCATE:
++ *total = tune_info->fp_add[mode == DFmode];
++ return false;
++
++ case UNSPEC:
++ if (XINT (x, 1) == UNSPEC_AUIPC)
++ {
++ /* Make AUIPC cheap to avoid spilling its result to the stack. */
++ *total = 1;
++ return true;
++ }
++ return false;
++
++ default:
++ return false;
++ }
++}
++
++/* Implement TARGET_ADDRESS_COST. */
++
++static int
++riscv_address_cost (rtx addr, enum machine_mode mode,
++ addr_space_t as ATTRIBUTE_UNUSED,
++ bool speed ATTRIBUTE_UNUSED)
++{
++ return riscv_address_insns (addr, mode, false);
++}
++
++/* Return one word of double-word value OP. HIGH_P is true to select the
++ high part or false to select the low part. */
++
++rtx
++riscv_subword (rtx op, bool high_p)
++{
++ unsigned int byte;
++ enum machine_mode mode;
++
++ mode = GET_MODE (op);
++ if (mode == VOIDmode)
++ mode = TARGET_64BIT ? TImode : DImode;
++
++ byte = high_p ? UNITS_PER_WORD : 0;
++
++ if (FP_REG_RTX_P (op))
++ return gen_rtx_REG (word_mode, REGNO (op) + high_p);
++
++ if (MEM_P (op))
++ return adjust_address (op, word_mode, byte);
++
++ return simplify_gen_subreg (word_mode, op, mode, byte);
++}
++
++/* Return true if a 64-bit move from SRC to DEST should be split into two. */
++
++bool
++riscv_split_64bit_move_p (rtx dest, rtx src)
++{
++ /* All 64b moves are legal in 64b mode. All 64b FPR <-> FPR and
++ FPR <-> MEM moves are legal in 32b mode, too. Although
++ FPR <-> GPR moves are not available in general in 32b mode,
++ we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
++ return !(TARGET_64BIT
++ || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
++ || (FP_REG_RTX_P (dest) && MEM_P (src))
++ || (FP_REG_RTX_P (src) && MEM_P (dest))
++ || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))));
++}
++
++/* Split a doubleword move from SRC to DEST. On 32-bit targets,
++ this function handles 64-bit moves for which riscv_split_64bit_move_p
++ holds. For 64-bit targets, this function handles 128-bit moves. */
++
++void
++riscv_split_doubleword_move (rtx dest, rtx src)
++{
++ rtx low_dest;
++
++ /* The operation can be split into two normal moves. Decide in
++ which order to do them. */
++ low_dest = riscv_subword (dest, false);
++ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
++ {
++ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++ riscv_emit_move (low_dest, riscv_subword (src, false));
++ }
++ else
++ {
++ riscv_emit_move (low_dest, riscv_subword (src, false));
++ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++ }
++}
++
++/* Return the appropriate instructions to move SRC into DEST. Assume
++ that SRC is operand 1 and DEST is operand 0. */
++
++const char *
++riscv_output_move (rtx dest, rtx src)
++{
++ enum rtx_code dest_code, src_code;
++ enum machine_mode mode;
++ bool dbl_p;
++
++ dest_code = GET_CODE (dest);
++ src_code = GET_CODE (src);
++ mode = GET_MODE (dest);
++ dbl_p = (GET_MODE_SIZE (mode) == 8);
++
++ if (dbl_p && riscv_split_64bit_move_p (dest, src))
++ return "#";
++
++ if (dest_code == REG && GP_REG_P (REGNO (dest)))
++ {
++ if (src_code == REG && FP_REG_P (REGNO (src)))
++ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
++
++ if (src_code == MEM)
++ switch (GET_MODE_SIZE (mode))
++ {
++ case 1: return "lbu\t%0,%1";
++ case 2: return "lhu\t%0,%1";
++ case 4: return "lw\t%0,%1";
++ case 8: return "ld\t%0,%1";
++ }
++
++ if (src_code == CONST_INT)
++ return "li\t%0,%1";
++
++ if (src_code == HIGH)
++ return "lui\t%0,%h1";
++
++ if (symbolic_operand (src, VOIDmode))
++ switch (riscv_classify_symbolic_expression (src))
++ {
++ case SYMBOL_GOT_DISP: return "la\t%0,%1";
++ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
++ case SYMBOL_PCREL: return "lla\t%0,%1";
++ default: gcc_unreachable();
++ }
++ }
++ if ((src_code == REG && GP_REG_P (REGNO (src)))
++ || (src == CONST0_RTX (mode)))
++ {
++ if (dest_code == REG)
++ {
++ if (GP_REG_P (REGNO (dest)))
++ return "mv\t%0,%z1";
++
++ if (FP_REG_P (REGNO (dest)))
++ {
++ if (!dbl_p)
++ return "fmv.s.x\t%0,%z1";
++ if (TARGET_64BIT)
++ return "fmv.d.x\t%0,%z1";
++ /* in RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w */
++ gcc_assert (src == CONST0_RTX (mode));
++ return "fcvt.d.w\t%0,x0";
++ }
++ }
++ if (dest_code == MEM)
++ switch (GET_MODE_SIZE (mode))
++ {
++ case 1: return "sb\t%z1,%0";
++ case 2: return "sh\t%z1,%0";
++ case 4: return "sw\t%z1,%0";
++ case 8: return "sd\t%z1,%0";
++ }
++ }
++ if (src_code == REG && FP_REG_P (REGNO (src)))
++ {
++ if (dest_code == REG && FP_REG_P (REGNO (dest)))
++ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
++
++ if (dest_code == MEM)
++ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
++ }
++ if (dest_code == REG && FP_REG_P (REGNO (dest)))
++ {
++ if (src_code == MEM)
++ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
++ }
++ gcc_unreachable ();
++}
++
++/* Return true if CMP1 is a suitable second operand for integer ordering
++ test CODE. See also the *sCC patterns in riscv.md. */
++
++static bool
++riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
++{
++ switch (code)
++ {
++ case GT:
++ case GTU:
++ return reg_or_0_operand (cmp1, VOIDmode);
++
++ case GE:
++ case GEU:
++ return cmp1 == const1_rtx;
++
++ case LT:
++ case LTU:
++ return arith_operand (cmp1, VOIDmode);
++
++ case LE:
++ return sle_operand (cmp1, VOIDmode);
++
++ case LEU:
++ return sleu_operand (cmp1, VOIDmode);
++
++ default:
++ gcc_unreachable ();
++ }
++}
++
++/* Return true if *CMP1 (of mode MODE) is a valid second operand for
++ integer ordering test *CODE, or if an equivalent combination can
++ be formed by adjusting *CODE and *CMP1. When returning true, update
++ *CODE and *CMP1 with the chosen code and operand, otherwise leave
++ them alone. */
++
++static bool
++riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
++ enum machine_mode mode)
++{
++ HOST_WIDE_INT plus_one;
++
++ if (riscv_int_order_operand_ok_p (*code, *cmp1))
++ return true;
++
++ if (CONST_INT_P (*cmp1))
++ switch (*code)
++ {
++ case LE:
++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++ if (INTVAL (*cmp1) < plus_one)
++ {
++ *code = LT;
++ *cmp1 = force_reg (mode, GEN_INT (plus_one));
++ return true;
++ }
++ break;
++
++ case LEU:
++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++ if (plus_one != 0)
++ {
++ *code = LTU;
++ *cmp1 = force_reg (mode, GEN_INT (plus_one));
++ return true;
++ }
++ break;
++
++ default:
++ break;
++ }
++ return false;
++}
++
++/* Compare CMP0 and CMP1 using ordering test CODE and store the result
++ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
++ is nonnull, it's OK to set TARGET to the inverse of the result and
++ flip *INVERT_PTR instead. */
++
++static void
++riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
++ rtx target, rtx cmp0, rtx cmp1)
++{
++ enum machine_mode mode;
++
++ /* First see if there is a RISCV instruction that can do this operation.
++ If not, try doing the same for the inverse operation. If that also
++ fails, force CMP1 into a register and try again. */
++ mode = GET_MODE (cmp0);
++ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
++ riscv_emit_binary (code, target, cmp0, cmp1);
++ else
++ {
++ enum rtx_code inv_code = reverse_condition (code);
++ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
++ {
++ cmp1 = force_reg (mode, cmp1);
++ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
++ }
++ else if (invert_ptr == 0)
++ {
++ rtx inv_target;
++
++ inv_target = riscv_force_binary (GET_MODE (target),
++ inv_code, cmp0, cmp1);
++ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
++ }
++ else
++ {
++ *invert_ptr = !*invert_ptr;
++ riscv_emit_binary (inv_code, target, cmp0, cmp1);
++ }
++ }
++}
++
++/* Return a register that is zero iff CMP0 and CMP1 are equal.
++ The register will have the same mode as CMP0. */
++
++static rtx
++riscv_zero_if_equal (rtx cmp0, rtx cmp1)
++{
++ if (cmp1 == const0_rtx)
++ return cmp0;
++
++ return expand_binop (GET_MODE (cmp0), sub_optab,
++ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
++}
++
++/* Return false if we can easily emit code for the FP comparison specified
++ by *CODE. If not, set *CODE to its inverse and return true. */
++
++static bool
++riscv_reversed_fp_cond (enum rtx_code *code)
++{
++ switch (*code)
++ {
++ case EQ:
++ case LT:
++ case LE:
++ case GT:
++ case GE:
++ case LTGT:
++ case ORDERED:
++ /* We know how to emit code for these cases... */
++ return false;
++
++ default:
++ /* ...but we must invert these and rely on the others. */
++ *code = reverse_condition_maybe_unordered (*code);
++ return true;
++ }
++}
++
++/* Convert a comparison into something that can be used in a branch or
++ conditional move. On entry, *OP0 and *OP1 are the values being
++ compared and *CODE is the code used to compare them.
++
++ Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
++
++static void
++riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++ rtx cmp_op0 = *op0;
++ rtx cmp_op1 = *op1;
++
++ if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
++ {
++ if (splittable_const_int_operand (cmp_op1, VOIDmode))
++ {
++ HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
++ enum rtx_code new_code;
++
++ switch (*code)
++ {
++ case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
++ case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
++ case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
++ case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
++ case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
++ case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
++ case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
++ case GE: new_rhs = rhs - 1; new_code = GT;
++ try_new_rhs:
++ /* Convert e.g. OP0 > 4095 into OP0 >= 4096. */
++ if ((rhs < 0) == (new_rhs < 0)
++ && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
++ {
++ *op1 = GEN_INT (new_rhs);
++ *code = new_code;
++ }
++ break;
++
++ case EQ:
++ case NE:
++ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
++ if (SMALL_OPERAND (-rhs))
++ {
++ *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
++ riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
++ *op1 = const0_rtx;
++ }
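++	      /* Fall through. */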
++ default:
++ break;
++ }
++ }
++
++ if (*op1 != const0_rtx)
++ *op1 = force_reg (GET_MODE (cmp_op0), *op1);
++ }
++ else
++ {
++ /* For FP comparisons, set an integer register with the result of the
++ comparison, then branch on it. */
++ rtx tmp0, tmp1, final_op;
++ enum rtx_code fp_code = *code;
++ *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
++
++ switch (fp_code)
++ {
++ case ORDERED:
++ /* a == a && b == b */
++ tmp0 = gen_reg_rtx (SImode);
++ riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
++ tmp1 = gen_reg_rtx (SImode);
++ riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
++ final_op = gen_reg_rtx (SImode);
++ riscv_emit_binary (AND, final_op, tmp0, tmp1);
++ break;
++
++ case LTGT:
++ /* a < b || a > b */
++ tmp0 = gen_reg_rtx (SImode);
++ riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
++ tmp1 = gen_reg_rtx (SImode);
++ riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
++ final_op = gen_reg_rtx (SImode);
++ riscv_emit_binary (IOR, final_op, tmp0, tmp1);
++ break;
++
++ case EQ:
++ case LE:
++ case LT:
++ case GE:
++ case GT:
++ /* We have instructions for these cases. */
++ final_op = gen_reg_rtx (SImode);
++ riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ /* Compare the binary result against 0. */
++ *op0 = final_op;
++ *op1 = const0_rtx;
++ }
++}
++
++/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
++   and OPERANDS[3]. Store the result in OPERANDS[0].
++
++ On 64-bit targets, the mode of the comparison and target will always be
++ SImode, thus possibly narrower than that of the comparison's operands. */
++
++void
++riscv_expand_scc (rtx operands[])
++{
++ rtx target = operands[0];
++ enum rtx_code code = GET_CODE (operands[1]);
++ rtx op0 = operands[2];
++ rtx op1 = operands[3];
++
++ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
++
++ if (code == EQ || code == NE)
++ {
++ rtx zie = riscv_zero_if_equal (op0, op1);
++ riscv_emit_binary (code, target, zie, const0_rtx);
++ }
++ else
++ riscv_emit_int_order_test (code, 0, target, op0, op1);
++}
++
++/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
++ CODE and jump to OPERANDS[3] if the condition holds. */
++
++void
++riscv_expand_conditional_branch (rtx *operands)
++{
++ enum rtx_code code = GET_CODE (operands[0]);
++ rtx op0 = operands[1];
++ rtx op1 = operands[2];
++ rtx condition;
++
++ riscv_emit_compare (&code, &op0, &op1);
++ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
++ emit_jump_insn (gen_condjump (condition, operands[3]));
++}
++
++/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
++ least PARM_BOUNDARY bits of alignment, but will be given anything up
++ to STACK_BOUNDARY bits if the type requires it. */
++
++static unsigned int
++riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
++{
++ unsigned int alignment;
++
++ /* Use natural alignment if the type is not aggregate data. */
++ if (type && !AGGREGATE_TYPE_P (type))
++ alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
++ else
++ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
++
++ if (alignment < PARM_BOUNDARY)
++ alignment = PARM_BOUNDARY;
++ if (alignment > STACK_BOUNDARY)
++ alignment = STACK_BOUNDARY;
++ return alignment;
++}
++
++/* Fill INFO with information about a single argument. CUM is the
++ cumulative state for earlier arguments. MODE is the mode of this
++ argument and TYPE is its type (if known). NAMED is true if this
++ is a named (fixed) argument rather than a variable one. */
++
++static void
++riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
++ enum machine_mode mode, const_tree type, bool named)
++{
++ bool doubleword_aligned_p;
++ unsigned int num_bytes, num_words, max_regs;
++
++ /* Work out the size of the argument. */
++ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
++ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++ /* Scalar, complex and vector floating-point types are passed in
++ floating-point registers, as long as this is a named rather
++ than a variable argument. */
++ info->fpr_p = (named
++ && (type == 0 || FLOAT_TYPE_P (type))
++ && (GET_MODE_CLASS (mode) == MODE_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
++ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG);
++
++ /* Complex floats should only go into FPRs if there are two FPRs free,
++ otherwise they should be passed in the same way as a struct
++ containing two floats. */
++ if (info->fpr_p
++ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
++ && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FP_ARG)
++ {
++ if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
++ info->fpr_p = false;
++ else
++ num_words = 2;
++ }
++
++ /* See whether the argument has doubleword alignment,
++     but do not align zero-sized types. */
++ doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
++ > BITS_PER_WORD)
++ && (num_bytes != 0);
++
++  /* Set REG_OFFSET to the register count we're interested in. */
++ info->reg_offset = cum->num_gprs;
++
++ /* Advance to an even register if the argument is doubleword-aligned. */
++ if (doubleword_aligned_p)
++ info->reg_offset += info->reg_offset & 1;
++
++ /* Work out the offset of a stack argument. */
++ info->stack_offset = cum->stack_words;
++ if (doubleword_aligned_p)
++ info->stack_offset += info->stack_offset & 1;
++
++ max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
++
++ /* Partition the argument between registers and stack. */
++ info->reg_words = MIN (num_words, max_regs);
++ info->stack_words = num_words - info->reg_words;
++}
++
++/* INFO describes a register argument that has the normal format for the
++ argument's mode. Return the register it uses. */
++
++static unsigned int
++riscv_arg_regno (const struct riscv_arg_info *info)
++{
++ if (!info->fpr_p || riscv_float_abi == FLOAT_ABI_SOFT)
++ return GP_ARG_FIRST + info->reg_offset;
++ else
++ return FP_ARG_FIRST + info->reg_offset;
++}
++
++/* Implement TARGET_FUNCTION_ARG. */
++
++static rtx
++riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++ struct riscv_arg_info info;
++
++ if (mode == VOIDmode)
++ return NULL;
++
++ riscv_get_arg_info (&info, cum, mode, type, named);
++
++ /* Return straight away if the whole argument is passed on the stack. */
++ if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
++ return NULL;
++
++ /* If any XLEN-bit chunk of a structure contains an XLEN-bit floating-point
++     number in its entirety, and the floating-point ABI can pass XLEN-bit
++ values in FPRs, then pass the chunk in an FPR. */
++ if (named
++ && type != 0
++ && TREE_CODE (type) == RECORD_TYPE
++ && TYPE_SIZE_UNIT (type)
++ && UNITS_PER_FP_ARG >= UNITS_PER_WORD
++ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
++ {
++ enum machine_mode fmode = TARGET_64BIT ? DFmode : SFmode;
++ enum machine_mode imode = TARGET_64BIT ? DImode : SImode;
++ tree field;
++
++ /* First check to see if there is any such field. */
++ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
++ if (TREE_CODE (field) == FIELD_DECL
++ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
++ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
++ && tree_fits_shwi_p (bit_position (field))
++ && int_bit_position (field) % BITS_PER_WORD == 0)
++ break;
++
++ if (field != 0)
++ {
++ /* Now handle the special case by returning a PARALLEL
++ indicating where each 64-bit chunk goes. INFO.REG_WORDS
++ chunks are passed in registers. */
++ unsigned int i;
++ HOST_WIDE_INT bitpos;
++ rtx ret;
++
++ /* assign_parms checks the mode of ENTRY_PARM, so we must
++ use the actual mode here. */
++ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
++
++ bitpos = 0;
++ field = TYPE_FIELDS (type);
++ for (i = 0; i < info.reg_words; i++)
++ {
++ rtx reg;
++
++ for (; field; field = DECL_CHAIN (field))
++ if (TREE_CODE (field) == FIELD_DECL
++ && int_bit_position (field) >= bitpos)
++ break;
++
++ if (field
++ && int_bit_position (field) == bitpos
++ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
++ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
++ reg = gen_rtx_REG (fmode, FP_ARG_FIRST + info.reg_offset + i);
++ else
++ reg = gen_rtx_REG (imode, GP_ARG_FIRST + info.reg_offset + i);
++
++ XVECEXP (ret, 0, i)
++ = gen_rtx_EXPR_LIST (VOIDmode, reg,
++ GEN_INT (bitpos / BITS_PER_UNIT));
++
++ bitpos += BITS_PER_WORD;
++ }
++ return ret;
++ }
++ }
++
++ /* Pass complex floating-point arguments in FPR pairs, with the real part
++ in the lower register and the imaginary part in the upper register. */
++ if (info.fpr_p && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ {
++ rtx real, imag;
++ enum machine_mode inner = GET_MODE_INNER (mode);
++ unsigned int regno = FP_ARG_FIRST + info.reg_offset;
++
++ gcc_assert (info.stack_words == 0 && info.reg_words == 2);
++ real = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (inner, regno),
++ const0_rtx);
++ imag = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (inner, regno + 1),
++ GEN_INT (GET_MODE_SIZE (inner)));
++ return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
++ }
++
++ return gen_rtx_REG (mode, riscv_arg_regno (&info));
++}
++
++/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
++
++static void
++riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++ struct riscv_arg_info info;
++
++ riscv_get_arg_info (&info, cum, mode, type, named);
++
++ /* Advance the register count. This has the effect of setting
++ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
++ argument required us to skip the final GPR and pass the whole
++ argument on the stack. */
++ cum->num_gprs = info.reg_offset + info.reg_words;
++
++ /* Advance the stack word count. */
++ if (info.stack_words > 0)
++ cum->stack_words = info.stack_offset + info.stack_words;
++}
++
++/* Implement TARGET_ARG_PARTIAL_BYTES. */
++
++static int
++riscv_arg_partial_bytes (cumulative_args_t cum,
++ enum machine_mode mode, tree type, bool named)
++{
++ struct riscv_arg_info info;
++
++ riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
++ return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
++}
++
++/* See whether VALTYPE is a record whose fields should be returned in
++ floating-point registers. If so, return the number of fields and
++ list them in FIELDS (which should have two elements). Return 0
++ otherwise.
++
++   A structure with one or two fields is returned in
++ floating-point registers as long as every field has a floating-point
++ type. */
++
++static int
++riscv_fpr_return_fields (const_tree valtype, tree fields[2])
++{
++ tree field;
++ int i;
++
++ if (TREE_CODE (valtype) != RECORD_TYPE)
++ return 0;
++
++ i = 0;
++ for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
++ {
++ if (TREE_CODE (field) != FIELD_DECL)
++ continue;
++
++ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
++ return 0;
++
++ if (i == 2)
++ return 0;
++
++ fields[i++] = field;
++ }
++ return i;
++}
++
++/* Return true if the function return value MODE will get returned in a
++ floating-point register. */
++
++static bool
++riscv_return_mode_in_fpr_p (enum machine_mode mode)
++{
++ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG);
++}
++
++/* Return the representation of an FPR return register when the
++ value being returned in FP_RETURN has mode VALUE_MODE and the
++   return type itself has mode TYPE_MODE.  The two modes may be
++   different for structures like:
++
++ struct __attribute__((packed)) foo { float f; }
++
++ where we return the SFmode value of "f" in FP_RETURN, but where
++ the structure itself has mode BLKmode. */
++
++static rtx
++riscv_return_fpr_single (enum machine_mode type_mode,
++ enum machine_mode value_mode)
++{
++ rtx x;
++
++ x = gen_rtx_REG (value_mode, FP_RETURN);
++ if (type_mode != value_mode)
++ {
++ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
++ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
++ }
++ return x;
++}
++
++/* Return a composite value in a pair of floating-point registers.
++ MODE1 and OFFSET1 are the mode and byte offset for the first value,
++ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
++ complete value. */
++
++static rtx
++riscv_return_fpr_pair (enum machine_mode mode,
++ enum machine_mode mode1, HOST_WIDE_INT offset1,
++ enum machine_mode mode2, HOST_WIDE_INT offset2)
++{
++ return gen_rtx_PARALLEL
++ (mode,
++ gen_rtvec (2,
++ gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (mode1, FP_RETURN),
++ GEN_INT (offset1)),
++ gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (mode2, FP_RETURN + 1),
++ GEN_INT (offset2))));
++
++}
++
++/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
++ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
++ VALTYPE is null and MODE is the mode of the return value. */
++
++rtx
++riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
++{
++ if (valtype)
++ {
++ tree fields[2];
++ int unsigned_p;
++
++ mode = TYPE_MODE (valtype);
++ unsigned_p = TYPE_UNSIGNED (valtype);
++
++      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
++ return values, promote the mode here too. */
++ mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
++
++ /* Handle structures whose fields are returned in fa0/fa1. */
++ switch (riscv_fpr_return_fields (valtype, fields))
++ {
++ case 1:
++ return riscv_return_fpr_single (mode,
++ TYPE_MODE (TREE_TYPE (fields[0])));
++
++ case 2:
++ return riscv_return_fpr_pair (mode,
++ TYPE_MODE (TREE_TYPE (fields[0])),
++ int_byte_position (fields[0]),
++ TYPE_MODE (TREE_TYPE (fields[1])),
++ int_byte_position (fields[1]));
++ }
++
++ /* Only use FPRs for scalar, complex or vector types. */
++ if (!FLOAT_TYPE_P (valtype))
++ return gen_rtx_REG (mode, GP_RETURN);
++ }
++
++ if (riscv_return_mode_in_fpr_p (mode))
++ {
++ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ return riscv_return_fpr_pair (mode,
++ GET_MODE_INNER (mode), 0,
++ GET_MODE_INNER (mode),
++ GET_MODE_SIZE (mode) / 2);
++ else
++ return gen_rtx_REG (mode, FP_RETURN);
++ }
++
++ return gen_rtx_REG (mode, GP_RETURN);
++}
++
++/* Implement TARGET_RETURN_IN_MEMORY. Scalars and small structures
++ that fit in two registers are returned in a0/a1. */
++
++static bool
++riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
++{
++  /* TFmode values are always returned in memory. */
++ if (TYPE_MODE (type) == TFmode)
++ {
++ return true;
++ }
++
++ if (TREE_CODE (type) == RECORD_TYPE)
++ {
++ tree field;
++      /* If the struct contains a TFmode field, it is still returned in memory. */
++ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
++ if (TREE_CODE (field) == FIELD_DECL
++ && !error_operand_p (field)
++ && TYPE_MODE (TREE_TYPE (field)) == TFmode)
++ return true;
++ }
++
++ return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
++}
++
++/* Implement TARGET_PASS_BY_REFERENCE. */
++
++static bool
++riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
++ enum machine_mode mode, const_tree type,
++ bool named ATTRIBUTE_UNUSED)
++{
++  /* TFmode arguments are always passed by reference. */
++ if (mode == TFmode)
++ {
++ return true;
++ }
++ if (type && riscv_return_in_memory (type, NULL_TREE))
++ return true;
++ return targetm.calls.must_pass_in_stack (mode, type);
++}
++
++/* Implement TARGET_SETUP_INCOMING_VARARGS. */
++
++static void
++riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
++ tree type, int *pretend_size ATTRIBUTE_UNUSED,
++ int no_rtl)
++{
++ CUMULATIVE_ARGS local_cum;
++ int gp_saved;
++
++ /* The caller has advanced CUM up to, but not beyond, the last named
++ argument. Advance a local copy of CUM past the last "real" named
++ argument, to find out how many registers are left over. */
++ local_cum = *get_cumulative_args (cum);
++ riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
++
++  /* Find out how many registers we need to save. */
++ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
++
++ if (!no_rtl && gp_saved > 0)
++ {
++ rtx ptr, mem;
++
++ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
++ REG_PARM_STACK_SPACE (cfun->decl)
++ - gp_saved * UNITS_PER_WORD);
++ mem = gen_frame_mem (BLKmode, ptr);
++ set_mem_alias_set (mem, get_varargs_alias_set ());
++
++ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
++ mem, gp_saved);
++ }
++ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
++ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
++
++static void
++riscv_va_start (tree valist, rtx nextarg)
++{
++ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
++ std_expand_builtin_va_start (valist, nextarg);
++}
++
++/* Expand a call of type TYPE. RESULT is where the result will go (null
++ for "call"s and "sibcall"s), ADDR is the address of the function,
++ ARGS_SIZE is the size of the arguments and AUX is the value passed
++ to us by riscv_function_arg. Return the call itself. */
++
++rtx
++riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
++{
++ rtx pattern;
++
++ if (!call_insn_operand (addr, VOIDmode))
++ {
++ rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
++ riscv_emit_move (reg, addr);
++ addr = reg;
++ }
++
++ if (result == 0)
++ {
++ rtx (*fn) (rtx, rtx);
++
++ if (sibcall_p)
++ fn = gen_sibcall_internal;
++ else
++ fn = gen_call_internal;
++
++ pattern = fn (addr, args_size);
++ }
++ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
++ {
++ /* Handle return values created by riscv_return_fpr_pair. */
++ rtx (*fn) (rtx, rtx, rtx, rtx);
++ rtx reg1, reg2;
++
++ if (sibcall_p)
++ fn = gen_sibcall_value_multiple_internal;
++ else
++ fn = gen_call_value_multiple_internal;
++
++ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
++ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
++ pattern = fn (reg1, addr, args_size, reg2);
++ }
++ else
++ {
++ rtx (*fn) (rtx, rtx, rtx);
++
++ if (sibcall_p)
++ fn = gen_sibcall_value_internal;
++ else
++ fn = gen_call_value_internal;
++
++ /* Handle return values created by riscv_return_fpr_single. */
++ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
++ result = XEXP (XVECEXP (result, 0, 0), 0);
++ pattern = fn (result, addr, args_size);
++ }
++
++ return emit_call_insn (pattern);
++}
++
++/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
++ Assume that the areas do not overlap. */
++
++static void
++riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
++{
++ HOST_WIDE_INT offset, delta;
++ unsigned HOST_WIDE_INT bits;
++ int i;
++ enum machine_mode mode;
++ rtx *regs;
++
++  bits = MAX (BITS_PER_UNIT,
++	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
++
++ mode = mode_for_size (bits, MODE_INT, 0);
++ delta = bits / BITS_PER_UNIT;
++
++ /* Allocate a buffer for the temporary registers. */
++ regs = XALLOCAVEC (rtx, length / delta);
++
++  /* Load as many BITS-sized chunks as possible, where BITS reflects
++     the common alignment of SRC and DEST. */
++ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++ {
++ regs[i] = gen_reg_rtx (mode);
++ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
++ }
++
++ /* Copy the chunks to the destination. */
++ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
++
++ /* Mop up any left-over bytes. */
++ if (offset < length)
++ {
++ src = adjust_address (src, BLKmode, offset);
++ dest = adjust_address (dest, BLKmode, offset);
++ move_by_pieces (dest, src, length - offset,
++ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
++ }
++}
++
++/* Helper function for doing a loop-based block operation on memory
++ reference MEM. Each iteration of the loop will operate on LENGTH
++ bytes of MEM.
++
++ Create a new base register for use within the loop and point it to
++ the start of MEM. Create a new memory reference that uses this
++ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
++
++static void
++riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
++ rtx *loop_reg, rtx *loop_mem)
++{
++ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
++
++ /* Although the new mem does not refer to a known location,
++ it does keep up to LENGTH bytes of alignment. */
++ *loop_mem = change_address (mem, BLKmode, *loop_reg);
++ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
++}
++
++/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
++ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
++ the memory regions do not overlap. */
++
++static void
++riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
++ HOST_WIDE_INT bytes_per_iter)
++{
++ rtx label, src_reg, dest_reg, final_src, test;
++ HOST_WIDE_INT leftover;
++
++ leftover = length % bytes_per_iter;
++ length -= leftover;
++
++ /* Create registers and memory references for use within the loop. */
++ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
++ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
++
++ /* Calculate the value that SRC_REG should have after the last iteration
++ of the loop. */
++ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
++ 0, 0, OPTAB_WIDEN);
++
++ /* Emit the start of the loop. */
++ label = gen_label_rtx ();
++ emit_label (label);
++
++ /* Emit the loop body. */
++ riscv_block_move_straight (dest, src, bytes_per_iter);
++
++ /* Move on to the next block. */
++ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
++ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
++
++ /* Emit the loop condition. */
++ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
++ if (Pmode == DImode)
++ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
++ else
++ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
++
++ /* Mop up any left-over bytes. */
++ if (leftover)
++ riscv_block_move_straight (dest, src, leftover);
++}
++
++/* Expand a movmemsi instruction, which copies LENGTH bytes from
++ memory reference SRC to memory reference DEST. */
++
++bool
++riscv_expand_block_move (rtx dest, rtx src, rtx length)
++{
++ if (CONST_INT_P (length))
++ {
++ HOST_WIDE_INT factor, align;
++
++ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
++ factor = BITS_PER_WORD / align;
++
++ if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
++ {
++ riscv_block_move_straight (dest, src, INTVAL (length));
++ return true;
++ }
++ else if (optimize && align >= BITS_PER_WORD)
++ {
++ riscv_block_move_loop (dest, src, INTVAL (length),
++ RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
++ return true;
++ }
++ }
++ return false;
++}
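
For orientation, here is a minimal standalone sketch of the straight-line/loop/libcall
decision made by riscv_expand_block_move above. WORD_BITS and MAX_STRAIGHT_BYTES are
illustrative placeholders, not the values defined in riscv.h.

#include <stdbool.h>
#include <stdio.h>

#define WORD_BITS 64            /* assume RV64 */
#define MAX_STRAIGHT_BYTES 96   /* placeholder for RISCV_MAX_MOVE_BYTES_STRAIGHT */

/* Classify a fixed-size copy of LENGTH bytes whose common alignment is
   ALIGN bits, mirroring the tests in riscv_expand_block_move.  */
static const char *
classify_block_move (long length, int align, bool optimizing)
{
  int factor = WORD_BITS / align;

  if (length <= MAX_STRAIGHT_BYTES / factor)
    return "straight-line copy";
  if (optimizing && align >= WORD_BITS)
    return "unrolled loop";
  return "library call (memcpy)";
}

int
main (void)
{
  printf ("%s\n", classify_block_move (32, 64, true));    /* straight-line copy */
  printf ("%s\n", classify_block_move (4096, 64, true));  /* unrolled loop */
  printf ("%s\n", classify_block_move (4096, 8, true));   /* library call */
  return 0;
}
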
++
++/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
++ in context CONTEXT. HI_RELOC indicates a high-part reloc. */
++
++static void
++riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
++{
++ const char *reloc;
++
++ switch (riscv_classify_symbolic_expression (op))
++ {
++ case SYMBOL_ABSOLUTE:
++ reloc = hi_reloc ? "%hi" : "%lo";
++ break;
++
++ case SYMBOL_PCREL:
++ reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
++ break;
++
++ case SYMBOL_TLS_LE:
++ reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ fprintf (file, "%s(", reloc);
++ output_addr_const (file, riscv_strip_unspec_address (op));
++ fputc (')', file);
++}
++
++static const char *
++riscv_memory_model_suffix (enum memmodel model)
++{
++ switch (model)
++ {
++ case MEMMODEL_ACQ_REL:
++ case MEMMODEL_SEQ_CST:
++ case MEMMODEL_SYNC_SEQ_CST:
++ return ".sc";
++ case MEMMODEL_ACQUIRE:
++ case MEMMODEL_CONSUME:
++ case MEMMODEL_SYNC_ACQUIRE:
++ return ".aq";
++ case MEMMODEL_RELEASE:
++ case MEMMODEL_SYNC_RELEASE:
++ return ".rl";
++ case MEMMODEL_RELAXED:
++ return "";
++ default:
++ gcc_unreachable();
++ }
++}
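
The same suffix table, sketched in terms of the C11 memory orders rather than GCC's
internal memmodel values; the 'A' operand code handled further below is what appends
this suffix when an atomic instruction is printed.

#include <stdatomic.h>
#include <stdio.h>

/* Map a C11 memory order to the instruction suffix chosen above.  */
static const char *
amo_suffix (memory_order mo)
{
  switch (mo)
    {
    case memory_order_acq_rel:
    case memory_order_seq_cst:
      return ".sc";
    case memory_order_acquire:
    case memory_order_consume:
      return ".aq";
    case memory_order_release:
      return ".rl";
    default:
      return "";   /* relaxed */
    }
}

int
main (void)
{
  printf ("seq_cst -> \"%s\"\n", amo_suffix (memory_order_seq_cst));
  printf ("acquire -> \"%s\"\n", amo_suffix (memory_order_acquire));
  printf ("release -> \"%s\"\n", amo_suffix (memory_order_release));
  printf ("relaxed -> \"%s\"\n", amo_suffix (memory_order_relaxed));
  return 0;
}
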
++
++/* Implement TARGET_PRINT_OPERAND. The RISC-V-specific operand codes are:
++
++ 'h' Print the high-part relocation associated with OP, after stripping
++ any outermost HIGH.
++ 'R' Print the low-part relocation associated with OP.
++ 'C' Print the integer branch condition for comparison OP.
++ 'A' Print the atomic operation suffix for memory model OP.
++ 'y' Print the address register of memory operand OP.
++ 'z' Print x0 if OP is zero, otherwise print OP normally. */
++
++static void
++riscv_print_operand (FILE *file, rtx op, int letter)
++{
++ enum machine_mode mode = GET_MODE(op);
++ enum rtx_code code;
++
++ gcc_assert (op);
++ code = GET_CODE (op);
++
++ switch (letter)
++ {
++ case 'h':
++ if (code == HIGH)
++ op = XEXP (op, 0);
++ riscv_print_operand_reloc (file, op, true);
++ break;
++
++ case 'R':
++ riscv_print_operand_reloc (file, op, false);
++ break;
++
++ case 'C':
++ /* The RTL names match the instruction names. */
++ fputs (GET_RTX_NAME (code), file);
++ break;
++
++ case 'A':
++ fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
++ break;
++
++ default:
++ switch (code)
++ {
++ case REG:
++ if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ fprintf (file, "%s", reg_names[REGNO (op)]);
++ break;
++
++ case MEM:
++ if (letter == 'y')
++ fprintf (file, "%s", reg_names[REGNO(XEXP(op, 0))]);
++ else if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ else
++ output_address (mode, XEXP (op, 0));
++ break;
++
++ default:
++ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
++ fputs (reg_names[GP_REG_FIRST], file);
++ else if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ else
++ output_addr_const (file, riscv_strip_unspec_address (op));
++ break;
++ }
++ }
++}
++
++/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
++
++static void
++riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ struct riscv_address_info addr;
++
++ if (riscv_classify_address (&addr, x, word_mode, true))
++ switch (addr.type)
++ {
++ case ADDRESS_REG:
++ riscv_print_operand (file, addr.offset, 0);
++ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++ return;
++
++ case ADDRESS_LO_SUM:
++ riscv_print_operand_reloc (file, addr.offset, false);
++ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++ return;
++
++ case ADDRESS_CONST_INT:
++ output_addr_const (file, x);
++ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
++ return;
++
++ case ADDRESS_SYMBOLIC:
++ output_addr_const (file, riscv_strip_unspec_address (x));
++ return;
++ }
++ gcc_unreachable ();
++}
++
++static bool
++riscv_size_ok_for_small_data_p (int size)
++{
++ return g_switch_value && IN_RANGE (size, 1, g_switch_value);
++}
++
++/* Return true if EXP should be placed in the small data section. */
++
++static bool
++riscv_in_small_data_p (const_tree x)
++{
++ if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
++ return false;
++
++ if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
++ {
++ const char *sec = DECL_SECTION_NAME (x);
++ return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
++ }
++
++ return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
++}
++
++/* Return a section for X, handling small data. */
++
++static section *
++riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
++ unsigned HOST_WIDE_INT align)
++{
++ section *s = default_elf_select_rtx_section (mode, x, align);
++
++ if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
++ {
++ if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
++ {
++ /* Rename .rodata.cst* to .srodata.cst*. */
++ char *name = (char *) alloca (strlen (s->named.name) + 2);
++ sprintf (name, ".s%s", s->named.name + 1);
++ return get_section (name, s->named.common.flags, NULL);
++ }
++
++ if (s == data_section)
++ return sdata_section;
++ }
++
++ return s;
++}
++
++/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
++
++static void ATTRIBUTE_UNUSED
++riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
++{
++ switch (size)
++ {
++ case 4:
++ fputs ("\t.dtprelword\t", file);
++ break;
++
++ case 8:
++ fputs ("\t.dtpreldword\t", file);
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ output_addr_const (file, x);
++ fputs ("+0x800", file);
++}
++
++/* Make the last instruction frame-related and note that it performs
++ the operation described by FRAME_PATTERN. */
++
++static void
++riscv_set_frame_expr (rtx frame_pattern)
++{
++ rtx insn;
++
++ insn = get_last_insn ();
++ RTX_FRAME_RELATED_P (insn) = 1;
++ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ frame_pattern,
++ REG_NOTES (insn));
++}
++
++/* Return a frame-related rtx that stores REG at MEM.
++ REG must be a single register. */
++
++static rtx
++riscv_frame_set (rtx mem, rtx reg)
++{
++ rtx set;
++
++ set = gen_rtx_SET (mem, reg);
++ RTX_FRAME_RELATED_P (set) = 1;
++
++ return set;
++}
++
++/* Return true if the current function must save register REGNO. */
++
++static bool
++riscv_save_reg_p (unsigned int regno)
++{
++ bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
++ bool might_clobber = crtl->saves_all_registers
++ || df_regs_ever_live_p (regno)
++ || (regno == HARD_FRAME_POINTER_REGNUM
++ && frame_pointer_needed);
++
++ return (call_saved && might_clobber)
++ || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
++}
++
++/* Determine whether to call GPR save/restore routines. */
++static bool
++riscv_use_save_libcall (const struct riscv_frame_info *frame)
++{
++ if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
++ return false;
++
++ return frame->save_libcall_adjustment != 0;
++}
++
++/* Determine which GPR save/restore routine to call. */
++
++static unsigned
++riscv_save_libcall_count (unsigned mask)
++{
++ for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
++ if (BITSET_P (mask, n))
++ return CALLEE_SAVED_REG_NUMBER (n) + 1;
++ abort ();
++}
++
++/* Populate the current function's riscv_frame_info structure.
++
++ RISC-V stack frames grow downward. High addresses are at the top.
++
++ +-------------------------------+
++ | |
++ | incoming stack arguments |
++ | |
++ +-------------------------------+ <-- incoming stack pointer
++ | |
++ | callee-allocated save area |
++ | for arguments that are |
++ | split between registers and |
++ | the stack |
++ | |
++ +-------------------------------+ <-- arg_pointer_rtx
++ | |
++ | callee-allocated save area |
++ | for register varargs |
++ | |
++ +-------------------------------+ <-- hard_frame_pointer_rtx;
++ | | stack_pointer_rtx + gp_sp_offset
++ | GPR save area | + UNITS_PER_WORD
++ | |
++ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
++ | | + UNITS_PER_HWVALUE
++ | FPR save area |
++ | |
++ +-------------------------------+ <-- frame_pointer_rtx (virtual)
++ | |
++ | local variables |
++ | |
++ P +-------------------------------+
++ | |
++ | outgoing stack arguments |
++ | |
++ +-------------------------------+ <-- stack_pointer_rtx
++
++ Dynamic stack allocations such as alloca insert data at point P.
++ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
++ hard_frame_pointer_rtx unchanged. */
++
++static void
++riscv_compute_frame_info (void)
++{
++ struct riscv_frame_info *frame;
++ HOST_WIDE_INT offset;
++ unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
++
++ frame = &cfun->machine->frame;
++ memset (frame, 0, sizeof (*frame));
++
++ /* Find out which GPRs we need to save. */
++ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++ if (riscv_save_reg_p (regno))
++ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++ /* If this function calls eh_return, we must also save and restore the
++ EH data registers. */
++ if (crtl->calls_eh_return)
++ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
++ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++ /* Find out which FPRs we need to save. This loop must iterate over
++ the same space as its companion in riscv_for_each_saved_reg. */
++ if (TARGET_HARD_FLOAT)
++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ if (riscv_save_reg_p (regno))
++ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
++
++ /* At the bottom of the frame are any outgoing stack arguments. */
++ offset = crtl->outgoing_args_size;
++ /* Next are local stack variables. */
++ offset += RISCV_STACK_ALIGN (get_frame_size ());
++ /* The virtual frame pointer points above the local variables. */
++ frame->frame_pointer_offset = offset;
++ /* Next are the callee-saved FPRs. */
++ if (frame->fmask)
++ {
++ offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
++ frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
++ }
++ /* Next are the callee-saved GPRs. */
++ if (frame->mask)
++ {
++ unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
++ unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
++
++ /* Only use save/restore routines if they don't alter the stack size. */
++ if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
++ frame->save_libcall_adjustment = x_save_size;
++
++ offset += x_save_size;
++ frame->gp_sp_offset = offset - UNITS_PER_WORD;
++ }
++ /* The hard frame pointer points above the callee-saved GPRs. */
++ frame->hard_frame_pointer_offset = offset;
++ /* Above the hard frame pointer is the callee-allocated varargs save area. */
++ offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
++ frame->arg_pointer_offset = offset;
++ /* Next is the callee-allocated area for pretend stack arguments. */
++ offset += crtl->args.pretend_args_size;
++ frame->total_size = offset;
++ /* Above that are the incoming stack pointer and any incoming arguments. */
++
++ /* Only use save/restore routines when the GPRs are atop the frame. */
++ if (frame->hard_frame_pointer_offset != frame->total_size)
++ frame->save_libcall_adjustment = 0;
++}
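
A worked example of the offset arithmetic above, for a hypothetical RV64 function with
24 bytes of locals, two saved GPRs (ra and s0), and no FPR saves, outgoing arguments,
or varargs area. The 16-byte stack alignment used here is an assumption of the sketch,
not taken from this patch.

#include <stdio.h>

#define UNITS_PER_WORD 8
#define STACK_ALIGN(x) (((x) + 15) & ~15)

int
main (void)
{
  long offset = 0;                          /* outgoing args: none        */
  offset += STACK_ALIGN (24);               /* locals                     */
  long frame_pointer_offset = offset;       /* 32                         */
  long x_save_size = STACK_ALIGN (2 * UNITS_PER_WORD);
  offset += x_save_size;                    /* GPR save area              */
  long gp_sp_offset = offset - UNITS_PER_WORD;          /* 40             */
  long hard_frame_pointer_offset = offset;  /* 48                         */
  long total_size = offset;                 /* no varargs/pretend args    */

  printf ("frame_pointer_offset      = %ld\n", frame_pointer_offset);
  printf ("gp_sp_offset              = %ld\n", gp_sp_offset);
  printf ("hard_frame_pointer_offset = %ld\n", hard_frame_pointer_offset);
  printf ("total_size                = %ld\n", total_size);
  return 0;
}
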
++
++/* Make sure that we're not trying to eliminate to the wrong hard frame
++ pointer. */
++
++static bool
++riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
++{
++ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
++}
++
++/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
++ or argument pointer. TO is either the stack pointer or hard frame
++ pointer. */
++
++HOST_WIDE_INT
++riscv_initial_elimination_offset (int from, int to)
++{
++ HOST_WIDE_INT src, dest;
++
++ riscv_compute_frame_info ();
++
++ if (to == HARD_FRAME_POINTER_REGNUM)
++ dest = cfun->machine->frame.hard_frame_pointer_offset;
++ else if (to == STACK_POINTER_REGNUM)
++ dest = 0; /* this is the base of all offsets */
++ else
++ gcc_unreachable ();
++
++ if (from == FRAME_POINTER_REGNUM)
++ src = cfun->machine->frame.frame_pointer_offset;
++ else if (from == ARG_POINTER_REGNUM)
++ src = cfun->machine->frame.arg_pointer_offset;
++ else
++ gcc_unreachable ();
++
++ return src - dest;
++}
++
++/* Implement RETURN_ADDR_RTX. We do not support moving back to a
++ previous frame. */
++
++rtx
++riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
++{
++ if (count != 0)
++ return const0_rtx;
++
++ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
++}
++
++/* Emit code to change the current function's return address to
++ ADDRESS. SCRATCH is available as a scratch register, if needed.
++ ADDRESS and SCRATCH are both word-mode GPRs. */
++
++void
++riscv_set_return_address (rtx address, rtx scratch)
++{
++ rtx slot_address;
++
++ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
++ slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
++ cfun->machine->frame.gp_sp_offset);
++ riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
++}
++
++/* A function to save or store a register. The first argument is the
++ register and the second is the stack slot. */
++typedef void (*riscv_save_restore_fn) (rtx, rtx);
++
++/* Use FN to save or restore register REGNO. MODE is the register's
++ mode and OFFSET is the offset of its save slot from the current
++ stack pointer. */
++
++static void
++riscv_save_restore_reg (enum machine_mode mode, int regno,
++ HOST_WIDE_INT offset, riscv_save_restore_fn fn)
++{
++ rtx mem;
++
++ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
++ fn (gen_rtx_REG (mode, regno), mem);
++}
++
++/* Call FN for each register that is saved by the current function.
++ SP_OFFSET is the offset of the current stack pointer from the start
++ of the frame. */
++
++static void
++riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
++{
++ HOST_WIDE_INT offset;
++ int regno;
++
++ /* Save the link register and s-registers. */
++ offset = cfun->machine->frame.gp_sp_offset - sp_offset;
++ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++ {
++ riscv_save_restore_reg (word_mode, regno, offset, fn);
++ offset -= UNITS_PER_WORD;
++ }
++
++ /* This loop must iterate over the same space as its companion in
++ riscv_compute_frame_info. */
++ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
++ {
++ enum machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
++
++ riscv_save_restore_reg (mode, regno, offset, fn);
++ offset -= GET_MODE_SIZE (mode);
++ }
++}
++
++/* Save register REG to MEM. Make the instruction frame-related. */
++
++static void
++riscv_save_reg (rtx reg, rtx mem)
++{
++ riscv_emit_move (mem, reg);
++ riscv_set_frame_expr (riscv_frame_set (mem, reg));
++}
++
++/* Restore register REG from MEM. */
++
++static void
++riscv_restore_reg (rtx reg, rtx mem)
++{
++ rtx insn = riscv_emit_move (reg, mem);
++ rtx dwarf = NULL_RTX;
++ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
++ REG_NOTES (insn) = dwarf;
++
++ RTX_FRAME_RELATED_P (insn) = 1;
++}
++
++/* Return the code to invoke the GPR save routine. */
++
++const char *
++riscv_output_gpr_save (unsigned mask)
++{
++ static char buf[GP_REG_NUM * 32];
++ size_t len = 0;
++ unsigned n = riscv_save_libcall_count (mask), i;
++ unsigned frame_size = RISCV_STACK_ALIGN ((n + 1) * UNITS_PER_WORD);
++
++ len += sprintf (buf + len, "call\tt0,__riscv_save_%u", n);
++
++#ifdef DWARF2_UNWIND_INFO
++ /* Describe the effect of the call to __riscv_save_X. */
++ if (dwarf2out_do_cfi_asm ())
++ {
++ len += sprintf (buf + len, "\n\t.cfi_def_cfa_offset %u", frame_size);
++
++ for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
++ if (BITSET_P (cfun->machine->frame.mask, i))
++ len += sprintf (buf + len, "\n\t.cfi_offset %u,%d", i,
++ (CALLEE_SAVED_REG_NUMBER (i) + 2) * -UNITS_PER_WORD);
++ }
++#endif
++
++ return buf;
++}
++
++/* Expand the "prologue" pattern. */
++
++void
++riscv_expand_prologue (void)
++{
++ struct riscv_frame_info *frame = &cfun->machine->frame;
++ HOST_WIDE_INT size = frame->total_size;
++ unsigned mask = frame->mask;
++ rtx insn;
++
++ if (flag_stack_usage_info)
++ current_function_static_stack_size = size;
++
++ /* When optimizing for size, call a subroutine to save the registers. */
++ if (riscv_use_save_libcall (frame))
++ {
++ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
++ size -= frame->save_libcall_adjustment;
++ emit_insn (gen_gpr_save (GEN_INT (mask)));
++ }
++
++ /* Save the registers. Allocate up to RISCV_MAX_FIRST_STACK_STEP
++ bytes beforehand; this is enough to cover the register save area
++ without going out of range. */
++ if ((frame->mask | frame->fmask) != 0)
++ {
++ HOST_WIDE_INT step1;
++
++ step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
++ insn = gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (-step1));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ size -= step1;
++ riscv_for_each_saved_reg (size, riscv_save_reg);
++ }
++
++ frame->mask = mask; /* Undo the above fib. */
++
++ /* Set up the frame pointer, if we're using one. */
++ if (frame_pointer_needed)
++ {
++ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (frame->hard_frame_pointer_offset - size));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ }
++
++ /* Allocate the rest of the frame. */
++ if (size > 0)
++ {
++ if (SMALL_OPERAND (-size))
++ {
++ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (-size));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ }
++ else
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
++ emit_insn (gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ RISCV_PROLOGUE_TEMP (Pmode)));
++
++ /* Describe the effect of the previous instructions. */
++ insn = plus_constant (Pmode, stack_pointer_rtx, -size);
++ insn = gen_rtx_SET (stack_pointer_rtx, insn);
++ riscv_set_frame_expr (insn);
++ }
++ }
++}
++
++/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
++ says which. */
++
++void
++riscv_expand_epilogue (bool sibcall_p)
++{
++ /* Split the frame into two. STEP1 is the amount of stack we should
++ deallocate before restoring the registers. STEP2 is the amount we
++ should deallocate afterwards.
++
++ Start off by assuming that no registers need to be restored. */
++ struct riscv_frame_info *frame = &cfun->machine->frame;
++ unsigned mask = frame->mask;
++ HOST_WIDE_INT step1 = frame->total_size;
++ HOST_WIDE_INT step2 = 0;
++ bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
++ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
++ rtx insn;
++
++ if (!sibcall_p && riscv_can_use_return_insn ())
++ {
++ emit_jump_insn (gen_return ());
++ return;
++ }
++
++ /* Move past any dynamic stack allocations. */
++ if (cfun->calls_alloca)
++ {
++ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
++ if (!SMALL_OPERAND (INTVAL (adjust)))
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
++ adjust = RISCV_PROLOGUE_TEMP (Pmode);
++ }
++
++ emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
++ adjust));
++ }
++
++ /* If we need to restore registers, deallocate as much stack as
++ possible in the second step without going out of range. */
++ if ((frame->mask | frame->fmask) != 0)
++ {
++ step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
++ step1 -= step2;
++ }
++
++ /* Deallocate STEP1 bytes by adding them to the stack pointer. */
++ if (step1 > 0)
++ {
++ /* Get an rtx for STEP1 that we can add to the stack pointer. */
++ rtx adjust = GEN_INT (step1);
++ if (!SMALL_OPERAND (step1))
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
++ adjust = RISCV_PROLOGUE_TEMP (Pmode);
++ }
++
++ insn = emit_insn (
++ gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
++
++ rtx dwarf = NULL_RTX;
++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++ const0_rtx);
++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ REG_NOTES (insn) = dwarf;
++ }
++
++ if (use_restore_libcall)
++ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
++
++ /* Restore the registers. */
++ riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
++
++ if (use_restore_libcall)
++ {
++ frame->mask = mask; /* Undo the above fib. */
++ gcc_assert (step2 >= frame->save_libcall_adjustment);
++ step2 -= frame->save_libcall_adjustment;
++ }
++
++ /* Deallocate the final bit of the frame. */
++ if (step2 > 0)
++ {
++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (step2)));
++
++ rtx dwarf = NULL_RTX;
++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++ const0_rtx);
++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ REG_NOTES (insn) = dwarf;
++ }
++
++ if (use_restore_libcall)
++ {
++ emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
++ emit_jump_insn (gen_gpr_restore_return (ra));
++ return;
++ }
++
++ /* Add in the __builtin_eh_return stack adjustment. */
++ if (crtl->calls_eh_return)
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ EH_RETURN_STACKADJ_RTX));
++
++ if (!sibcall_p)
++ emit_jump_insn (gen_simple_return_internal (ra));
++}
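
A small sketch of the two-step deallocation used above: the frame is released in a
first step so that the register save area comes within range of the stack pointer, the
registers are restored, and the remainder is released afterwards. MAX_FIRST_STEP is a
placeholder for RISCV_MAX_FIRST_STACK_STEP, whose real value lives in riscv.h.

#include <stdio.h>

#define MAX_FIRST_STEP 2048   /* placeholder for RISCV_MAX_FIRST_STACK_STEP */

int
main (void)
{
  long total = 4128;          /* hypothetical frame size in bytes */
  long step1 = total;
  long step2 = step1 < MAX_FIRST_STEP ? step1 : MAX_FIRST_STEP;

  step1 -= step2;
  printf ("release %ld bytes, restore saved registers, release %ld bytes\n",
          step1, step2);
  return 0;
}
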
++
++/* Return nonzero if this function is known to have a null epilogue.
++ This allows the optimizer to omit jumps to jumps if no stack
++ was created. */
++
++bool
++riscv_can_use_return_insn (void)
++{
++ return reload_completed && cfun->machine->frame.total_size == 0;
++}
++
++/* Implement TARGET_REGISTER_MOVE_COST. */
++
++static int
++riscv_register_move_cost (enum machine_mode mode,
++ reg_class_t from, reg_class_t to)
++{
++ return SECONDARY_MEMORY_NEEDED (from, to, mode) ? 8 : 1;
++}
++
++/* Return true if register REGNO can store a value of mode MODE. */
++
++bool
++riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
++{
++ unsigned int size = GET_MODE_SIZE (mode);
++ enum mode_class mclass = GET_MODE_CLASS (mode);
++
++ if (GP_REG_P (regno))
++ {
++ if (size <= UNITS_PER_WORD)
++ return true;
++
++ /* Double-word values must be even-register-aligned. */
++ if (size <= 2 * UNITS_PER_WORD)
++ return regno % 2 == 0;
++
++ /* A __complex__ long long (CDImode) in 32-bit mode occupies
++ two double-words. */
++ if (size <= 4 * UNITS_PER_WORD)
++ return regno % 2 == 0;
++ }
++
++ if (FP_REG_P (regno))
++ {
++ unsigned max_size = UNITS_PER_FP_REG;
++
++ /* Only use callee-saved registers if a potential callee is guaranteed
++ to spill the requisite width. */
++ if (UNITS_PER_FP_ARG < UNITS_PER_FP_REG && !call_used_regs[regno])
++ max_size = UNITS_PER_FP_ARG;
++
++ if (mclass == MODE_FLOAT
++ || mclass == MODE_COMPLEX_FLOAT
++ || mclass == MODE_VECTOR_FLOAT)
++ return size <= max_size;
++ }
++
++ return false;
++}
++
++/* Implement HARD_REGNO_NREGS. */
++
++unsigned int
++riscv_hard_regno_nregs (int regno, enum machine_mode mode)
++{
++ if (FP_REG_P (regno))
++ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
++
++ /* All other registers are word-sized. */
++ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++}
++
++/* Implement CLASS_MAX_NREGS. */
++
++static unsigned char
++riscv_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
++{
++ if (reg_class_subset_p (FP_REGS, rclass))
++ return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
++
++ if (reg_class_subset_p (GR_REGS, rclass))
++ return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
++
++ return 0;
++}
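
Both hooks above reduce to the same ceiling division of the mode size by the register
width. A few representative cases, with register widths assumed rather than taken from
this patch (8-byte GPRs on RV64, 8- or 4-byte FPRs depending on the float configuration):

#include <stdio.h>

/* Ceiling division used by riscv_hard_regno_nregs above.  */
static unsigned
nregs (unsigned mode_size, unsigned reg_size)
{
  return (mode_size + reg_size - 1) / reg_size;
}

int
main (void)
{
  printf ("DFmode in 8-byte FPRs: %u\n", nregs (8, 8));   /* 1 */
  printf ("DFmode in 4-byte FPRs: %u\n", nregs (8, 4));   /* 2 */
  printf ("TImode in 8-byte GPRs: %u\n", nregs (16, 8));  /* 2 */
  return 0;
}
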
++
++/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
++
++static reg_class_t
++riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
++{
++ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
++ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
++ rclass;
++}
++
++/* Implement TARGET_MEMORY_MOVE_COST. */
++
++static int
++riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
++{
++ return (tune_info->memory_cost
++ + memory_move_secondary_cost (mode, rclass, in));
++}
++
++/* Implement TARGET_MODE_REP_EXTENDED. */
++
++static int
++riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
++{
++ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
++ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
++ return SIGN_EXTEND;
++
++ return UNKNOWN;
++}
++
++/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
++
++static bool
++riscv_scalar_mode_supported_p (enum machine_mode mode)
++{
++ if (ALL_FIXED_POINT_MODE_P (mode)
++ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
++ return true;
++
++ return default_scalar_mode_supported_p (mode);
++}
++
++/* Return the number of instructions that can be issued per cycle. */
++
++static int
++riscv_issue_rate (void)
++{
++ return tune_info->issue_rate;
++}
++
++/* Implement TARGET_ASM_FILE_START. */
++
++static void
++riscv_file_start (void)
++{
++ default_file_start ();
++
++ /* Instruct GAS to generate position-[in]dependent code. */
++ fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
++}
++
++/* This structure describes a single built-in function. */
++struct riscv_builtin_description {
++ /* The code of the main .md file instruction. See riscv_builtin_type
++ for more information. */
++ enum insn_code icode;
++
++ /* The name of the built-in function. */
++ const char *name;
++
++ /* Specifies how the function should be expanded. */
++ enum riscv_builtin_type builtin_type;
++
++ /* The function's prototype. */
++ enum riscv_function_type function_type;
++
++ /* Whether the function is available. */
++ unsigned int (*avail) (void);
++};
++
++static unsigned int
++riscv_builtin_avail_riscv (void)
++{
++ return 1;
++}
++
++/* Construct a riscv_builtin_description from the given arguments.
++
++ INSN is the name of the associated instruction pattern, without the
++ leading CODE_FOR_riscv_.
++
++ CODE is the floating-point condition code associated with the
++ function. It can be 'f' if the field is not applicable.
++
++ NAME is the name of the function itself, without the leading
++ "__builtin_riscv_".
++
++ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
++
++ AVAIL is the name of the availability predicate, without the leading
++ riscv_builtin_avail_. */
++#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
++ { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME, \
++ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
++ mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE and AVAIL
++ are as for RISCV_BUILTIN. */
++#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
++ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
++ function mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE
++ and AVAIL are as for RISCV_BUILTIN. */
++#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
++ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
++ FUNCTION_TYPE, AVAIL)
++
++static const struct riscv_builtin_description riscv_builtins[] = {
++ DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
++};
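
With the single entry above, the port exposes __builtin_riscv_nop to user code. A
minimal, hypothetical use might look like the following; it only compiles with a gcc
built from this port, hence the guard on the __riscv__ macro defined later in this patch.

/* Hypothetical user code exercising the one builtin defined above.  */
void
spin_briefly (void)
{
#ifdef __riscv__
  for (int i = 0; i < 16; i++)
    __builtin_riscv_nop ();
#else
  /* Not a RISC-V target; nothing to do.  */
#endif
}
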
++
++/* Index I is the function declaration for riscv_builtins[I], or null if the
++ function isn't defined on this target. */
++static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
++
++
++/* Source-level argument types. */
++#define RISCV_ATYPE_VOID void_type_node
++#define RISCV_ATYPE_INT integer_type_node
++#define RISCV_ATYPE_POINTER ptr_type_node
++#define RISCV_ATYPE_CPOINTER const_ptr_type_node
++
++/* Standard mode-based argument types. */
++#define RISCV_ATYPE_UQI unsigned_intQI_type_node
++#define RISCV_ATYPE_SI intSI_type_node
++#define RISCV_ATYPE_USI unsigned_intSI_type_node
++#define RISCV_ATYPE_DI intDI_type_node
++#define RISCV_ATYPE_UDI unsigned_intDI_type_node
++#define RISCV_ATYPE_SF float_type_node
++#define RISCV_ATYPE_DF double_type_node
++
++/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
++ their associated RISCV_ATYPEs. */
++#define RISCV_FTYPE_ATYPES1(A, B) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B
++
++#define RISCV_FTYPE_ATYPES2(A, B, C) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
++
++#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
++
++#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
++ RISCV_ATYPE_##E
++
++/* Return the function type associated with function prototype TYPE. */
++
++static tree
++riscv_build_function_type (enum riscv_function_type type)
++{
++ static tree types[(int) RISCV_MAX_FTYPE_MAX];
++
++ if (types[(int) type] == NULL_TREE)
++ switch (type)
++ {
++#define DEF_RISCV_FTYPE(NUM, ARGS) \
++ case RISCV_FTYPE_NAME##NUM ARGS: \
++ types[(int) type] \
++ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
++ NULL_TREE); \
++ break;
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++ default:
++ gcc_unreachable ();
++ }
++
++ return types[(int) type];
++}
++
++/* Implement TARGET_INIT_BUILTINS. */
++
++static void
++riscv_init_builtins (void)
++{
++ const struct riscv_builtin_description *d;
++ unsigned int i;
++
++ /* Iterate through all of the builtin descriptions, initializing the
++ corresponding builtin functions. */
++ for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
++ {
++ d = &riscv_builtins[i];
++ if (d->avail ())
++ riscv_builtin_decls[i]
++ = add_builtin_function (d->name,
++ riscv_build_function_type (d->function_type),
++ i, BUILT_IN_MD, NULL, NULL);
++ }
++}
++
++/* Implement TARGET_BUILTIN_DECL. */
++
++static tree
++riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
++{
++ if (code >= ARRAY_SIZE (riscv_builtins))
++ return error_mark_node;
++ return riscv_builtin_decls[code];
++}
++
++/* Take argument ARGNO from EXP's argument list and convert it into a
++ form suitable for input operand OPNO of instruction ICODE. Return the
++ value. */
++
++static rtx
++riscv_prepare_builtin_arg (enum insn_code icode,
++ unsigned int opno, tree exp, unsigned int argno)
++{
++ tree arg;
++ rtx value;
++ enum machine_mode mode;
++
++ arg = CALL_EXPR_ARG (exp, argno);
++ value = expand_normal (arg);
++ mode = insn_data[icode].operand[opno].mode;
++ if (!insn_data[icode].operand[opno].predicate (value, mode))
++ {
++ /* We need to get the mode from ARG for two reasons:
++
++ - to cope with address operands, where MODE is the mode of the
++ memory, rather than of VALUE itself.
++
++ - to cope with special predicates like pmode_register_operand,
++ where MODE is VOIDmode. */
++ value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
++
++ /* Check the predicate again. */
++ if (!insn_data[icode].operand[opno].predicate (value, mode))
++ {
++ error ("invalid argument to built-in function");
++ return const0_rtx;
++ }
++ }
++
++ return value;
++}
++
++/* Return an rtx suitable for output operand OP of instruction ICODE.
++ If TARGET is non-null, try to use it where possible. */
++
++static rtx
++riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
++{
++ enum machine_mode mode;
++
++ mode = insn_data[icode].operand[op].mode;
++ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
++ target = gen_reg_rtx (mode);
++
++ return target;
++}
++
++/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
++ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
++ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
++ suggests a good place to put the result. */
++
++static rtx
++riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
++ bool has_target_p)
++{
++ rtx ops[MAX_RECOG_OPERANDS];
++ int opno, argno;
++
++ /* Map any target to operand 0. */
++ opno = 0;
++ if (has_target_p)
++ {
++ target = riscv_prepare_builtin_target (icode, opno, target);
++ ops[opno] = target;
++ opno++;
++ }
++
++ /* Map the arguments to the other operands. The n_operands value
++ for an expander includes match_dups and match_scratches as well as
++ match_operands, so n_operands is only an upper bound on the number
++ of arguments to the expander function. */
++ gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
++ for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
++ ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
++
++ switch (opno)
++ {
++ case 2:
++ emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
++ break;
++
++ case 3:
++ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
++ break;
++
++ case 4:
++ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ return target;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN. */
++
++static rtx
++riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree fndecl;
++ unsigned int fcode, avail;
++ const struct riscv_builtin_description *d;
++
++ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
++ fcode = DECL_FUNCTION_CODE (fndecl);
++ gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
++ d = &riscv_builtins[fcode];
++ avail = d->avail ();
++ gcc_assert (avail != 0);
++ switch (d->builtin_type)
++ {
++ case RISCV_BUILTIN_DIRECT:
++ return riscv_expand_builtin_direct (d->icode, target, exp, true);
++
++ case RISCV_BUILTIN_DIRECT_NO_TARGET:
++ return riscv_expand_builtin_direct (d->icode, target, exp, false);
++ }
++ gcc_unreachable ();
++}
++
++/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
++ in order to avoid duplicating too much logic from elsewhere. */
++
++static void
++riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
++ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
++ tree function)
++{
++ rtx this_rtx, temp1, temp2, fnaddr;
++ rtx_insn *insn;
++ bool use_sibcall_p;
++
++ /* Pretend to be a post-reload pass while generating rtl. */
++ reload_completed = 1;
++
++ /* Mark the end of the (empty) prologue. */
++ emit_note (NOTE_INSN_PROLOGUE_END);
++
++ /* Determine if we can use a sibcall to call FUNCTION directly. */
++ fnaddr = XEXP (DECL_RTL (function), 0);
++ use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
++
++ /* We need two temporary registers in some cases. */
++ temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
++ temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
++
++ /* Find out which register contains the "this" pointer. */
++ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
++ else
++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++ /* Add DELTA to THIS_RTX. */
++ if (delta != 0)
++ {
++ rtx offset = GEN_INT (delta);
++ if (!SMALL_OPERAND (delta))
++ {
++ riscv_emit_move (temp1, offset);
++ offset = temp1;
++ }
++ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
++ }
++
++ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
++ if (vcall_offset != 0)
++ {
++ rtx addr;
++
++ /* Set TEMP1 to *THIS_RTX. */
++ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
++
++ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
++ addr = riscv_add_offset (temp2, temp1, vcall_offset);
++
++ /* Load the offset and add it to THIS_RTX. */
++ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
++ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
++ }
++
++ /* Jump to the target function. Use a sibcall if direct jumps are
++ allowed, otherwise load the address into a register first. */
++ if (use_sibcall_p)
++ {
++ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
++ SIBLING_CALL_P (insn) = 1;
++ }
++ else
++ {
++ riscv_emit_move(temp1, fnaddr);
++ emit_jump_insn (gen_indirect_jump (temp1));
++ }
++
++ /* Run just enough of rest_of_compilation. This sequence was
++ "borrowed" from alpha.c. */
++ insn = get_insns ();
++ split_all_insns_noflow ();
++ shorten_branches (insn);
++ final_start_function (insn, file, 1);
++ final (insn, file, 1);
++ final_end_function ();
++
++ /* Clean up the vars set above. Note that final_end_function resets
++ the global pointer for us. */
++ reload_completed = 0;
++}
++
++/* Allocate a chunk of memory for per-function machine-dependent data. */
++
++static struct machine_function *
++riscv_init_machine_status (void)
++{
++ return ggc_cleared_alloc<machine_function> ();
++}
++
++/* Implement TARGET_OPTION_OVERRIDE. */
++
++static void
++riscv_option_override (void)
++{
++ const struct riscv_cpu_info *cpu;
++
++#ifdef SUBTARGET_OVERRIDE_OPTIONS
++ SUBTARGET_OVERRIDE_OPTIONS;
++#endif
++
++ flag_pcc_struct_return = 0;
++
++ if (flag_pic)
++ g_switch_value = 0;
++
++ /* Prefer a call to memcpy over inline code when optimizing for size,
++ though see MOVE_RATIO in riscv.h. */
++ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
++ target_flags |= MASK_MEMCPY;
++
++ /* Handle -mtune. */
++ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
++ RISCV_TUNE_STRING_DEFAULT);
++ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
++
++ /* If the user hasn't specified a branch cost, use the processor's
++ default. */
++ if (riscv_branch_cost == 0)
++ riscv_branch_cost = tune_info->branch_cost;
++
++ /* Function to allocate machine-dependent function status. */
++ init_machine_status = &riscv_init_machine_status;
++
++ if (riscv_cmodel_string)
++ {
++ if (strcmp (riscv_cmodel_string, "medlow") == 0)
++ riscv_cmodel = CM_MEDLOW;
++ else if (strcmp (riscv_cmodel_string, "medany") == 0)
++ riscv_cmodel = CM_MEDANY;
++ else
++ error ("unsupported code model: %s", riscv_cmodel_string);
++ }
++
++ if (flag_pic)
++ riscv_cmodel = CM_PIC;
++
++ /* We get better code with explicit relocs for CM_MEDLOW, but
++ worse code for the others (for now). Pick the best default. */
++ if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
++ if (riscv_cmodel == CM_MEDLOW)
++ target_flags |= MASK_EXPLICIT_RELOCS;
++
++ /* Require that the ISA supports the requested floating-point ABI. */
++ switch (riscv_float_abi)
++ {
++ case FLOAT_ABI_SOFT:
++ break;
++
++ case FLOAT_ABI_SINGLE:
++ if (!TARGET_HARD_FLOAT)
++ error ("-mfloat-abi=single requires -msingle-float or -mdouble-float");
++ break;
++
++ case FLOAT_ABI_DOUBLE:
++ if (!TARGET_DOUBLE_FLOAT)
++ error ("-mfloat-abi=double requires -mdouble-float");
++ break;
++ }
++}
++
++/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
++
++static void
++riscv_conditional_register_usage (void)
++{
++ if (!TARGET_HARD_FLOAT)
++ {
++ for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ fixed_regs[regno] = call_used_regs[regno] = 1;
++ }
++}
++
++/* Return a register priority for hard reg REGNO. */
++static int
++riscv_register_priority (int regno)
++{
++ /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
++ if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
++ || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
++ return 1;
++
++ return 0;
++}
++
++/* Implement TARGET_TRAMPOLINE_INIT. */
++
++static void
++riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
++{
++ rtx addr, end_addr, mem;
++ uint32_t trampoline[4];
++ unsigned int i;
++ HOST_WIDE_INT static_chain_offset, target_function_offset;
++
++ /* Work out the offsets of the pointers from the start of the
++ trampoline code. */
++ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
++ static_chain_offset = TRAMPOLINE_CODE_SIZE;
++ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
++
++ /* Get pointers to the beginning and end of the code block. */
++ addr = force_reg (Pmode, XEXP (m_tramp, 0));
++ end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
++
++ /* auipc t0, 0
++ l[wd] t1, target_function_offset(t0)
++ l[wd] t0, static_chain_offset(t0)
++ jr t1
++ */
++ trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
++ trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
++ | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
++ | (target_function_offset << SHIFT_IMM);
++ trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RD)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
++ | (static_chain_offset << SHIFT_IMM);
++ trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
++
++ /* Copy the trampoline code. */
++ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
++ {
++ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
++ riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
++ }
++
++ /* Set up the static chain pointer field. */
++ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
++ riscv_emit_move (mem, chain_value);
++
++ /* Set up the target function field. */
++ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
++ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
++
++ /* Flush the code part of the trampoline. */
++ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
++ emit_insn (gen_clear_cache (addr, end_addr));
++}
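
For reference, the resulting trampoline layout, assuming RV64 with 8-byte pointers (the
pointer size is an assumption of this sketch): four 32-bit instructions, followed by the
static chain pointer and then the target function pointer.

#include <stdio.h>

int
main (void)
{
  unsigned code_size  = 4 * 4;                 /* four 32-bit instructions */
  unsigned ptr_size   = 8;                     /* RV64 assumption          */
  unsigned chain_off  = code_size;             /* static chain pointer     */
  unsigned target_off = code_size + ptr_size;  /* target function pointer  */

  printf ("code:            bytes [0, %u)\n", code_size);
  printf ("static chain:    byte offset %u\n", chain_off);
  printf ("target function: byte offset %u\n", target_off);
  return 0;
}
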
++
++/* Return leaf_function_p () and cache the result. */
++
++static bool
++riscv_leaf_function_p (void)
++{
++ if (cfun->machine->is_leaf == 0)
++ cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
++
++ return cfun->machine->is_leaf > 0;
++}
++
++/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
++
++static bool
++riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
++ tree exp ATTRIBUTE_UNUSED)
++{
++ /* When optimizing for size, don't use sibcalls in non-leaf routines. */
++ if (TARGET_SAVE_RESTORE)
++ return riscv_leaf_function_p ();
++
++ return true;
++}
++
++/* Return true if INSN should not be copied. */
++
++static bool
++riscv_cannot_copy_insn_p (rtx_insn *insn)
++{
++ return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
++}
++
++/* Initialize the GCC target structure. */
++#undef TARGET_ASM_ALIGNED_HI_OP
++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
++#undef TARGET_ASM_ALIGNED_SI_OP
++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
++#undef TARGET_ASM_ALIGNED_DI_OP
++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++
++#undef TARGET_OPTION_OVERRIDE
++#define TARGET_OPTION_OVERRIDE riscv_option_override
++
++#undef TARGET_LEGITIMIZE_ADDRESS
++#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
++
++#undef TARGET_SCHED_ISSUE_RATE
++#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
++
++#undef TARGET_FUNCTION_OK_FOR_SIBCALL
++#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
++
++#undef TARGET_REGISTER_MOVE_COST
++#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
++#undef TARGET_MEMORY_MOVE_COST
++#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS riscv_rtx_costs
++#undef TARGET_ADDRESS_COST
++#define TARGET_ADDRESS_COST riscv_address_cost
++
++#undef TARGET_PREFERRED_RELOAD_CLASS
++#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
++
++#undef TARGET_ASM_FILE_START
++#define TARGET_ASM_FILE_START riscv_file_start
++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
++
++#undef TARGET_EXPAND_BUILTIN_VA_START
++#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
++
++#undef TARGET_PROMOTE_FUNCTION_MODE
++#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
++
++#undef TARGET_RETURN_IN_MEMORY
++#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
++
++#undef TARGET_ASM_OUTPUT_MI_THUNK
++#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
++
++#undef TARGET_PRINT_OPERAND
++#define TARGET_PRINT_OPERAND riscv_print_operand
++#undef TARGET_PRINT_OPERAND_ADDRESS
++#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
++
++#undef TARGET_SETUP_INCOMING_VARARGS
++#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
++#undef TARGET_STRICT_ARGUMENT_NAMING
++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
++#undef TARGET_FUNCTION_ARG
++#define TARGET_FUNCTION_ARG riscv_function_arg
++#undef TARGET_FUNCTION_ARG_ADVANCE
++#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
++#undef TARGET_FUNCTION_ARG_BOUNDARY
++#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
++
++#undef TARGET_MODE_REP_EXTENDED
++#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
++
++#undef TARGET_SCALAR_MODE_SUPPORTED_P
++#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
++
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS riscv_init_builtins
++#undef TARGET_BUILTIN_DECL
++#define TARGET_BUILTIN_DECL riscv_builtin_decl
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
++
++#undef TARGET_HAVE_TLS
++#define TARGET_HAVE_TLS HAVE_AS_TLS
++
++#undef TARGET_CANNOT_FORCE_CONST_MEM
++#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
++
++#undef TARGET_LEGITIMATE_CONSTANT_P
++#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
++
++#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
++
++#ifdef HAVE_AS_DTPRELWORD
++#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
++#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
++#endif
++
++#undef TARGET_LEGITIMATE_ADDRESS_P
++#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
++
++#undef TARGET_CAN_ELIMINATE
++#define TARGET_CAN_ELIMINATE riscv_can_eliminate
++
++#undef TARGET_CONDITIONAL_REGISTER_USAGE
++#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
++
++#undef TARGET_CLASS_MAX_NREGS
++#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
++
++#undef TARGET_TRAMPOLINE_INIT
++#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
++
++#undef TARGET_IN_SMALL_DATA_P
++#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
++
++#undef TARGET_ASM_SELECT_RTX_SECTION
++#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
++
++#undef TARGET_MIN_ANCHOR_OFFSET
++#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
++
++#undef TARGET_MAX_ANCHOR_OFFSET
++#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
++
++#undef TARGET_LRA_P
++#define TARGET_LRA_P hook_bool_void_true
++
++#undef TARGET_REGISTER_PRIORITY
++#define TARGET_REGISTER_PRIORITY riscv_register_priority
++
++#undef TARGET_CANNOT_COPY_INSN_P
++#define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p
++
++struct gcc_target targetm = TARGET_INITIALIZER;
++
++#include "gt-riscv.h"
+diff --git original-gcc/gcc/config/riscv/riscv.h gcc-6.2.0/gcc/config/riscv/riscv.h
+new file mode 100644
+index 0000000..51b4a6a
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.h
+@@ -0,0 +1,1092 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_H
++#define GCC_RISCV_H
++
++#include "config/riscv/riscv-opts.h"
++
++/* Target CPU builtins. */
++#define TARGET_CPU_CPP_BUILTINS() \
++ do \
++ { \
++ builtin_assert ("machine=riscv"); \
++ builtin_assert ("cpu=riscv"); \
++ builtin_define ("__riscv__"); \
++ builtin_define ("__riscv"); \
++ \
++ if (TARGET_64BIT) \
++ builtin_define ("__riscv64"); \
++ else \
++ builtin_define ("__riscv32"); \
++ \
++ builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE); \
++ builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE); \
++ builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE); \
++ \
++ if (TARGET_RVC) \
++ builtin_define ("__riscv_compressed"); \
++ \
++ if (TARGET_ATOMIC) \
++ builtin_define ("__riscv_atomic"); \
++ \
++ if (TARGET_MUL) \
++ builtin_define ("__riscv_mul"); \
++ if (TARGET_DIV) \
++ builtin_define ("__riscv_div"); \
++ if (TARGET_DIV && TARGET_MUL) \
++ builtin_define ("__riscv_muldiv"); \
++ \
++ builtin_define_with_int_value ("__riscv_xlen", \
++ UNITS_PER_WORD * 8); \
++ if (TARGET_HARD_FLOAT) \
++ builtin_define_with_int_value ("__riscv_flen", \
++ UNITS_PER_FP_REG * 8); \
++ \
++ if (TARGET_HARD_FLOAT && TARGET_FDIV) \
++ { \
++ builtin_define ("__riscv_fdiv"); \
++ builtin_define ("__riscv_fsqrt"); \
++ } \
++ \
++ switch (riscv_float_abi) \
++ { \
++ case FLOAT_ABI_SOFT: \
++ builtin_define ("__riscv_float_abi_soft"); \
++ break; \
++ \
++ case FLOAT_ABI_SINGLE: \
++ builtin_define ("__riscv_float_abi_single"); \
++ break; \
++ \
++ case FLOAT_ABI_DOUBLE: \
++ builtin_define ("__riscv_float_abi_double"); \
++ break; \
++ } \
++ \
++ /* The base RISC-V ISA is always little-endian. */ \
++ builtin_define_std ("RISCVEL"); \
++ \
++ if (riscv_cmodel == CM_MEDANY) \
++ builtin_define ("_RISCV_CMODEL_MEDANY"); \
++ } \
++ while (0)
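
User code can test the macros defined above to adapt to the target; a small
self-contained example follows (the fallback branch is only there so the file also
builds on non-RISC-V hosts).

#include <stdio.h>

int
main (void)
{
#if defined (__riscv)
  printf ("RISC-V, XLEN = %d\n", __riscv_xlen);
# if defined (__riscv_compressed)
  printf ("RVC (compressed) instructions enabled\n");
# endif
# if defined (__riscv_atomic)
  printf ("A extension (atomics) enabled\n");
# endif
#else
  printf ("not a RISC-V target\n");
#endif
  return 0;
}
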
++
++/* Default target_flags if no switches are specified */
++
++#ifndef TARGET_DEFAULT
++#define TARGET_DEFAULT 0
++#endif
++
++#ifndef RISCV_TUNE_STRING_DEFAULT
++#define RISCV_TUNE_STRING_DEFAULT "rocket"
++#endif
++
++#if TARGET_64BIT_DEFAULT
++# define MULTILIB_ARCH_DEFAULT "m64"
++# define OPT_ARCH64 "!m32"
++# define OPT_ARCH32 "m32"
++#else
++# define MULTILIB_ARCH_DEFAULT "m32"
++# define OPT_ARCH64 "m64"
++# define OPT_ARCH32 "!m64"
++#endif
++
++#ifndef MULTILIB_DEFAULTS
++#define MULTILIB_DEFAULTS \
++ { MULTILIB_ARCH_DEFAULT }
++#endif
++
++
++/* Support for a compile-time default CPU, et cetera. The rules are:
++ --with-tune is ignored if -mtune is specified.
++ --with-float is ignored if -mfloat-abi is specified. */
++#define OPTION_DEFAULT_SPECS \
++ {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
++ {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
++ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
++ {"float", "%{!mfloat-abi=*:%{!mno-float:-mfloat-abi=%(VALUE)}}"}, \
++
++#define DRIVER_SELF_SPECS ""
++
++#ifdef IN_LIBGCC2
++#undef TARGET_64BIT
++/* Make this compile time constant for libgcc2 */
++#ifdef __riscv64
++#define TARGET_64BIT 1
++#else
++#define TARGET_64BIT 0
++#endif
++#endif /* IN_LIBGCC2 */
++
++/* Tell collect what flags to pass to nm. */
++#ifndef NM_FLAGS
++#define NM_FLAGS "-Bn"
++#endif
++
++#undef ASM_SPEC
++#define ASM_SPEC "\
++%(subtarget_asm_debugging_spec) \
++%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
++%{mrvc} %{mno-rvc} \
++%{fPIC|fpic|fPIE|fpie:-fpic} \
++%{march=*} \
++%{mfloat-abi=*} \
++%{mno-float:-mfloat-abi=soft} \
++%(subtarget_asm_spec)"
++
++/* Extra switches sometimes passed to the linker. */
++
++#ifndef LINK_SPEC
++#define LINK_SPEC "\
++%{!T:-dT riscv.ld} \
++%{m64:-melf64lriscv} \
++%{m32:-melf32lriscv} \
++%{shared}"
++#endif /* LINK_SPEC defined */
++
++/* This macro defines names of additional specifications to put in the specs
++ that can be used in various specifications like CC1_SPEC. Its definition
++ is an initializer with a subgrouping for each command option.
++
++ Each subgrouping contains a string constant, that defines the
++ specification name, and a string constant that used by the GCC driver
++ program.
++
++ Do not define this macro if it does not need to do anything. */
++
++#define EXTRA_SPECS \
++ { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT }, \
++ SUBTARGET_EXTRA_SPECS
++
++#ifndef SUBTARGET_EXTRA_SPECS
++#define SUBTARGET_EXTRA_SPECS
++#endif
++
++#define TARGET_DEFAULT_CMODEL CM_MEDLOW
++
++/* By default, turn on GDB extensions. */
++#define DEFAULT_GDB_EXTENSIONS 1
++
++#define LOCAL_LABEL_PREFIX "."
++#define USER_LABEL_PREFIX ""
++
++#define DWARF2_DEBUGGING_INFO 1
++#define DWARF2_ASM_LINE_DEBUG_INFO 0
++
++/* The mapping from gcc register number to DWARF 2 CFA column number. */
++#define DWARF_FRAME_REGNUM(REGNO) \
++ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
++
++/* The DWARF 2 CFA column which tracks the return address. */
++#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
++
++/* Don't emit .cfi_sections, as it does not work */
++#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
++#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
++
++/* Before the prologue, RA lives in RETURN_ADDR_REGNUM. */
++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
++
++/* Describe how we implement __builtin_eh_return. */
++#define EH_RETURN_DATA_REGNO(N) \
++ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
++
++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
++
++/* Target machine storage layout */
++
++#define BITS_BIG_ENDIAN 0
++#define BYTES_BIG_ENDIAN 0
++#define WORDS_BIG_ENDIAN 0
++
++#define MAX_BITS_PER_WORD 64
++
++/* Width of a word, in units (bytes). */
++#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
++#ifndef IN_LIBGCC2
++#define MIN_UNITS_PER_WORD 4
++#endif
++
++/* The `Q' extension is not yet supported. */
++#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4)
++
++/* The largest size of value that can be held in floating-point
++ registers and moved with a single instruction. */
++#define UNITS_PER_FP_ARG \
++ (riscv_float_abi == FLOAT_ABI_SOFT ? 0 : \
++ riscv_float_abi == FLOAT_ABI_SINGLE ? 4 : 8)
++
++/* The number of bytes in a double. */
++#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
++
++/* Set the sizes of the core types. */
++#define SHORT_TYPE_SIZE 16
++#define INT_TYPE_SIZE 32
++#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
++#define LONG_LONG_TYPE_SIZE 64
++
++#define FLOAT_TYPE_SIZE 32
++#define DOUBLE_TYPE_SIZE 64
++#define LONG_DOUBLE_TYPE_SIZE (TARGET_64BIT ? 128 : 64)
++
++#ifdef IN_LIBGCC2
++# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
++#endif
++
++/* Allocation boundary (in *bits*) for storing arguments in argument list. */
++#define PARM_BOUNDARY BITS_PER_WORD
++
++/* Allocation boundary (in *bits*) for the code of a function. */
++#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
++
++/* There is no point aligning anything to a rounder boundary than this. */
++#define BIGGEST_ALIGNMENT 128
++
++/* All accesses must be aligned. */
++#define STRICT_ALIGNMENT 1
++
++/* Define this if you wish to imitate the way many other C compilers
++ handle alignment of bitfields and the structures that contain
++ them.
++
++ The behavior is that the type written for a bit-field (`int',
++ `short', or other integer type) imposes an alignment for the
++ entire structure, as if the structure really did contain an
++ ordinary field of that type. In addition, the bit-field is placed
++ within the structure so that it would fit within such a field,
++ not crossing a boundary for it.
++
++ Thus, on most machines, a bit-field whose type is written as `int'
++ would not cross a four-byte boundary, and would force four-byte
++ alignment for the whole structure. (The alignment used may not
++ be four bytes; it is controlled by the other alignment
++ parameters.)
++
++ If the macro is defined, its definition should be a C expression;
++ a nonzero value for the expression enables this behavior. */
++
++#define PCC_BITFIELD_TYPE_MATTERS 1
++
++/* If defined, a C expression to compute the alignment given to a
++ constant that is being placed in memory. CONSTANT is the constant
++ and ALIGN is the alignment that the object would ordinarily have.
++ The value of this macro is used instead of that alignment to align
++ the object.
++
++ If this macro is not defined, then ALIGN is used.
++
++ The typical use of this macro is to increase alignment for string
++ constants to be word aligned so that `strcpy' calls that copy
++ constants can be done inline. */
++
++#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
++ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
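++
++/* For illustration: a string constant such as "hello" would normally
++   keep its byte alignment, but the macro above raises it to
++   BITS_PER_WORD (32 on RV32, 64 on RV64), so inline word-at-a-time
++   copies of such constants stay aligned.  */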
++
++/* If defined, a C expression to compute the alignment for a static
++ variable. TYPE is the data type, and ALIGN is the alignment that
++ the object would ordinarily have. The value of this macro is used
++ instead of that alignment to align the object.
++
++ If this macro is not defined, then ALIGN is used.
++
++ One use of this macro is to increase alignment of medium-size
++ data to make it all fit in fewer cache lines. Another is to
++ cause character arrays to be word-aligned so that `strcpy' calls
++ that copy constants to character arrays can be done inline. */
++
++#undef DATA_ALIGNMENT
++#define DATA_ALIGNMENT(TYPE, ALIGN) \
++ ((((ALIGN) < BITS_PER_WORD) \
++ && (TREE_CODE (TYPE) == ARRAY_TYPE \
++ || TREE_CODE (TYPE) == UNION_TYPE \
++ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
++
++/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
++ character arrays to be word-aligned so that `strcpy' calls that copy
++ constants to character arrays can be done inline, and 'strcmp' can be
++ optimised to use word loads. */
++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
++ DATA_ALIGNMENT (TYPE, ALIGN)
++
++/* Define if operations between registers always perform the operation
++ on the full register even if a narrower mode is specified. */
++#define WORD_REGISTER_OPERATIONS 1
++
++/* When in 64-bit mode, move insns will sign extend SImode and CCmode
++ moves. All other references are zero extended. */
++#define LOAD_EXTEND_OP(MODE) \
++ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
++ ? SIGN_EXTEND : ZERO_EXTEND)
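++
++/* Note on the SImode case: on RV64 the lw instruction sign-extends its
++   32-bit result into the full 64-bit register, which is why SImode
++   loads are described as SIGN_EXTEND here.  */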
++
++/* Define this macro if it is advisable to hold scalars in registers
++ in a wider mode than that declared by the program. In such cases,
++ the value is constrained to be within the bounds of the declared
++ type, but kept valid in the wider mode. The signedness of the
++ extension may differ from that of the type. */
++
++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
++ if (GET_MODE_CLASS (MODE) == MODE_INT \
++ && GET_MODE_SIZE (MODE) < 4) \
++ { \
++ (MODE) = Pmode; \
++ }
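++
++/* For example, a QImode or HImode scalar held in a register is widened
++   to Pmode (SImode on RV32, DImode on RV64); SImode and wider modes
++   are left as they are.  */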
++
++/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
++ Extensions of pointers to word_mode must be signed. */
++#define POINTERS_EXTEND_UNSIGNED false
++
++/* When floating-point registers are wider than integer ones, moves between
++ them must go through memory. */
++#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
++ (GET_MODE_SIZE (MODE) > UNITS_PER_WORD \
++ && ((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS))
++
++/* Define if loading short immediate values into registers sign extends. */
++#define SHORT_IMMEDIATES_SIGN_EXTEND 1
++
++/* Standard register usage. */
++
++/* Number of hardware registers. We have:
++
++ - 32 integer registers
++ - 32 floating point registers
++ - 2 fake registers:
++ - ARG_POINTER_REGNUM
++ - FRAME_POINTER_REGNUM */
++
++#define FIRST_PSEUDO_REGISTER 66
++
++/* x0, sp, gp, and tp are fixed. */
++
++#define FIXED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ /* Floating-point registers. */ \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++
++/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
++ The call RTLs themselves clobber ra. */
++
++#define CALL_USED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Floating-point registers. */ \
++ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++#define CALL_REALLY_USED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Floating-point registers. */ \
++ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++/* Internal macros to classify an ISA register's type. */
++
++#define GP_REG_FIRST 0
++#define GP_REG_LAST 31
++#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
++
++#define FP_REG_FIRST 32
++#define FP_REG_LAST 63
++#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
++
++/* The DWARF 2 CFA column which tracks the return address from a
++ signal handler context. This means that to maintain backwards
++ compatibility, no hard register can be assigned this column if it
++ would need to be handled by the DWARF unwinder. */
++#define DWARF_ALT_FRAME_RETURN_COLUMN 64
++
++#define GP_REG_P(REGNO) \
++ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
++#define FP_REG_P(REGNO) \
++ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
++
++#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
++
++/* Return coprocessor number from register number. */
++
++#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
++ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
++ : COP3_REG_P (REGNO) ? '3' : '?')
++
++
++#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
++
++#define HARD_REGNO_MODE_OK(REGNO, MODE) \
++ riscv_hard_regno_mode_ok_p (REGNO, MODE)
++
++#define MODES_TIEABLE_P(MODE1, MODE2) \
++ ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT \
++ && GET_MODE_CLASS (MODE2) == MODE_INT))
++
++/* Use s0 as the frame pointer if it is so requested. */
++#define HARD_FRAME_POINTER_REGNUM 8
++#define STACK_POINTER_REGNUM 2
++#define THREAD_POINTER_REGNUM 4
++
++/* These two registers don't really exist: they get eliminated to either
++ the stack or hard frame pointer. */
++#define ARG_POINTER_REGNUM 64
++#define FRAME_POINTER_REGNUM 65
++
++#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
++#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
++
++/* Register in which static-chain is passed to a function. */
++#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
++
++/* Registers used as temporaries in prologue/epilogue code.
++
++ The prologue registers mustn't conflict with any
++ incoming arguments, the static chain pointer, or the frame pointer.
++ The epilogue temporary mustn't conflict with the return registers,
++ the frame pointer, the EH stack adjustment, or the EH data registers. */
++
++#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
++#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
++
++#define MCOUNT_NAME "_mcount"
++
++#define NO_PROFILE_COUNTERS 1
++
++/* Emit rtl for profiling. Output assembler code to FILE
++ to call "_mcount" for profiling a function entry. */
++#define PROFILE_HOOK(LABEL) \
++ { \
++ rtx fun, ra; \
++ ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \
++ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
++ emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, ra, Pmode); \
++ }
++
++/* All the work done in PROFILE_HOOK, but still required. */
++#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
++
++/* Define this macro if it is as good or better to call a constant
++ function address than to call an address kept in a register. */
++#define NO_FUNCTION_CSE 1
++
++/* Define the classes of registers for register constraints in the
++ machine description. Also define ranges of constants.
++
++ One of the classes must always be named ALL_REGS and include all hard regs.
++ If there is more than one class, another class must be named NO_REGS
++ and contain no registers.
++
++ The name GENERAL_REGS must be the name of a class (or an alias for
++ another name such as ALL_REGS). This is the class of registers
++ that is allowed by "g" or "r" in a register constraint.
++ Also, registers outside this class are allocated only when
++ instructions express preferences for them.
++
++ The classes must be numbered in nondecreasing order; that is,
++ a larger-numbered class must never be contained completely
++ in a smaller-numbered class.
++
++ For any two classes, it is very desirable that there be another
++ class that represents their union. */
++
++enum reg_class
++{
++ NO_REGS, /* no registers in set */
++ T_REGS, /* registers used by indirect sibcalls */
++ JALR_REGS, /* registers used by indirect calls */
++ GR_REGS, /* integer registers */
++ FP_REGS, /* floating point registers */
++ FRAME_REGS, /* $arg and $frame */
++ ALL_REGS, /* all registers */
++ LIM_REG_CLASSES /* max value + 1 */
++};
++
++#define N_REG_CLASSES (int) LIM_REG_CLASSES
++
++#define GENERAL_REGS GR_REGS
++
++/* An initializer containing the names of the register classes as C
++ string constants. These names are used in writing some of the
++ debugging dumps. */
++
++#define REG_CLASS_NAMES \
++{ \
++ "NO_REGS", \
++ "T_REGS", \
++ "JALR_REGS", \
++ "GR_REGS", \
++ "FP_REGS", \
++ "FRAME_REGS", \
++ "ALL_REGS" \
++}
++
++/* An initializer containing the contents of the register classes,
++ as integers which are bit masks. The Nth integer specifies the
++ contents of class N. The way the integer MASK is interpreted is
++ that register R is in the class if `MASK & (1 << R)' is 1.
++
++ When the machine has more than 32 registers, an integer does not
++ suffice. Then the integers are replaced by sub-initializers,
++ braced groupings containing several integers. Each
++ sub-initializer must be suitable as an initializer for the type
++ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
++
++#define REG_CLASS_CONTENTS \
++{ \
++ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
++ { 0xf0000040, 0x00000000, 0x00000000 }, /* T_REGS */ \
++ { 0xffffff40, 0x00000000, 0x00000000 }, /* JALR_REGS */ \
++ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
++ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
++ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
++ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
++}
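++
++/* To read these masks: bit R of the Nth sub-initializer selects hard
++   register N * 32 + R.  For example, FP_REGS sets only the second word
++   (registers 32-63, the FPRs), and FRAME_REGS sets bits 0-1 of the
++   third word (registers 64-65, the fake argument and frame pointers).  */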
++
++/* A C expression whose value is a register class containing hard
++ register REGNO. In general there is more than one such class;
++ choose a class which is "minimal", meaning that no smaller class
++ also contains the register. */
++
++#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
++
++/* A macro whose definition is the name of the class to which a
++ valid base register must belong. A base register is one used in
++ an address which is the register value plus a displacement. */
++
++#define BASE_REG_CLASS GR_REGS
++
++/* A macro whose definition is the name of the class to which a
++ valid index register must belong. An index register is one used
++ in an address where its value is either multiplied by a scale
++ factor or added to another register (as well as added to a
++ displacement). */
++
++#define INDEX_REG_CLASS NO_REGS
++
++/* We generally want to put call-clobbered registers ahead of
++ call-saved ones. (IRA expects this.) */
++
++#define REG_ALLOC_ORDER \
++{ \
++ /* Call-clobbered GPRs. */ \
++ 15, 14, 13, 12, 11, 10, 16, 17, 6, 28, 29, 30, 31, 5, 7, 1, \
++ /* Call-saved GPRs. */ \
++ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
++ /* GPRs that can never be exposed to the register allocator. */ \
++ 0, 2, 3, 4, \
++ /* Call-clobbered FPRs. */ \
++ 47, 46, 45, 44, 43, 42, 32, 33, 34, 35, 36, 37, 38, 39, 48, 49, \
++ 60, 61, 62, 63, \
++ /* Call-saved FPRs. */ \
++ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
++ /* None of the remaining classes have defined call-saved \
++ registers. */ \
++ 64, 65 \
++}
++
++/* True if VALUE is a signed 12-bit number. */
++
++#define SMALL_OPERAND(VALUE) \
++ ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
++
++/* True if VALUE can be loaded into a register using LUI. */
++
++#define LUI_OPERAND(VALUE) \
++ (((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH) \
++ || ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
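++
++/* With IMM_BITS == 12, IMM_REACH is 0x1000, so SMALL_OPERAND accepts
++   exactly the I-type immediate range [-2048, 2047].  LUI_OPERAND accepts
++   values whose low 12 bits are clear and that fit in LUI's sign-extended
++   upper immediate: for example 0x12345000 qualifies, 0x12345678 does not.  */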
++
++#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
++ reg_classes_intersect_p (FP_REGS, CLASS)
++
++/* Stack layout; function entry, exit and calling. */
++
++#define STACK_GROWS_DOWNWARD 1
++
++#define FRAME_GROWS_DOWNWARD 1
++
++#define STARTING_FRAME_OFFSET 0
++
++#define RETURN_ADDR_RTX riscv_return_addr
++
++#define ELIMINABLE_REGS \
++{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
++
++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
++ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
++
++/* Allocate stack space for arguments at the beginning of each function. */
++#define ACCUMULATE_OUTGOING_ARGS 1
++
++/* The argument pointer always points to the first argument. */
++#define FIRST_PARM_OFFSET(FNDECL) 0
++
++#define REG_PARM_STACK_SPACE(FNDECL) 0
++
++/* Define this if it is the responsibility of the caller to
++ allocate the area reserved for arguments passed in registers.
++ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
++ of this macro is to determine whether the space is included in
++ `crtl->outgoing_args_size'. */
++#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
++
++#define STACK_BOUNDARY 128
++
++/* Symbolic macros for the registers used to return integer and floating
++ point values. */
++
++#define GP_RETURN GP_ARG_FIRST
++#define FP_RETURN \
++ (riscv_float_abi == FLOAT_ABI_SOFT ? GP_RETURN : FP_ARG_FIRST)
++
++#define MAX_ARGS_IN_REGISTERS 8
++
++/* Symbolic macros for the first/last argument registers. */
++
++#define GP_ARG_FIRST (GP_REG_FIRST + 10)
++#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
++#define FP_ARG_FIRST (FP_REG_FIRST + 10)
++#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++
++#define CALLEE_SAVED_REG_NUMBER(REGNO) \
++ ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 : \
++ (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
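++
++/* For example, s0 (x8) maps to 0, s1 (x9) to 1, s2 (x18) to 2, and so
++   on up to s11 (x27), which maps to 11; any other register yields -1.  */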
++
++#define LIBCALL_VALUE(MODE) \
++ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
++
++#define FUNCTION_VALUE(VALTYPE, FUNC) \
++ riscv_function_value (VALTYPE, FUNC, VOIDmode)
++
++#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
++
++/* 1 if N is a possible register number for function argument passing.
++ We have no FP argument registers when soft-float. When FP registers
++ are 32 bits, we can't directly reference the odd numbered ones. */
++
++/* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI. */
++#define FUNCTION_ARG_REGNO_P(N) \
++ (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
++ || (riscv_float_abi != FLOAT_ABI_SOFT \
++ && IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST)))
++
++/* The ABI views the arguments as a structure, of which the first 8
++ words go in registers and the rest go on the stack. If I < 8, the Ith
++ word might go in the Ith integer argument register or the Ith
++ floating-point argument register. */
++
++typedef struct {
++ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
++ unsigned int num_gprs;
++
++ /* Number of words passed on the stack. */
++ unsigned int stack_words;
++} CUMULATIVE_ARGS;
++
++/* Initialize a variable CUM of type CUMULATIVE_ARGS
++ for a call to a function whose data type is FNTYPE.
++ For a library call, FNTYPE is 0. */
++
++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
++ memset (&(CUM), 0, sizeof (CUM))
++
++#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
++
++/* The ABI requires 16-byte stack alignment, even on RV32. */
++#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
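++
++/* E.g. RISCV_STACK_ALIGN (20) == 32 and RISCV_STACK_ALIGN (16) == 16.  */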
++
++/* Define this macro if the code for function profiling should come
++ before the function prologue. Normally, the profiling code comes
++ after. */
++
++/* #define PROFILE_BEFORE_PROLOGUE */
++
++/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
++ the stack pointer does not matter. The value is tested only in
++ functions that have frame pointers.
++ No definition is equivalent to always zero. */
++
++#define EXIT_IGNORE_STACK 1
++
++
++/* Trampolines are a block of code followed by two pointers. */
++
++#define TRAMPOLINE_CODE_SIZE 16
++#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
++#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
++
++/* Addressing modes, and classification of registers for them. */
++
++#define REGNO_OK_FOR_INDEX_P(REGNO) 0
++#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
++
++/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
++ and check its validity for a certain class.
++ We have two alternate definitions for each of them.
++ The usual definition accepts all pseudo regs; the other rejects them all.
++ The symbol REG_OK_STRICT causes the latter definition to be used.
++
++ Most source files want to accept pseudo regs in the hope that
++ they will get allocated to the class that the insn wants them to be in.
++ Some source files that are used after register allocation
++ need to be strict. */
++
++#ifndef REG_OK_STRICT
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
++#else
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
++#endif
++
++#define REG_OK_FOR_INDEX_P(X) 0
++
++
++/* Maximum number of registers that can appear in a valid memory address. */
++
++#define MAX_REGS_PER_ADDRESS 1
++
++#define CONSTANT_ADDRESS_P(X) \
++ (CONSTANT_P (X) && memory_address_p (SImode, X))
++
++/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
++ 'the start of the function that this code is output in'. */
++
++#define ASM_OUTPUT_LABELREF(FILE,NAME) \
++ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
++ asm_fprintf ((FILE), "%U%s", \
++ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
++ else \
++ asm_fprintf ((FILE), "%U%s", (NAME))
++
++/* This flag marks functions that cannot be lazily bound. */
++#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
++#define SYMBOL_REF_BIND_NOW_P(RTX) \
++ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
++
++#define JUMP_TABLES_IN_TEXT_SECTION 0
++#define CASE_VECTOR_MODE SImode
++#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
++
++/* The load-address macro is used for PC-relative addressing of symbols
++ that bind locally. Don't use it for symbols that should be addressed
++ via the GOT. Also, avoid it for CM_MEDLOW, where LUI addressing
++ currently results in more opportunities for linker relaxation. */
++#define USE_LOAD_ADDRESS_MACRO(sym) \
++ (!TARGET_EXPLICIT_RELOCS && \
++ ((flag_pic \
++ && ((SYMBOL_REF_P (sym) && SYMBOL_REF_LOCAL_P (sym)) \
++ || ((GET_CODE (sym) == CONST) \
++ && SYMBOL_REF_P (XEXP (XEXP (sym, 0),0)) \
++ && SYMBOL_REF_LOCAL_P (XEXP (XEXP (sym, 0),0))))) \
++ || riscv_cmodel == CM_MEDANY))
++
++/* Define this as 1 if `char' should by default be signed; else as 0. */
++#define DEFAULT_SIGNED_CHAR 0
++
++/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
++#define MOVE_MAX UNITS_PER_WORD
++#define MAX_MOVE_MAX 8
++
++#define SLOW_BYTE_ACCESS 0
++
++#define SHIFT_COUNT_TRUNCATED 1
++
++/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
++ is done just by pretending it is already truncated. */
++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
++ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
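++
++/* For example, on RV64 truncating DImode to SImode is not a no-op (the
++   value must be explicitly re-extended, e.g. with sext.w), whereas
++   truncating to HImode or QImode, or any truncation on RV32, is.  */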
++
++/* Specify the machine mode that pointers have.
++ After generation of rtl, the compiler makes no further distinction
++ between pointers and any other objects of this machine mode. */
++
++#ifndef Pmode
++#define Pmode (TARGET_64BIT ? DImode : SImode)
++#endif
++
++/* Give call MEMs SImode since it is the "most permissive" mode
++ for both 32-bit and 64-bit targets. */
++
++#define FUNCTION_MODE SImode
++
++/* A C expression for the cost of a branch instruction. A value of 2
++ seems to minimize code size. */
++
++#define BRANCH_COST(speed_p, predictable_p) \
++ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
++
++#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
++
++/* Control the assembler format that we output. */
++
++/* Output to assembler file text saying following lines
++ may contain character constants, extra white space, comments, etc. */
++
++#ifndef ASM_APP_ON
++#define ASM_APP_ON " #APP\n"
++#endif
++
++/* Output to assembler file text saying following lines
++ no longer contain unusual constructs. */
++
++#ifndef ASM_APP_OFF
++#define ASM_APP_OFF " #NO_APP\n"
++#endif
++
++#define REGISTER_NAMES \
++{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
++ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
++ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
++ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
++ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
++ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
++ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
++ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
++ "arg", "frame", }
++
++#define ADDITIONAL_REGISTER_NAMES \
++{ \
++ { "x0", 0 + GP_REG_FIRST }, \
++ { "x1", 1 + GP_REG_FIRST }, \
++ { "x2", 2 + GP_REG_FIRST }, \
++ { "x3", 3 + GP_REG_FIRST }, \
++ { "x4", 4 + GP_REG_FIRST }, \
++ { "x5", 5 + GP_REG_FIRST }, \
++ { "x6", 6 + GP_REG_FIRST }, \
++ { "x7", 7 + GP_REG_FIRST }, \
++ { "x8", 8 + GP_REG_FIRST }, \
++ { "x9", 9 + GP_REG_FIRST }, \
++ { "x10", 10 + GP_REG_FIRST }, \
++ { "x11", 11 + GP_REG_FIRST }, \
++ { "x12", 12 + GP_REG_FIRST }, \
++ { "x13", 13 + GP_REG_FIRST }, \
++ { "x14", 14 + GP_REG_FIRST }, \
++ { "x15", 15 + GP_REG_FIRST }, \
++ { "x16", 16 + GP_REG_FIRST }, \
++ { "x17", 17 + GP_REG_FIRST }, \
++ { "x18", 18 + GP_REG_FIRST }, \
++ { "x19", 19 + GP_REG_FIRST }, \
++ { "x20", 20 + GP_REG_FIRST }, \
++ { "x21", 21 + GP_REG_FIRST }, \
++ { "x22", 22 + GP_REG_FIRST }, \
++ { "x23", 23 + GP_REG_FIRST }, \
++ { "x24", 24 + GP_REG_FIRST }, \
++ { "x25", 25 + GP_REG_FIRST }, \
++ { "x26", 26 + GP_REG_FIRST }, \
++ { "x27", 27 + GP_REG_FIRST }, \
++ { "x28", 28 + GP_REG_FIRST }, \
++ { "x29", 29 + GP_REG_FIRST }, \
++ { "x30", 30 + GP_REG_FIRST }, \
++ { "x31", 31 + GP_REG_FIRST }, \
++ { "f0", 0 + FP_REG_FIRST }, \
++ { "f1", 1 + FP_REG_FIRST }, \
++ { "f2", 2 + FP_REG_FIRST }, \
++ { "f3", 3 + FP_REG_FIRST }, \
++ { "f4", 4 + FP_REG_FIRST }, \
++ { "f5", 5 + FP_REG_FIRST }, \
++ { "f6", 6 + FP_REG_FIRST }, \
++ { "f7", 7 + FP_REG_FIRST }, \
++ { "f8", 8 + FP_REG_FIRST }, \
++ { "f9", 9 + FP_REG_FIRST }, \
++ { "f10", 10 + FP_REG_FIRST }, \
++ { "f11", 11 + FP_REG_FIRST }, \
++ { "f12", 12 + FP_REG_FIRST }, \
++ { "f13", 13 + FP_REG_FIRST }, \
++ { "f14", 14 + FP_REG_FIRST }, \
++ { "f15", 15 + FP_REG_FIRST }, \
++ { "f16", 16 + FP_REG_FIRST }, \
++ { "f17", 17 + FP_REG_FIRST }, \
++ { "f18", 18 + FP_REG_FIRST }, \
++ { "f19", 19 + FP_REG_FIRST }, \
++ { "f20", 20 + FP_REG_FIRST }, \
++ { "f21", 21 + FP_REG_FIRST }, \
++ { "f22", 22 + FP_REG_FIRST }, \
++ { "f23", 23 + FP_REG_FIRST }, \
++ { "f24", 24 + FP_REG_FIRST }, \
++ { "f25", 25 + FP_REG_FIRST }, \
++ { "f26", 26 + FP_REG_FIRST }, \
++ { "f27", 27 + FP_REG_FIRST }, \
++ { "f28", 28 + FP_REG_FIRST }, \
++ { "f29", 29 + FP_REG_FIRST }, \
++ { "f30", 30 + FP_REG_FIRST }, \
++ { "f31", 31 + FP_REG_FIRST }, \
++}
++
++/* Globalizing directive for a label. */
++#define GLOBAL_ASM_OP "\t.globl\t"
++
++/* This is how to store into the string LABEL
++ the symbol_ref name of an internal numbered label where
++ PREFIX is the class of label and NUM is the number within the class.
++ This is suitable for output with `assemble_name'. */
++
++#undef ASM_GENERATE_INTERNAL_LABEL
++#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
++ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
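++
++/* With LOCAL_LABEL_PREFIX "." this produces names such as "*.L42"; the
++   leading '*' tells assemble_name to emit the rest of the name
++   verbatim.  */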
++
++/* This is how to output an element of a case-vector that is absolute. */
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
++ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
++
++/* This is how to output an element of a PIC case-vector. */
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
++ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
++ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
++
++/* This is how to output an assembler line
++ that says to advance the location counter
++ to a multiple of 2**LOG bytes. */
++
++#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
++ fprintf (STREAM, "\t.align\t%d\n", (LOG))
++
++/* Define the strings to put out for each section in the object file. */
++#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
++#define DATA_SECTION_ASM_OP "\t.data" /* large data */
++#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
++#define BSS_SECTION_ASM_OP "\t.bss"
++#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
++#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
++
++#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
++do \
++ { \
++ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM], \
++ TARGET_64BIT ? "sd" : "sw", \
++ reg_names[REGNO], \
++ reg_names[STACK_POINTER_REGNUM]); \
++ } \
++while (0)
++
++#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
++do \
++ { \
++ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
++ TARGET_64BIT ? "ld" : "lw", \
++ reg_names[REGNO], \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM]); \
++ } \
++while (0)
++
++#define ASM_COMMENT_START "#"
++
++#undef SIZE_TYPE
++#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
++
++#undef PTRDIFF_TYPE
++#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
++
++/* The maximum number of bytes that can be copied by one iteration of
++ a movmemsi loop; see riscv_block_move_loop. */
++#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER 32
++
++/* The maximum number of bytes that can be copied by a straight-line
++ implementation of movmemsi; see riscv_block_move_straight. We want
++ to make sure that any loop-based implementation will iterate at
++ least twice. */
++#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
++
++/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
++
++#define RISCV_CALL_RATIO 6
++
++/* Any loop-based implementation of movmemsi will have at least
++ RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
++ moves, so allow individual copies of fewer elements.
++
++ When movmemsi is not available, use a value approximating
++ the length of a memcpy call sequence, so that move_by_pieces
++ will generate inline code if it is shorter than a function call.
++ Since move_by_pieces_ninsns counts memory-to-memory moves, but
++ we'll have to generate a load/store pair for each, halve the
++ value of RISCV_CALL_RATIO to take that into account. */
++
++#define MOVE_RATIO(speed) \
++ (HAVE_movmemsi \
++ ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
++ : RISCV_CALL_RATIO / 2)
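++
++/* Concretely: with movmemsi available this evaluates to
++   64 / MOVE_MAX, i.e. 8 on RV64 or 16 on RV32; otherwise to
++   RISCV_CALL_RATIO / 2 == 3.  */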
++
++/* For CLEAR_RATIO, when optimizing for size, give a better estimate
++ of the length of a memset call, but use the default otherwise. */
++
++#define CLEAR_RATIO(speed)\
++ ((speed) ? 15 : RISCV_CALL_RATIO)
++
++/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
++ optimizing for size adjust the ratio to account for the overhead of
++ loading the constant and replicating it across the word. */
++
++#define SET_RATIO(speed) \
++ ((speed) ? 15 : RISCV_CALL_RATIO - 2)
++
++#ifndef HAVE_AS_TLS
++#define HAVE_AS_TLS 0
++#endif
++
++#ifndef USED_FOR_TARGET
++
++extern const enum reg_class riscv_regno_to_class[];
++extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
++#endif
++
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
++ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
++
++/* ISA constants needed for code generation. */
++#define OPCODE_LW 0x2003
++#define OPCODE_LD 0x3003
++#define OPCODE_AUIPC 0x17
++#define OPCODE_JALR 0x67
++#define SHIFT_RD 7
++#define SHIFT_RS1 15
++#define SHIFT_IMM 20
++#define IMM_BITS 12
++
++#define IMM_REACH (1LL << IMM_BITS)
++#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
++#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
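++
++/* The rounding keeps CONST_LOW_PART within the signed 12-bit addi range.
++   For example, 0x12345678 splits into 0x12345000 + 0x678, while
++   0x12345fff splits into 0x12346000 + (-1).  */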
++
++#endif /* ! GCC_RISCV_H */
+diff --git original-gcc/gcc/config/riscv/riscv.md gcc-6.2.0/gcc/config/riscv/riscv.md
+new file mode 100644
+index 0000000..9661bb3
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.md
+@@ -0,0 +1,2377 @@
++;; Machine description for RISC-V for GNU compiler.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++ ;; Floating-point moves.
++ UNSPEC_LOAD_LOW
++ UNSPEC_LOAD_HIGH
++ UNSPEC_STORE_WORD
++
++ ;; GP manipulation.
++ UNSPEC_EH_RETURN
++
++ ;; Symbolic accesses.
++ UNSPEC_ADDRESS_FIRST
++ UNSPEC_PCREL
++ UNSPEC_LOAD_GOT
++ UNSPEC_TLS
++ UNSPEC_TLS_LE
++ UNSPEC_TLS_IE
++ UNSPEC_TLS_GD
++
++ UNSPEC_AUIPC
++
++ ;; Register save and restore.
++ UNSPEC_GPR_SAVE
++ UNSPEC_GPR_RESTORE
++
++ ;; Blockage and synchronisation.
++ UNSPEC_BLOCKAGE
++ UNSPEC_FENCE
++ UNSPEC_FENCE_I
++])
++
++(define_constants
++ [(RETURN_ADDR_REGNUM 1)
++ (T0_REGNUM 5)
++ (T1_REGNUM 6)
++])
++
++(include "predicates.md")
++(include "constraints.md")
++
++;; ....................
++;;
++;; Attributes
++;;
++;; ....................
++
++(define_attr "got" "unset,xgot_high,load"
++ (const_string "unset"))
++
++;; Classification of moves, extensions and truncations. Most values
++;; are as for "type" (see below) but there are also the following
++;; move-specific values:
++;;
++;; andi a single ANDI instruction
++;; shift_shift a shift left followed by a shift right
++;;
++;; This attribute is used to determine the instruction's length and
++;; scheduling type. For doubleword moves, the attribute always describes
++;; the split instructions; in some cases, it is more appropriate for the
++;; scheduling type to be "multi" instead.
++(define_attr "move_type"
++ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
++ const,logical,arith,andi,shift_shift"
++ (const_string "unknown"))
++
++;; Main data type used by the insn
++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
++ (const_string "unknown"))
++
++;; True if the main data type is twice the size of a word.
++(define_attr "dword_mode" "no,yes"
++ (cond [(and (eq_attr "mode" "DI,DF")
++ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
++ (const_string "yes")
++
++ (and (eq_attr "mode" "TI,TF")
++ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
++ (const_string "yes")]
++ (const_string "no")))
++
++;; Classification of each insn.
++;; branch conditional branch
++;; jump unconditional jump
++;; call unconditional call
++;; load load instruction(s)
++;; fpload floating point load
++;; store store instruction(s)
++;; fpstore floating point store
++;; mtc transfer to coprocessor
++;; mfc transfer from coprocessor
++;; const load constant
++;; arith integer arithmetic instructions
++;; logical integer logical instructions
++;; shift integer shift instructions
++;; slt set less than instructions
++;; imul integer multiply
++;; idiv integer divide
++;; move integer register move (addi rd, rs1, 0)
++;; fmove floating point register move
++;; fadd floating point add/subtract
++;; fmul floating point multiply
++;; fmadd floating point multiply-add
++;; fdiv floating point divide
++;; fcmp floating point compare
++;; fcvt floating point convert
++;; fsqrt floating point square root
++;; multi multiword sequence (or user asm statements)
++;; nop no operation
++;; ghost an instruction that produces no real code
++(define_attr "type"
++ "unknown,branch,jump,call,load,fpload,store,fpstore,
++ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
++ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
++ (cond [(eq_attr "got" "load") (const_string "load")
++
++ ;; If a doubleword move uses these expensive instructions,
++ ;; it is usually better to schedule them in the same way
++ ;; as the singleword form, rather than as "multi".
++ (eq_attr "move_type" "load") (const_string "load")
++ (eq_attr "move_type" "fpload") (const_string "fpload")
++ (eq_attr "move_type" "store") (const_string "store")
++ (eq_attr "move_type" "fpstore") (const_string "fpstore")
++ (eq_attr "move_type" "mtc") (const_string "mtc")
++ (eq_attr "move_type" "mfc") (const_string "mfc")
++
++ ;; These types of move are always single insns.
++ (eq_attr "move_type" "fmove") (const_string "fmove")
++ (eq_attr "move_type" "arith") (const_string "arith")
++ (eq_attr "move_type" "logical") (const_string "logical")
++ (eq_attr "move_type" "andi") (const_string "logical")
++
++ ;; These types of move are always split.
++ (eq_attr "move_type" "shift_shift")
++ (const_string "multi")
++
++ ;; These types of move are split for doubleword modes only.
++ (and (eq_attr "move_type" "move,const")
++ (eq_attr "dword_mode" "yes"))
++ (const_string "multi")
++ (eq_attr "move_type" "move") (const_string "move")
++ (eq_attr "move_type" "const") (const_string "const")]
++ (const_string "unknown")))
++
++;; Mode for conversion types (fcvt)
++;; I2S integer to float single (SI/DI to SF)
++;; I2D integer to float double (SI/DI to DF)
++;; S2I float to integer (SF to SI/DI)
++;; D2I float to integer (DF to SI/DI)
++;; D2S double to float single
++;; S2D float single to double
++
++(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
++ (const_string "unknown"))
++
++;; Length of instruction in bytes.
++(define_attr "length" ""
++ (cond [
++ ;; Direct branch instructions have a range of [-0x1000,0xffc],
++ ;; relative to the address of the delay slot. If a branch is
++ ;; outside this range, convert a branch like:
++ ;;
++ ;; bne r1,r2,target
++ ;;
++ ;; to:
++ ;;
++ ;; beq r1,r2,1f
++ ;; j target
++ ;; 1:
++ ;;
++ (eq_attr "type" "branch")
++ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
++ (le (minus (pc) (match_dup 0)) (const_int 4092)))
++ (const_int 4)
++ (const_int 8))
++
++ ;; Conservatively assume calls take two instructions (AUIPC + JALR).
++ ;; The linker will opportunistically relax the sequence to JAL.
++ (eq_attr "type" "call") (const_int 8)
++
++ ;; "Ghost" instructions occupy no space.
++ (eq_attr "type" "ghost") (const_int 0)
++
++ (eq_attr "got" "load") (const_int 8)
++
++ (eq_attr "type" "fcmp") (const_int 8)
++
++ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
++ (eq_attr "move_type" "shift_shift")
++ (const_int 8)
++
++ ;; Check for doubleword moves that are decomposed into two
++ ;; instructions.
++ (and (eq_attr "move_type" "mtc,mfc,move")
++ (eq_attr "dword_mode" "yes"))
++ (const_int 8)
++
++ ;; Doubleword CONST{,N} moves are split into two word
++ ;; CONST{,N} moves.
++ (and (eq_attr "move_type" "const")
++ (eq_attr "dword_mode" "yes"))
++ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
++
++ ;; Otherwise, constants, loads and stores are handled by external
++ ;; routines.
++ (eq_attr "move_type" "load,fpload")
++ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
++ (eq_attr "move_type" "store,fpstore")
++ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
++ ] (const_int 4)))
++
++;; Is copying of this instruction disallowed?
++(define_attr "cannot_copy" "no,yes" (const_string "no"))
++
++;; Describe a user's asm statement.
++(define_asm_attributes
++ [(set_attr "type" "multi")])
++
++;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
++;; from the same template.
++(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
++(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
++
++;; A copy of GPR that can be used when a pattern has two independent
++;; modes.
++(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :P to be used for patterns that operate on
++;; pointer-sized quantities. Exactly one of the two alternatives will match.
++(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
++
++;; 32-bit integer moves for which we provide move patterns.
++(define_mode_iterator IMOVE32 [SI])
++
++;; 64-bit modes for which we provide move patterns.
++(define_mode_iterator MOVE64 [DI DF])
++
++;; This mode iterator allows the QI and HI extension patterns to be
++;; defined from the same template.
++(define_mode_iterator SHORT [QI HI])
++
++;; Likewise the 64-bit truncate-and-shift patterns.
++(define_mode_iterator SUBDI [QI HI SI])
++(define_mode_iterator HISI [HI SI])
++(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :ANYF to be used where SF or DF is allowed.
++(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
++ (DF "TARGET_DOUBLE_FLOAT")])
++(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
++ (SF "TARGET_HARD_FLOAT")
++ (DF "TARGET_DOUBLE_FLOAT")])
++
++;; A floating-point mode for which moves involving FPRs may need to be split.
++(define_mode_iterator SPLITF
++ [(DF "!TARGET_64BIT")
++ (DI "!TARGET_64BIT")
++ (TF "TARGET_64BIT")])
++
++;; This attribute gives the length suffix for a sign- or zero-extension
++;; instruction.
++(define_mode_attr size [(QI "b") (HI "h")])
++
++;; Mode attributes for loads.
++(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
++
++;; Instruction names for stores.
++(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
++
++;; This attribute gives the best constraint to use for registers of
++;; a given mode.
++(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
++
++;; This attribute gives the format suffix for floating-point operations.
++(define_mode_attr fmt [(SF "s") (DF "d")])
++
++;; This attribute gives the format suffix for atomic memory operations.
++(define_mode_attr amo [(SI "w") (DI "d")])
++
++;; This attribute gives the upper-case mode name for one unit of a
++;; floating-point mode.
++(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
++
++;; This attribute gives the integer mode that has half the size of
++;; the controlling mode.
++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
++
++;; This code iterator allows signed and unsigned widening multiplications
++;; to use the same template.
++(define_code_iterator any_extend [sign_extend zero_extend])
++
++;; This code iterator allows the two right shift instructions to be
++;; generated from the same template.
++(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
++
++;; This code iterator allows the three shift instructions to be generated
++;; from the same template.
++(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
++
++;; This code iterator allows unsigned and signed division to be generated
++;; from the same template.
++(define_code_iterator any_div [div udiv])
++
++;; This code iterator allows unsigned and signed modulus to be generated
++;; from the same template.
++(define_code_iterator any_mod [mod umod])
++
++;; These code iterators allow the signed and unsigned scc operations to use
++;; the same template.
++(define_code_iterator any_gt [gt gtu])
++(define_code_iterator any_ge [ge geu])
++(define_code_iterator any_lt [lt ltu])
++(define_code_iterator any_le [le leu])
++
++;; <u> expands to an empty string when doing a signed operation and
++;; "u" when doing an unsigned operation.
++(define_code_attr u [(sign_extend "") (zero_extend "u")
++ (div "") (udiv "u")
++ (mod "") (umod "u")
++ (gt "") (gtu "u")
++ (ge "") (geu "u")
++ (lt "") (ltu "u")
++ (le "") (leu "u")])
++
++;; <su> is like <u>, but the signed form expands to "s" rather than "".
++(define_code_attr su [(sign_extend "s") (zero_extend "u")])
++
++;; <optab> expands to the name of the optab for a particular code.
++(define_code_attr optab [(ashift "ashl")
++ (ashiftrt "ashr")
++ (lshiftrt "lshr")
++ (ior "ior")
++ (xor "xor")
++ (and "and")
++ (plus "add")
++ (minus "sub")])
++
++;; <insn> expands to the name of the insn that implements a particular code.
++(define_code_attr insn [(ashift "sll")
++ (ashiftrt "sra")
++ (lshiftrt "srl")
++ (ior "or")
++ (xor "xor")
++ (and "and")
++ (plus "add")
++ (minus "sub")])
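++
++;; For example, the "<u>divsi3" pattern below combines any_div with <u>
++;; to produce both divsi3 and udivsi3, emitting "div"/"divu" on RV32 and
++;; "divw"/"divuw" on RV64.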
++
++;; Ghost instructions produce no real code and introduce no hazards.
++;; They exist purely to express an effect on dataflow.
++(define_insn_reservation "ghost" 0
++ (eq_attr "type" "ghost")
++ "nothing")
++
++;;
++;; ....................
++;;
++;; ADDITION
++;;
++;; ....................
++;;
++
++(define_insn "add<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ ""
++ "fadd.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "add<mode>3"
++ [(set (match_operand:GPR 0 "register_operand")
++ (plus:GPR (match_operand:GPR 1 "register_operand")
++ (match_operand:GPR 2 "arith_operand")))]
++ "")
++
++(define_insn "*addsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
++ (match_operand:GPR2 2 "arith_operand" "r,Q")))]
++ ""
++ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddi3"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++ (match_operand:DI 2 "arith_operand" "r,Q")))]
++ "TARGET_64BIT"
++ "add\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*addsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI
++ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
++ (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddisisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
++ (match_operand:SI 2 "arith_operand" "r,Q")))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (truncate:SI
++ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++ (match_operand:DI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; SUBTRACTION
++;;
++;; ....................
++;;
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ ""
++ "fsub.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "sub<mode>3"
++ [(set (match_operand:GPR 0 "register_operand")
++ (minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
++ (match_operand:GPR 2 "register_operand")))]
++ "")
++
++(define_insn "*subdi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_64BIT"
++ "sub\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*subsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
++ (match_operand:GPR2 2 "register_operand" "r")))]
++ ""
++ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*subdisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
++ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subdisisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subsidisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subdi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (truncate:SI
++ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
++ (match_operand:DI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; MULTIPLICATION
++;;
++;; ....................
++;;
++
++(define_insn "mul<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ ""
++ "fmul.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmul")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "mul<mode>3"
++ [(set (match_operand:GPR 0 "register_operand")
++ (mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
++ (match_operand:GPR 2 "register_operand")))]
++ "TARGET_MUL")
++
++(define_insn "*mulsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR2 2 "register_operand" "r")))]
++ "TARGET_MUL"
++ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*muldisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
++ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulw\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*muldi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (mult:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulw\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*muldi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mul\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++;;
++;; ........................
++;;
++;; MULTIPLICATION HIGH-PART
++;;
++;; ........................
++;;
++
++
++(define_expand "<u>mulditi3"
++ [(set (match_operand:TI 0 "register_operand")
++ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
++ (any_extend:TI (match_operand:DI 2 "register_operand"))))]
++ "TARGET_MUL && TARGET_64BIT"
++{
++ rtx low = gen_reg_rtx (DImode);
++ emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++ rtx high = gen_reg_rtx (DImode);
++ emit_insn (gen_<u>muldi3_highpart (high, operands[1], operands[2]));
++
++ emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++ emit_move_insn (gen_highpart (DImode, operands[0]), high);
++ DONE;
++})
++
++(define_insn "<u>muldi3_highpart"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (any_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (any_extend:TI
++ (match_operand:DI 2 "register_operand" "r")))
++ (const_int 64))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulh<u>\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++(define_expand "usmulditi3"
++ [(set (match_operand:TI 0 "register_operand")
++ (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
++ (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
++ "TARGET_MUL && TARGET_64BIT"
++{
++ rtx low = gen_reg_rtx (DImode);
++ emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++ rtx high = gen_reg_rtx (DImode);
++ emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
++
++ emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++ emit_move_insn (gen_highpart (DImode, operands[0]), high);
++ DONE;
++})
++
++(define_insn "usmuldi3_highpart"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (zero_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (sign_extend:TI
++ (match_operand:DI 2 "register_operand" "r")))
++ (const_int 64))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulhsu\t%0,%2,%1"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++(define_expand "<u>mulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (any_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (any_extend:DI
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_MUL && !TARGET_64BIT"
++{
++ rtx temp = gen_reg_rtx (SImode);
++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++ emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
++ operands[1], operands[2]));
++ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++ DONE;
++}
++ )
++
++(define_insn "<u>mulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (any_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (any_extend:DI
++ (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_MUL && !TARGET_64BIT"
++ "mulh<u>\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++
++(define_expand "usmulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (zero_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_MUL && !TARGET_64BIT"
++{
++ rtx temp = gen_reg_rtx (SImode);
++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
++ operands[1], operands[2]));
++ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++ DONE;
++}
++ )
++
++(define_insn "usmulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (zero_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI
++ (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_MUL && !TARGET_64BIT"
++ "mulhsu\t%0,%2,%1"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; DIVISION and REMAINDER
++;;
++;; ....................
++;;
++
++(define_insn "<u>divsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_DIV"
++ { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "SI")])
++
++(define_insn "<u>divdi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_div:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_DIV && TARGET_64BIT"
++ "div<u>\t%0,%1,%2"
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "DI")])
++
++(define_insn "<u>modsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_mod:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_DIV"
++ { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "SI")])
++
++(define_insn "<u>moddi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_mod:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_DIV && TARGET_64BIT"
++ "rem<u>\t%0,%1,%2"
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "DI")])
++
++(define_insn "div<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_FDIV"
++ "fdiv.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fdiv")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; SQUARE ROOT
++;;
++;; ....................
++
++(define_insn "sqrt<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_FDIV"
++{
++ return "fsqrt.<fmt>\t%0,%1";
++}
++ [(set_attr "type" "fsqrt")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; Floating point multiply accumulate instructions.
++
++(define_insn "fma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "fms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT"
++ "fmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "nfma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT"
++ "fnmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "nfms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
++ "TARGET_HARD_FLOAT"
++ "fnmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; modulo signed zeros, -(a*b+c) == -c-a*b
++(define_insn "*nfma<mode>4_fastmath"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF
++ (match_operand:ANYF 3 "register_operand" "f")
++ (mult:ANYF
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++ (match_operand:ANYF 2 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fnmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; modulo signed zeros, -(a*b-c) == c-a*b
++(define_insn "*nfms<mode>4_fastmath"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF
++ (match_operand:ANYF 3 "register_operand" "f")
++ (mult:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fnmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; ABSOLUTE VALUE
++;;
++;; ....................
++
++(define_insn "abs<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fabs.<fmt>\t%0,%1"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++
++;;
++;; ....................
++;;
++;; MIN/MAX
++;;
++;; ....................
++
++(define_insn "smin<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmin.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "smax<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmax.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++
++;;
++;; ....................
++;;
++;; NEGATION and ONE'S COMPLEMENT '
++;;
++;; ....................
++
++(define_insn "neg<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fneg.<fmt>\t%0,%1"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "one_cmpl<mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (not:GPR (match_operand:GPR 1 "register_operand" "r")))]
++ ""
++ "not\t%0,%1"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; LOGICAL
++;;
++;; ....................
++;;
++
++(define_insn "and<mode>3"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++ (match_operand:GPR 2 "arith_operand" "r,Q")))]
++ ""
++ "and\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "ior<mode>3"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++ (match_operand:GPR 2 "arith_operand" "r,Q")))]
++ ""
++ "or\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "xor<mode>3"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++ (match_operand:GPR 2 "arith_operand" "r,Q")))]
++ ""
++ "xor\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; TRUNCATION
++;;
++;; ....................
++
++(define_insn "truncdfsf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.s.d\t%0,%1"
++ [(set_attr "type" "fcvt")
++ (set_attr "cnv_mode" "D2S")
++ (set_attr "mode" "SF")])
++
++;; Integer truncation patterns. Truncating to HImode/QImode is a no-op.
++;; Truncating from DImode to SImode is not, because we always keep SImode
++;; values sign-extended in a register so we can safely use DImode branches
++;; and comparisons on SImode values.
++
++(define_insn "truncdisi2"
++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
++ (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
++ "TARGET_64BIT"
++ "@
++ sext.w\t%0,%1
++ sw\t%1,%0"
++ [(set_attr "move_type" "arith,store")
++ (set_attr "mode" "SI")])
++
++;; Combiner patterns to optimize shift/truncate combinations.
++
++(define_insn "*ashr_trunc<mode>"
++ [(set (match_operand:SUBDI 0 "register_operand" "=r")
++ (truncate:SUBDI
++ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "const_arith_operand" ""))))]
++ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
++ "sra\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "*lshr32_trunc<mode>"
++ [(set (match_operand:SUBDI 0 "register_operand" "=r")
++ (truncate:SUBDI
++ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
++ (const_int 32))))]
++ "TARGET_64BIT"
++ "sra\t%0,%1,32"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; ZERO EXTENSION
++;;
++;; ....................
++
++;; Extension insns.
++
++(define_insn_and_split "zero_extendsidi2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
++ "TARGET_64BIT"
++ "@
++ #
++ lwu\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (const_int 32)))
++ (set (match_dup 0)
++ (lshiftrt:DI (match_dup 0) (const_int 32)))]
++ { operands[1] = gen_lowpart (DImode, operands[1]); }
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "DI")])
++
++;; Combine is not allowed to convert this insn into a zero_extendsidi2
++;; because of TRULY_NOOP_TRUNCATION.
++
++(define_insn_and_split "*clear_upper32"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
++ (const_int 4294967295)))]
++ "TARGET_64BIT"
++{
++ if (which_alternative == 0)
++ return "#";
++
++ operands[1] = gen_lowpart (SImode, operands[1]);
++ return "lwu\t%0,%1";
++}
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (const_int 32)))
++ (set (match_dup 0)
++ (lshiftrt:DI (match_dup 0) (const_int 32)))]
++ ""
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "DI")])
++
++(define_insn_and_split "zero_extendhi<GPR:mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ #
++ lhu\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:GPR (match_dup 1) (match_dup 2)))
++ (set (match_dup 0)
++ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
++ {
++ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
++ operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
++ }
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "zero_extendqi<SUPERQI:mode>2"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++ (zero_extend:SUPERQI
++ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ and\t%0,%1,0xff
++ lbu\t%0,%1"
++ [(set_attr "move_type" "andi,load")
++ (set_attr "mode" "<SUPERQI:MODE>")])
++
++;;
++;; ....................
++;;
++;; SIGN EXTENSION
++;;
++;; ....................
++
++;; Extension insns.
++;; Those for integer source operand are ordered widest source type first.
++
++;; When TARGET_64BIT, all SImode integer registers should already be in
++;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2). We can
++;; therefore get rid of register->register instructions if we constrain
++;; the source to be in the same register as the destination.
++;;
++;; The register alternative has type "arith" so that the pre-reload
++;; scheduler will treat it as a move. This reflects what happens if
++;; the register alternative needs a reload.
++(define_insn_and_split "extendsidi2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
++ "TARGET_64BIT"
++ "@
++ #
++ lw\t%0,%1"
++ "&& reload_completed && register_operand (operands[1], VOIDmode)"
++ [(set (match_dup 0) (match_dup 1))]
++{
++ if (REGNO (operands[0]) == REGNO (operands[1]))
++ {
++ emit_note (NOTE_INSN_DELETED);
++ DONE;
++ }
++ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
++}
++ [(set_attr "move_type" "move,load")
++ (set_attr "mode" "DI")])
++
++(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++ (sign_extend:SUPERQI
++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ #
++ l<SHORT:size>\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
++ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
++{
++ operands[0] = gen_lowpart (SImode, operands[0]);
++ operands[1] = gen_lowpart (SImode, operands[1]);
++ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
++ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
++}
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "SI")])
++
++(define_insn "extendsfdf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.d.s\t%0,%1"
++ [(set_attr "type" "fcvt")
++ (set_attr "cnv_mode" "S2D")
++ (set_attr "mode" "DF")])
++
++;;
++;; ....................
++;;
++;; CONVERSIONS
++;;
++;; ....................
++
++(define_insn "fix_truncdfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.w.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fix_truncsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.w.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++
++(define_insn "fix_truncdfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++ "fcvt.l.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fix_truncsfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.l.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++
++(define_insn "floatsidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.d.w\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatdidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++ "fcvt.d.l\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatsisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.s.w\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "floatdisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.s.l\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "floatunssidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.d.wu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatunsdidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++ "fcvt.d.lu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatunssisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.s.wu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "floatunsdisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.s.lu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "fixuns_truncdfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.wu.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fixuns_truncsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.wu.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++
++(define_insn "fixuns_truncdfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_64BIT && TARGET_DOUBLE_FLOAT"
++ "fcvt.lu.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fixuns_truncsfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.lu.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++;;
++;; ....................
++;;
++;; DATA MOVEMENT
++;;
++;; ....................
++
++;; Lower-level instructions for loading an address from the GOT.
++;; We could use MEMs, but an unspec gives more optimization
++;; opportunities.
++
++(define_insn "got_load<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_LOAD_GOT))]
++ "flag_pic"
++ "la\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "tls_add_tp_le<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "register_operand" "r")
++ (match_operand:P 2 "register_operand" "r")
++ (match_operand:P 3 "symbolic_operand" "")]
++ UNSPEC_TLS_LE))]
++ "!flag_pic || flag_pie"
++ "add\t%0,%1,%2,%%tprel_add(%3)"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_gd<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_TLS_GD))]
++ "flag_pic"
++ "la.tls.gd\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_ie<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_TLS_IE))]
++ "flag_pic"
++ "la.tls.ie\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "auipc<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")
++ (match_operand:P 2 "const_int_operand")
++ (pc)]
++ UNSPEC_AUIPC))]
++ ""
++ ".LA%2: auipc\t%0,%h1"
++ [(set_attr "type" "arith")
++ (set_attr "cannot_copy" "yes")])
++
++;; Instructions for adding the low 16 bits of an address to a register.
++;; Operand 2 is the address: riscv_print_operand works out which relocation
++;; should be applied.
++
++(define_insn "*low<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (lo_sum:P (match_operand:P 1 "register_operand" "r")
++ (match_operand:P 2 "symbolic_operand" "")))]
++ ""
++ "add\t%0,%1,%R2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "<MODE>")])
++
++;; Allow combine to split complex const_int load sequences, using operand 2
++;; to store the intermediate results. See move_operand for details.
++(define_split
++ [(set (match_operand:GPR 0 "register_operand")
++ (match_operand:GPR 1 "splittable_const_int_operand"))
++ (clobber (match_operand:GPR 2 "register_operand"))]
++ ""
++ [(const_int 0)]
++{
++ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
++ DONE;
++})
++
++;; Likewise, for symbolic operands.
++(define_split
++ [(set (match_operand:P 0 "register_operand")
++ (match_operand:P 1))
++ (clobber (match_operand:P 2 "register_operand"))]
++ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
++ [(set (match_dup 0) (match_dup 3))]
++{
++ riscv_split_symbol (operands[2], operands[1],
++ MAX_MACHINE_MODE, &operands[3]);
++})
++
++;; 64-bit integer moves
++
++;; Unlike most other insns, the move insns can't be split with '
++;; different predicates, because register spilling and other parts of
++;; the compiler, have memoized the insn number already.
++
++(define_expand "movdi"
++ [(set (match_operand:DI 0 "")
++ (match_operand:DI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movdi_32bit"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m")
++ (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f"))]
++ "!TARGET_64BIT
++ && (register_operand (operands[0], DImode)
++ || reg_or_0_operand (operands[1], DImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
++ (set_attr "mode" "DI")])
++
++(define_insn "*movdi_64bit"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m,*f,*f,*r,*f,*m")
++ (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f"))]
++ "TARGET_64BIT
++ && (register_operand (operands[0], DImode)
++ || reg_or_0_operand (operands[1], DImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
++ (set_attr "mode" "DI")])
++
++;; 32-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler, have memoized the insn number already.
++
++(define_expand "mov<mode>"
++ [(set (match_operand:IMOVE32 0 "")
++ (match_operand:IMOVE32 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*mov<mode>_internal"
++ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
++ (match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
++ "(register_operand (operands[0], <MODE>mode)
++ || reg_or_0_operand (operands[1], <MODE>mode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
++ (set_attr "mode" "SI")])
++
++;; 16-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler, have memoized the insn number already.
++;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
++
++(define_expand "movhi"
++ [(set (match_operand:HI 0 "")
++ (match_operand:HI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movhi_internal"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
++ "(register_operand (operands[0], HImode)
++ || reg_or_0_operand (operands[1], HImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++ (set_attr "mode" "HI")])
++
++;; HImode constant generation; see riscv_move_integer for details.
++;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
++
++(define_insn "add<mode>hi3"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
++ (match_operand:HISI 2 "arith_operand" "r,Q")))]
++ ""
++ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "HI")])
++
++(define_insn "xor<mode>hi3"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
++ (match_operand:HISI 2 "arith_operand" "r,Q")))]
++ ""
++ "xor\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "HI")])
++
++;; 8-bit Integer moves
++
++(define_expand "movqi"
++ [(set (match_operand:QI 0 "")
++ (match_operand:QI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movqi_internal"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
++ "(register_operand (operands[0], QImode)
++ || reg_or_0_operand (operands[1], QImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++ (set_attr "mode" "QI")])
++
++;; 32-bit floating point moves
++
++(define_expand "movsf"
++ [(set (match_operand:SF 0 "")
++ (match_operand:SF 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movsf_hardfloat"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
++ "TARGET_HARD_FLOAT
++ && (register_operand (operands[0], SFmode)
++ || reg_or_0_operand (operands[1], SFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++ (set_attr "mode" "SF")])
++
++(define_insn "*movsf_softfloat"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
++ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
++ "!TARGET_HARD_FLOAT
++ && (register_operand (operands[0], SFmode)
++ || reg_or_0_operand (operands[1], SFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,load,store")
++ (set_attr "mode" "SF")])
++
++;; 64-bit floating point moves
++
++(define_expand "movdf"
++ [(set (match_operand:DF 0 "")
++ (match_operand:DF 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
++ DONE;
++})
++
++;; In RV32, we lack mtf.d/mff.d. Go through memory instead.
++;; (except for moving a constant 0 to an FPR. for that we use fcvt.d.w.)
++(define_insn "*movdf_hardfloat_rv32"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
++ "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_insn "*movdf_hardfloat_rv64"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
++ "TARGET_64BIT && TARGET_DOUBLE_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_insn "*movdf_softfloat"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
++ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
++ "!TARGET_DOUBLE_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_split
++ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
++ (match_operand:MOVE64 1 "move_operand"))]
++ "reload_completed && !TARGET_64BIT
++ && riscv_split_64bit_move_p (operands[0], operands[1])"
++ [(const_int 0)]
++{
++ riscv_split_doubleword_move (operands[0], operands[1]);
++ DONE;
++})
++
++;; 64-bit paired-single floating point moves
++
++;; Load the low word of operand 0 with operand 1.
++(define_insn "load_low<mode>"
++ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
++ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
++ UNSPEC_LOAD_LOW))]
++ "TARGET_HARD_FLOAT"
++{
++ operands[0] = riscv_subword (operands[0], 0);
++ return riscv_output_move (operands[0], operands[1]);
++}
++ [(set_attr "move_type" "mtc,fpload")
++ (set_attr "mode" "<HALFMODE>")])
++
++;; Load the high word of operand 0 from operand 1, preserving the value
++;; in the low word.
++(define_insn "load_high<mode>"
++ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
++ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
++ (match_operand:SPLITF 2 "register_operand" "0,0")]
++ UNSPEC_LOAD_HIGH))]
++ "TARGET_HARD_FLOAT"
++{
++ operands[0] = riscv_subword (operands[0], 1);
++ return riscv_output_move (operands[0], operands[1]);
++}
++ [(set_attr "move_type" "mtc,fpload")
++ (set_attr "mode" "<HALFMODE>")])
++
++;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
++;; high word and 0 to store the low word.
++(define_insn "store_word<mode>"
++ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
++ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
++ (match_operand 2 "const_int_operand")]
++ UNSPEC_STORE_WORD))]
++ "TARGET_HARD_FLOAT"
++{
++ operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
++ return riscv_output_move (operands[0], operands[1]);
++}
++ [(set_attr "move_type" "mfc,fpstore")
++ (set_attr "mode" "<HALFMODE>")])
++
++;; Expand in-line code to clear the instruction cache between operand[0] and
++;; operand[1].
++(define_expand "clear_cache"
++ [(match_operand 0 "pmode_register_operand")
++ (match_operand 1 "pmode_register_operand")]
++ ""
++ "
++{
++ emit_insn(gen_fence_i());
++ DONE;
++}")
++
++(define_insn "fence"
++ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
++ ""
++ "%|fence%-")
++
++(define_insn "fence_i"
++ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
++ ""
++ "fence.i")
++
++;; Block moves, see riscv.c for more details.
++;; Argument 0 is the destination
++;; Argument 1 is the source
++;; Argument 2 is the length
++;; Argument 3 is the alignment
++
++(define_expand "movmemsi"
++ [(parallel [(set (match_operand:BLK 0 "general_operand")
++ (match_operand:BLK 1 "general_operand"))
++ (use (match_operand:SI 2 ""))
++ (use (match_operand:SI 3 "const_int_operand"))])]
++ "!TARGET_MEMCPY"
++{
++ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
++ DONE;
++ else
++ FAIL;
++})
++
++;;
++;; ....................
++;;
++;; SHIFTS
++;;
++;; ....................
++
++(define_insn "<optab>si3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2])
++ & (GET_MODE_BITSIZE (SImode) - 1));
++
++ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "*<optab>disi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
++ (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
++ "TARGET_64BIT"
++ "<insn>w\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "*ashldi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (ashift:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "const_arith_operand" "I"))))]
++ "TARGET_64BIT && INTVAL (operands[2]) < 32"
++ "sllw\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "*ashldisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ashift:SI (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR2 2 "arith_operand" "rI")))]
++ "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
++ "sllw\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "<optab>di3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "arith_operand" "rI")))]
++ "TARGET_64BIT"
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2])
++ & (GET_MODE_BITSIZE (DImode) - 1));
++
++ return "<insn>\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "DI")])
++
++(define_insn "<optab>si3_extend"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "arith_operand" "rI"))))]
++ "TARGET_64BIT"
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
++
++ return "<insn>w\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; CONDITIONAL BRANCHES
++;;
++;; ....................
++
++;; Conditional branches
++
++(define_insn "*branch_order<mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 1 "order_operator"
++ [(match_operand:GPR 2 "register_operand" "r")
++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++{
++ if (GET_CODE (operands[3]) == CONST_INT)
++ return "b%C1z\t%2,%0";
++ return "b%C1\t%2,%3,%0";
++}
++ [(set_attr "type" "branch")
++ (set_attr "mode" "none")])
++
++;; Used to implement built-in functions.
++(define_expand "condjump"
++ [(set (pc)
++ (if_then_else (match_operand 0)
++ (label_ref (match_operand 1))
++ (pc)))])
++
++(define_expand "cbranch<mode>4"
++ [(set (pc)
++ (if_then_else (match_operator 0 "comparison_operator"
++ [(match_operand:GPR 1 "register_operand")
++ (match_operand:GPR 2 "nonmemory_operand")])
++ (label_ref (match_operand 3 ""))
++ (pc)))]
++ ""
++{
++ riscv_expand_conditional_branch (operands);
++ DONE;
++})
++
++(define_expand "cbranch<mode>4"
++ [(set (pc)
++ (if_then_else (match_operator 0 "comparison_operator"
++ [(match_operand:ANYF 1 "register_operand")
++ (match_operand:ANYF 2 "register_operand")])
++ (label_ref (match_operand 3 ""))
++ (pc)))]
++ ""
++{
++ riscv_expand_conditional_branch (operands);
++ DONE;
++})
++
++(define_insn_and_split "*branch_on_bit<GPR:mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 0 "equality_operator"
++ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
++ (const_int 1)
++ (match_operand 3 "branch_on_bit_operand"))
++ (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))
++ (clobber (match_scratch:GPR 4 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(set (match_dup 4)
++ (ashift:GPR (match_dup 2) (match_dup 3)))
++ (set (pc)
++ (if_then_else
++ (match_op_dup 0 [(match_dup 4) (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))]
++{
++ int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
++ operands[3] = GEN_INT (shift);
++
++ if (GET_CODE (operands[0]) == EQ)
++ operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
++ else
++ operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
++})
++
++(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 0 "equality_operator"
++ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
++ (match_operand 3 "branch_on_bit_operand")
++ (const_int 0))
++ (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))
++ (clobber (match_scratch:GPR 4 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(set (match_dup 4)
++ (ashift:GPR (match_dup 2) (match_dup 3)))
++ (set (pc)
++ (if_then_else
++ (match_op_dup 0 [(match_dup 4) (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))]
++{
++ operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
++})
++
++;;
++;; ....................
++;;
++;; SETTING A REGISTER FROM A COMPARISON
++;;
++;; ....................
++
++;; Destination is always set in SI mode.
++
++(define_expand "cstore<mode>4"
++ [(set (match_operand:SI 0 "register_operand")
++ (match_operator:SI 1 "order_operator"
++ [(match_operand:GPR 2 "register_operand")
++ (match_operand:GPR 3 "nonmemory_operand")]))]
++ ""
++{
++ riscv_expand_scc (operands);
++ DONE;
++})
++
++(define_insn "cstore<mode>4"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (match_operator:SI 1 "fp_order_operator"
++ [(match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f")]))]
++ "TARGET_HARD_FLOAT"
++{
++ if (GET_CODE (operands[1]) == NE)
++ return "feq.<fmt>\t%0,%2,%3; seqz %0, %0";
++ return "f%C1.<fmt>\t%0,%2,%3";
++}
++ [(set_attr "type" "fcmp")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (const_int 0)))]
++ ""
++ "seqz\t%0,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (const_int 0)))]
++ ""
++ "snez\t%0,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
++ ""
++ "slt<u>\t%0,%z2,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (const_int 1)))]
++ ""
++ "slt<u>\t%0,zero,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR 2 "arith_operand" "rI")))]
++ ""
++ "slt<u>\t%0,%1,%2"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR 2 "sle_operand" "")))]
++ ""
++{
++ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
++ return "slt<u>\t%0,%1,%2";
++}
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++;;
++;; ....................
++;;
++;; UNCONDITIONAL BRANCHES
++;;
++;; ....................
++
++;; Unconditional branches.
++
++(define_insn "jump"
++ [(set (pc)
++ (label_ref (match_operand 0 "" "")))]
++ ""
++ "j\t%l0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++(define_expand "indirect_jump"
++ [(set (pc) (match_operand 0 "register_operand"))]
++ ""
++{
++ operands[0] = force_reg (Pmode, operands[0]);
++ if (Pmode == SImode)
++ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
++ else
++ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
++ DONE;
++})
++
++(define_insn "indirect_jump<mode>"
++ [(set (pc) (match_operand:P 0 "register_operand" "l"))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++(define_expand "tablejump"
++ [(set (pc) (match_operand 0 "register_operand" ""))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++{
++ if (CASE_VECTOR_PC_RELATIVE)
++ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
++ gen_rtx_LABEL_REF (Pmode, operands[1]),
++ NULL_RTX, 0, OPTAB_DIRECT);
++
++ if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
++ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
++ else
++ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
++ DONE;
++})
++
++(define_insn "tablejump<mode>"
++ [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;;
++;; ....................
++;;
++;; Function prologue/epilogue
++;;
++;; ....................
++;;
++
++(define_expand "prologue"
++ [(const_int 1)]
++ ""
++{
++ riscv_expand_prologue ();
++ DONE;
++})
++
++;; Block any insns from being moved before this point, since the
++;; profiling call to mcount can use various registers that aren't
++;; saved or used to pass arguments.
++
++(define_insn "blockage"
++ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
++ ""
++ ""
++ [(set_attr "type" "ghost")
++ (set_attr "mode" "none")])
++
++(define_expand "epilogue"
++ [(const_int 2)]
++ ""
++{
++ riscv_expand_epilogue (false);
++ DONE;
++})
++
++(define_expand "sibcall_epilogue"
++ [(const_int 2)]
++ ""
++{
++ riscv_expand_epilogue (true);
++ DONE;
++})
++
++;; Trivial return. Make it look like a normal return insn as that
++;; allows jump optimizations to work better.
++
++(define_expand "return"
++ [(simple_return)]
++ "riscv_can_use_return_insn ()"
++ "")
++
++(define_insn "simple_return"
++ [(simple_return)]
++ ""
++ "ret"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;; Normal return.
++
++(define_insn "simple_return_internal"
++ [(simple_return)
++ (use (match_operand 0 "pmode_register_operand" ""))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;; This is used in compiling the unwind routines.
++(define_expand "eh_return"
++ [(use (match_operand 0 "general_operand"))]
++ ""
++{
++ if (GET_MODE (operands[0]) != word_mode)
++ operands[0] = convert_to_mode (word_mode, operands[0], 0);
++ if (TARGET_64BIT)
++ emit_insn (gen_eh_set_lr_di (operands[0]));
++ else
++ emit_insn (gen_eh_set_lr_si (operands[0]));
++ DONE;
++})
++
++;; Clobber the return address on the stack. We can't expand this
++;; until we know where it will be put in the stack frame.
++
++(define_insn "eh_set_lr_si"
++ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:SI 1 "=&r"))]
++ "! TARGET_64BIT"
++ "#")
++
++(define_insn "eh_set_lr_di"
++ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:DI 1 "=&r"))]
++ "TARGET_64BIT"
++ "#")
++
++(define_split
++ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch 1))]
++ "reload_completed"
++ [(const_int 0)]
++{
++ riscv_set_return_address (operands[0], operands[1]);
++ DONE;
++})
++
++;;
++;; ....................
++;;
++;; FUNCTION CALLS
++;;
++;; ....................
++
++;; Sibling calls. All these patterns use jump instructions.
++
++;; call_insn_operand will only accept constant
++;; addresses if a direct jump is acceptable. Since the 'S' constraint
++;; is defined in terms of call_insn_operand, the same is true of the
++;; constraints.
++
++;; When we use an indirect jump, we need a register that will be
++;; preserved by the epilogue (constraint j).
++
++(define_expand "sibcall"
++ [(parallel [(call (match_operand 0 "")
++ (match_operand 1 ""))
++ (use (match_operand 2 "")) ;; next_arg_reg
++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
++ ""
++{
++ riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
++ DONE;
++})
++
++(define_insn "sibcall_internal"
++ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
++ (match_operand 1 "" ""))]
++ "SIBLING_CALL_P (insn)"
++ { return REG_P (operands[0]) ? "jr\t%0"
++ : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
++ : "tail\t%0@plt"; }
++ [(set_attr "type" "call")])
++
++(define_expand "sibcall_value"
++ [(parallel [(set (match_operand 0 "")
++ (call (match_operand 1 "")
++ (match_operand 2 "")))
++ (use (match_operand 3 ""))])] ;; next_arg_reg
++ ""
++{
++ riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
++ DONE;
++})
++
++(define_insn "sibcall_value_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
++ (match_operand 2 "" "")))]
++ "SIBLING_CALL_P (insn)"
++ { return REG_P (operands[1]) ? "jr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
++ : "tail\t%1@plt"; }
++ [(set_attr "type" "call")])
++
++(define_insn "sibcall_value_multiple_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
++ (match_operand 2 "" "")))
++ (set (match_operand 3 "register_operand" "")
++ (call (mem:SI (match_dup 1))
++ (match_dup 2)))]
++ "SIBLING_CALL_P (insn)"
++ { return REG_P (operands[1]) ? "jr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
++ : "tail\t%1@plt"; }
++ [(set_attr "type" "call")])
++
++(define_expand "call"
++ [(parallel [(call (match_operand 0 "")
++ (match_operand 1 ""))
++ (use (match_operand 2 "")) ;; next_arg_reg
++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
++ ""
++{
++ riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
++ DONE;
++})
++
++(define_insn "call_internal"
++ [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S"))
++ (match_operand 1 "" ""))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ { return REG_P (operands[0]) ? "jalr\t%0"
++ : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
++ : "call\t%0@plt"; }
++ [(set_attr "type" "call")])
++
++(define_expand "call_value"
++ [(parallel [(set (match_operand 0 "")
++ (call (match_operand 1 "")
++ (match_operand 2 "")))
++ (use (match_operand 3 ""))])] ;; next_arg_reg
++ ""
++{
++ riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
++ DONE;
++})
++
++;; See comment for call_internal.
++(define_insn "call_value_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
++ (match_operand 2 "" "")))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ { return REG_P (operands[1]) ? "jalr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
++ : "call\t%1@plt"; }
++ [(set_attr "type" "call")])
++
++;; See comment for call_internal.
++(define_insn "call_value_multiple_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "l,S"))
++ (match_operand 2 "" "")))
++ (set (match_operand 3 "register_operand" "")
++ (call (mem:SI (match_dup 1))
++ (match_dup 2)))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ { return REG_P (operands[1]) ? "jalr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
++ : "call\t%1@plt"; }
++ [(set_attr "type" "call")])
++
++;; Call subroutine returning any type.
++
++(define_expand "untyped_call"
++ [(parallel [(call (match_operand 0 "")
++ (const_int 0))
++ (match_operand 1 "")
++ (match_operand 2 "")])]
++ ""
++{
++ int i;
++
++ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
++
++ for (i = 0; i < XVECLEN (operands[2], 0); i++)
++ {
++ rtx set = XVECEXP (operands[2], 0, i);
++ riscv_emit_move (SET_DEST (set), SET_SRC (set));
++ }
++
++ emit_insn (gen_blockage ());
++ DONE;
++})
++
++(define_insn "nop"
++ [(const_int 0)]
++ ""
++ "nop"
++ [(set_attr "type" "nop")
++ (set_attr "mode" "none")])
++
++(define_insn "trap"
++ [(trap_if (const_int 1) (const_int 0))]
++ ""
++ "sbreak")
++
++(define_insn "gpr_save"
++ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_SAVE)
++ (clobber (reg:SI T0_REGNUM))
++ (clobber (reg:SI T1_REGNUM))]
++ ""
++ { return riscv_output_gpr_save (INTVAL (operands[0])); })
++
++(define_insn "gpr_restore"
++ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_RESTORE)]
++ ""
++ "tail\t__riscv_restore_%0")
++
++(define_insn "gpr_restore_return"
++ [(return)
++ (use (match_operand 0 "pmode_register_operand" ""))
++ (const_int 0)]
++ ""
++ "")
++
++(include "sync.md")
++(include "peephole.md")
++(include "generic.md")
+diff --git original-gcc/gcc/config/riscv/riscv.opt gcc-6.2.0/gcc/config/riscv/riscv.opt
+new file mode 100644
+index 0000000..1d048be
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/riscv.opt
+@@ -0,0 +1,123 @@
++; Options for the RISC-V port of the compiler
++;
++; Copyright (C) 2011-2016 Free Software Foundation, Inc.
++;
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 3, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT
++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++; License for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING3. If not see
++; <http://www.gnu.org/licenses/>.
++
++HeaderInclude
++config/riscv/riscv-opts.h
++
++m32
++Target RejectNegative Mask(32BIT)
++Generate RV32 code.
++
++m64
++Target RejectNegative InverseMask(32BIT, 64BIT)
++Generate RV64 code.
++
++mbranch-cost=
++Target RejectNegative Joined UInteger Var(riscv_branch_cost)
++-mbranch-cost=N Set the cost of branches to roughly N instructions.
++
++mmemcpy
++Target Report Mask(MEMCPY)
++Don't optimize block moves.
++
++mplt
++Target Report Var(TARGET_PLT) Init(1)
++When generating -fpic code, allow the use of PLTs. Ignored for fno-pic.
++
++mfloat-abi=
++Target Report RejectNegative Joined Enum(float_abi_type) Var(riscv_float_abi) Init(FLOAT_ABI_SOFT)
++Specify floating-point calling convention.
++
++Enum
++Name(float_abi_type) Type(enum riscv_float_abi_type)
++Known floating-point ABIs (for use with the -mfloat-abi= option):
++
++EnumValue
++Enum(float_abi_type) String(soft) Value(FLOAT_ABI_SOFT)
++
++EnumValue
++Enum(float_abi_type) String(single) Value(FLOAT_ABI_SINGLE)
++
++EnumValue
++Enum(float_abi_type) String(double) Value(FLOAT_ABI_DOUBLE)
++
++mno-fdiv
++Target Report RejectNegative Undocumented Mask(NO_FDIV)
++Don't use hardware floating-point divide and square root instructions.
++
++mfdiv
++Target Report RejectNegative InverseMask(NO_FDIV, FDIV)
++Use hardware floating-point divide and square root instructions.
++
++march=
++Target Report RejectNegative Joined
++-march= Generate code for given RISC-V ISA (e.g. RV64IM).
++
++mtune=
++Target RejectNegative Joined Var(riscv_tune_string)
++-mtune=PROCESSOR Optimize the output for PROCESSOR.
++
++msmall-data-limit=
++Target Joined Separate UInteger Var(g_switch_value) Init(8)
++-msmall-data-limit=N Put global and static data smaller than <number> bytes into a special section (on some targets).
++
++matomic
++Target Report Mask(ATOMIC)
++Use hardware atomic memory instructions.
++
++mmuldiv
++Target Report
++Use hardware instructions for integer multiplication and division.
++
++mmul
++Target Report Mask(MUL)
++Use hardware instructions for integer multiplication
++
++mdiv
++Target Report Mask(DIV)
++Use hardware instructions for integer division.
++
++mrvc
++Target Report Mask(RVC)
++Use compressed instruction encoding.
++
++msave-restore
++Target Report Mask(SAVE_RESTORE)
++Use smaller but slower prologue and epilogue code.
++
++mcmodel=
++Target RejectNegative Joined Var(riscv_cmodel_string)
++Use given RISC-V code model (medlow or medany).
++
++mexplicit-relocs
++Target Report Mask(EXPLICIT_RELOCS)
++Use %reloc() operators, rather than assembly macros, to load addresses.
++
++mno-float
++Target Report RejectNegative
++Disable hardware floating-point. Implies -mfloat-abi=soft.
++
++msingle-float
++Target Report RejectNegative Mask(HARD_FLOAT)
++Enable only single-precision floating-point.
++
++mdouble-float
++Target Report RejectNegative Mask(DOUBLE_FLOAT)
++Enable single- and double-precision floating-point.
+diff --git original-gcc/gcc/config/riscv/sync.md gcc-6.2.0/gcc/config/riscv/sync.md
+new file mode 100644
+index 0000000..4f7f4f3
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/sync.md
+@@ -0,0 +1,204 @@
++;; Machine description for RISC-V atomic operations.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++ UNSPEC_COMPARE_AND_SWAP
++ UNSPEC_SYNC_OLD_OP
++ UNSPEC_SYNC_EXCHANGE
++ UNSPEC_ATOMIC_STORE
++ UNSPEC_MEMORY_BARRIER
++])
++
++(define_code_iterator any_atomic [plus ior xor and])
++(define_code_attr atomic_optab
++ [(plus "add") (ior "or") (xor "xor") (and "and")])
++
++;; Memory barriers.
++
++(define_expand "mem_thread_fence"
++ [(match_operand:SI 0 "const_int_operand" "")] ;; model
++ ""
++{
++ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
++ {
++ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
++ MEM_VOLATILE_P (mem) = 1;
++ emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
++ }
++ DONE;
++})
++
++(define_insn "mem_thread_fence_1"
++ [(set (match_operand:BLK 0 "" "")
++ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
++ (match_operand:SI 1 "const_int_operand" "")] ;; model
++ ""
++{
++ long model = INTVAL (operands[1]);
++
++ switch (model)
++ {
++ case MEMMODEL_SEQ_CST:
++ case MEMMODEL_SYNC_SEQ_CST:
++ case MEMMODEL_ACQ_REL:
++ return "fence rw,rw";
++ case MEMMODEL_ACQUIRE:
++ case MEMMODEL_SYNC_ACQUIRE:
++ case MEMMODEL_CONSUME:
++ return "fence r,rw";
++ case MEMMODEL_RELEASE:
++ case MEMMODEL_SYNC_RELEASE:
++ return "fence rw,w";
++ default:
++ fprintf(stderr, "mem_thread_fence_1(%ld)\n", model);
++ gcc_unreachable();
++ }
++})
++
++;; Atomic memory operations.
++
++;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
++(define_insn "atomic_store<mode>"
++ [(set (match_operand:GPR 0 "memory_operand" "=A")
++ (unspec_volatile:GPR
++ [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "const_int_operand")] ;; model
++ UNSPEC_ATOMIC_STORE))]
++ "TARGET_ATOMIC"
++ "amoswap.<amo>%A2 zero,%z1,%0")
++
++(define_insn "atomic_<atomic_optab><mode>"
++ [(set (match_operand:GPR 0 "memory_operand" "+A")
++ (unspec_volatile:GPR
++ [(any_atomic:GPR (match_dup 0)
++ (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 2 "const_int_operand")] ;; model
++ UNSPEC_SYNC_OLD_OP))]
++ "TARGET_ATOMIC"
++ "amo<insn>.<amo>%A2 zero,%z1,%0")
++
++(define_insn "atomic_fetch_<atomic_optab><mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (match_operand:GPR 1 "memory_operand" "+A"))
++ (set (match_dup 1)
++ (unspec_volatile:GPR
++ [(any_atomic:GPR (match_dup 1)
++ (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 3 "const_int_operand")] ;; model
++ UNSPEC_SYNC_OLD_OP))]
++ "TARGET_ATOMIC"
++ "amo<insn>.<amo>%A3 %0,%z2,%1")
++
++(define_insn "atomic_exchange<mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (unspec_volatile:GPR
++ [(match_operand:GPR 1 "memory_operand" "+A")
++ (match_operand:SI 3 "const_int_operand")] ;; model
++ UNSPEC_SYNC_EXCHANGE))
++ (set (match_dup 1)
++ (match_operand:GPR 2 "register_operand" "0"))]
++ "TARGET_ATOMIC"
++ "amoswap.<amo>%A3 %0,%z2,%1")
++
++(define_insn "atomic_cas_value_strong<mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (match_operand:GPR 1 "memory_operand" "+A"))
++ (set (match_dup 1)
++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")
++ (match_operand:SI 4 "const_int_operand") ;; mod_s
++ (match_operand:SI 5 "const_int_operand")] ;; mod_f
++ UNSPEC_COMPARE_AND_SWAP))
++ (clobber (match_scratch:GPR 6 "=&r"))]
++ "TARGET_ATOMIC"
++ "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
++ [(set (attr "length") (const_int 16))])
++
++(define_expand "atomic_compare_and_swap<mode>"
++ [(match_operand:SI 0 "register_operand" "") ;; bool output
++ (match_operand:GPR 1 "register_operand" "") ;; val output
++ (match_operand:GPR 2 "memory_operand" "") ;; memory
++ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value
++ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
++ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
++ (match_operand:SI 6 "const_int_operand" "") ;; mod_s
++ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
++ "TARGET_ATOMIC"
++{
++ emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
++ operands[3], operands[4],
++ operands[6], operands[7]));
++
++ rtx compare = operands[1];
++ if (operands[3] != const0_rtx)
++ {
++ rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
++ compare = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_rtx_SET (compare, difference));
++ }
++
++ rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
++ rtx result = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_rtx_SET (result, eq));
++ emit_insn (gen_rtx_SET (operands[0], gen_lowpart (SImode, result)));
++ DONE;
++})
++
++(define_expand "atomic_test_and_set"
++ [(match_operand:QI 0 "register_operand" "") ;; bool output
++ (match_operand:QI 1 "memory_operand" "+A") ;; memory
++ (match_operand:SI 2 "const_int_operand" "")] ;; model
++ "TARGET_ATOMIC"
++{
++ /* We have no QImode atomics, so use the address LSBs to form a mask,
++ then use an aligned SImode atomic. */
++ rtx result = operands[0];
++ rtx mem = operands[1];
++ rtx model = operands[2];
++ rtx addr = force_reg (Pmode, XEXP (mem, 0));
++
++ rtx aligned_addr = gen_reg_rtx (Pmode);
++ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
++
++ rtx aligned_mem = change_address (mem, SImode, aligned_addr);
++ set_mem_alias_set (aligned_mem, 0);
++
++ rtx offset = gen_reg_rtx (SImode);
++ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
++ GEN_INT (3)));
++
++ rtx tmp = gen_reg_rtx (SImode);
++ emit_move_insn (tmp, GEN_INT (1));
++
++ rtx shmt = gen_reg_rtx (SImode);
++ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
++
++ rtx word = gen_reg_rtx (SImode);
++ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
++
++ tmp = gen_reg_rtx (SImode);
++ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
++
++ emit_move_insn (gen_lowpart (SImode, result),
++ gen_rtx_LSHIFTRT (SImode, tmp,
++ gen_lowpart (SImode, shmt)));
++ DONE;
++})
+diff --git original-gcc/gcc/config/riscv/t-elf gcc-6.2.0/gcc/config/riscv/t-elf
+new file mode 100644
+index 0000000..ebb6e92
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/t-elf
+@@ -0,0 +1,9 @@
++# Build the libraries for both hard and soft floating point
++
++MULTILIB_OPTIONS = m64/m32
++MULTILIB_DIRNAMES = 64 32
++
++ifneq ($(with_float), soft)
++MULTILIB_OPTIONS += mno-float
++MULTILIB_DIRNAMES += soft-float
++endif
+diff --git original-gcc/gcc/config/riscv/t-linux gcc-6.2.0/gcc/config/riscv/t-linux
+new file mode 100644
+index 0000000..8747ecd
+--- /dev/null
++++ gcc-6.2.0/gcc/config/riscv/t-linux
+@@ -0,0 +1,11 @@
++# Build the libraries for both hard and soft floating point
++
++MULTILIB_OPTIONS = m64/m32
++MULTILIB_DIRNAMES = 64 32
++MULTILIB_OSDIRNAMES = ../lib ../lib32
++
++ifneq ($(with_float), soft)
++MULTILIB_OPTIONS += mno-float
++MULTILIB_DIRNAMES += soft-float
++MULTILIB_OSDIRNAMES += soft-float
++endif
+diff --git original-gcc/gcc/configure gcc-6.2.0/gcc/configure
+index fc83cc8..ebf12f9 100755
+--- original-gcc/gcc/configure
++++ gcc-6.2.0/gcc/configure
+@@ -24134,6 +24134,25 @@ x3: .space 4
+ tls_first_minor=14
+ tls_as_opt="-a32 --fatal-warnings"
+ ;;
++ riscv*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x:
++ .word 2
++ .text
++ la.tls.gd a0,x
++ la.tls.ie a1,x
++ lui a0,%tls_ie_pcrel_hi(x)
++ lw a0,%pcrel_lo(x)(a0)
++ add a0,a0,tp
++ lw a0,0(a0)
++ lui a0,%tprel_hi(x)
++ add a0,a0,tp,%tprel_add(x)
++ lw a0,%tprel_lo(x)(a0)'
++ tls_first_major=2
++ tls_first_minor=21
++ tls_as_opt='-m32 --fatal-warnings'
++ ;;
+ s390-*-*)
+ conftest_s='
+ .section ".tdata","awT",@progbits
+diff --git original-gcc/gcc/configure.ac gcc-6.2.0/gcc/configure.ac
+index dc22d3c..2591e5e 100644
+--- original-gcc/gcc/configure.ac
++++ gcc-6.2.0/gcc/configure.ac
+@@ -3367,6 +3367,25 @@ x3: .space 4
+ tls_first_minor=14
+ tls_as_opt="-a32 --fatal-warnings"
+ ;;
++ riscv*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x:
++ .word 2
++ .text
++ la.tls.gd a0,x
++ la.tls.ie a1,x
++ lui a0,%tls_ie_pcrel_hi(x)
++ lw a0,%pcrel_lo(x)(a0)
++ add a0,a0,tp
++ lw a0,0(a0)
++ lui a0,%tprel_hi(x)
++ add a0,a0,tp,%tprel_add(x)
++ lw a0,%tprel_lo(x)(a0)'
++ tls_first_major=2
++ tls_first_minor=21
++ tls_as_opt='-m32 --fatal-warnings'
++ ;;
+ s390-*-*)
+ conftest_s='
+ .section ".tdata","awT",@progbits
+diff --git original-gcc/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C gcc-6.2.0/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C
+index 80a571a..2e0ef68 100644
+--- original-gcc/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C
++++ gcc-6.2.0/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C
+@@ -2,7 +2,7 @@
+ // { dg-do compile { target c++11 } }
+ // { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } }
+ // { dg-final { scan-assembler "\\.rdata" { target mips*-*-* } } }
+-// { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! mips*-*-* } } } } }
++// { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! { mips*-*-* riscv*-*-* } } } } } }
+
+ struct Data
+ {
+diff --git original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c gcc-6.2.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+index 744763f..2b06bd6 100644
+--- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
++++ gcc-6.2.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+@@ -6,6 +6,9 @@
+ #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
+ /* On PPC division by zero does not trap. */
+ # define DO_TEST 0
++#elif defined (__riscv__)
++ /* On RISC-V division by zero does not trap. */
++# define DO_TEST 0
+ #elif defined (__SPU__)
+ /* On SPU division by zero does not trap. */
+ # define DO_TEST 0
+diff --git original-gcc/gcc/testsuite/gcc.dg/20020312-2.c gcc-6.2.0/gcc/testsuite/gcc.dg/20020312-2.c
+index 5fce50d..f77862c 100644
+--- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/20020312-2.c
+@@ -67,6 +67,8 @@ extern void abort (void);
+ # else
+ # define PIC_REG "30"
+ # endif
++#elif defined(__riscv__)
++/* No pic register. */
+ #elif defined(__RX__)
+ /* No pic register. */
+ #elif defined(__s390__)
+diff --git original-gcc/gcc/testsuite/gcc.dg/ifcvt-4.c gcc-6.2.0/gcc/testsuite/gcc.dg/ifcvt-4.c
+index 319b583..2a86344 100644
+--- original-gcc/gcc/testsuite/gcc.dg/ifcvt-4.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/ifcvt-4.c
+@@ -1,6 +1,6 @@
+ /* { dg-options "-fdump-rtl-ce1 -O2 --param max-rtl-if-conversion-insns=3" } */
+ /* { dg-additional-options "-misel" { target { powerpc*-*-* } } } */
+-/* { dg-skip-if "Multiple set if-conversion not guaranteed on all subtargets" { "arm*-*-* hppa*64*-*-* visium-*-*" } } */
++/* { dg-skip-if "Multiple set if-conversion not guaranteed on all subtargets" { "arm*-*-* hppa*64*-*-* visium-*-*" riscv*-*-* } } */
+
+ typedef int word __attribute__((mode(word)));
+
+diff --git original-gcc/gcc/testsuite/gcc.dg/loop-8.c gcc-6.2.0/gcc/testsuite/gcc.dg/loop-8.c
+index 463c5d0..a760072 100644
+--- original-gcc/gcc/testsuite/gcc.dg/loop-8.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/loop-8.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-options "-O1 -fdump-rtl-loop2_invariant" } */
+-/* { dg-skip-if "unexpected IV" { "hppa*-*-* visium-*-*" } { "*" } { "" } } */
++/* { dg-skip-if "unexpected IV" { "hppa*-*-* visium-*-* riscv*-*-*" } { "*" } { "" } } */
+
+ void
+ f (int *a, int *b)
+diff --git original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c gcc-6.2.0/gcc/testsuite/gcc.dg/stack-usage-1.c
+index 7864c6a..12f91a8 100644
+--- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/stack-usage-1.c
+@@ -63,6 +63,8 @@
+ # else
+ # define SIZE 240
+ # endif
++#elif defined (__riscv__)
++# define SIZE 240
+ #elif defined (__AVR__)
+ # define SIZE 254
+ #elif defined (__s390x__)
+diff --git original-gcc/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c
+index f7b5dfa..a1237cf 100644
+--- original-gcc/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c
+@@ -33,4 +33,4 @@ void test55 (int x, int y)
+ that the && should be emitted (based on BRANCH_COST). Fix this
+ by teaching dom to look through && and register all components
+ as true. */
+-/* { dg-final { scan-tree-dump-times "link_error" 0 "optimized" { xfail { ! "alpha*-*-* arm*-*-* aarch64*-*-* powerpc*-*-* cris-*-* crisv32-*-* hppa*-*-* i?86-*-* mmix-*-* mips*-*-* m68k*-*-* moxie-*-* nds32*-*-* s390*-*-* sh*-*-* sparc*-*-* spu-*-* visium-*-* x86_64-*-*" } } } } */
++/* { dg-final { scan-tree-dump-times "link_error" 0 "optimized" { xfail { ! "alpha*-*-* arm*-*-* aarch64*-*-* powerpc*-*-* cris-*-* crisv32-*-* hppa*-*-* i?86-*-* mmix-*-* mips*-*-* m68k*-*-* moxie-*-* nds32*-*-* s390*-*-* sh*-*-* sparc*-*-* spu-*-* visium-*-* x86_64-*-* riscv*-*-*" } } } } */
+diff --git original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
+index 1a4bfe6..665ac23 100644
+--- original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
+@@ -25,4 +25,4 @@ foo ()
+ but the loop reads only one element at a time, and DOM cannot resolve these.
+ The same happens on powerpc depending on the SIMD support available. */
+
+-/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* powerpc64*-*-* } || { sparc*-*-* && lp64 } } } } } */
++/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* powerpc64*-*-* riscv*64*-*-* } || { sparc*-*-* && lp64 } } } } } */
+diff --git original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c
+index a287dad..0aecfed 100644
+--- original-gcc/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c
++++ gcc-6.2.0/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c
+@@ -5,7 +5,7 @@
+
+ When the condition is true, we distribute "(int) (a + b)" as
+ "(int) a + (int) b", otherwise we keep the original. */
+-/* { dg-do compile { target { { ! mips64 } && { ! spu-*-* } } } } */
++/* { dg-do compile { target { { ! mips64 } && { { ! spu-*-* } && { ! riscv*64*-*-* } } } } } */
+ /* { dg-options "-O -fno-tree-forwprop -fno-tree-ccp -fwrapv -fdump-tree-fre1-details" } */
+
+ /* From PR14844. */
+diff --git original-gcc/gcc/testsuite/lib/target-supports.exp gcc-6.2.0/gcc/testsuite/lib/target-supports.exp
+index 6d9b488..1c96306 100644
+--- original-gcc/gcc/testsuite/lib/target-supports.exp
++++ gcc-6.2.0/gcc/testsuite/lib/target-supports.exp
+@@ -6902,6 +6902,7 @@ proc check_effective_target_logical_op_short_circuit {} {
+ || [istarget s390*-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget nios2*-*-*]
++ || [istarget riscv*-*-*]
+ || [istarget visium-*-*]
+ || [check_effective_target_arm_cortex_m] } {
+ return 1
+diff --git original-gcc/libatomic/configure.tgt gcc-6.2.0/libatomic/configure.tgt
+index eab2765..1343e37 100644
+--- original-gcc/libatomic/configure.tgt
++++ gcc-6.2.0/libatomic/configure.tgt
+@@ -37,6 +37,7 @@ case "${target_cpu}" in
+ ARCH=alpha
+ ;;
+ rs6000 | powerpc*) ARCH=powerpc ;;
++ riscv*) ARCH=riscv ;;
+ sh*) ARCH=sh ;;
+
+ arm*)
+diff --git original-gcc/libgcc/config.host gcc-6.2.0/libgcc/config.host
+index 16a45c8..0545bbc 100644
+--- original-gcc/libgcc/config.host
++++ gcc-6.2.0/libgcc/config.host
+@@ -169,6 +169,9 @@ powerpc*-*-*)
+ ;;
+ rs6000*-*-*)
+ ;;
++riscv*)
++ cpu_type=riscv
++ ;;
+ sparc64*-*-*)
+ cpu_type=sparc
+ ;;
+@@ -1088,6 +1091,14 @@ powerpcle-*-eabi*)
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
+ ;;
++riscv*-*-linux*)
++ tmake_file="${tmake_file} t-softfp-sfdf riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
++ ;;
++riscv*-*-*)
++ tmake_file="${tmake_file} t-softfp-sfdf riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
++ ;;
+ rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
+ md_unwind_header=rs6000/aix-unwind.h
+ tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
+diff --git original-gcc/libgcc/config/riscv/atomic.c gcc-6.2.0/libgcc/config/riscv/atomic.c
+new file mode 100644
+index 0000000..00e8111
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/atomic.c
+@@ -0,0 +1,111 @@
++/* Legacy sub-word atomics for RISC-V.
++
++ Copyright (C) 2016 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++#ifdef __riscv_atomic
++
++#include <stdbool.h>
++
++#define INVERT "not %[tmp1], %[tmp1]\n\t"
++#define DONT_INVERT ""
++
++#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop) \
++ type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v) \
++ { \
++ unsigned long aligned_addr = ((unsigned long) p) & ~3UL; \
++ int shift = (((unsigned long) p) & 3) * 8; \
++ unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift; \
++ unsigned old, tmp1, tmp2; \
++ \
++ asm volatile ("1:\n\t" \
++ "lr.w.aq %[old], %[mem]\n\t" \
++ #insn " %[tmp1], %[old], %[value]\n\t" \
++ invert \
++ "and %[tmp1], %[tmp1], %[mask]\n\t" \
++ "and %[tmp2], %[old], %[not_mask]\n\t" \
++ "or %[tmp2], %[tmp2], %[tmp1]\n\t" \
++ "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t" \
++ "bnez %[tmp1], 1b" \
++ : [old] "=&r" (old), \
++ [mem] "+A" (*(volatile unsigned*) aligned_addr), \
++ [tmp1] "=&r" (tmp1), \
++ [tmp2] "=&r" (tmp2) \
++ : [value] "r" (((unsigned) v) << shift), \
++ [mask] "r" (mask), \
++ [not_mask] "r" (~mask)); \
++ \
++ return (type) (old >> shift); \
++ } \
++ \
++ type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v) \
++ { \
++ type o = __sync_fetch_and_ ## opname ## _ ## size (p, v); \
++ return cop; \
++ }
++
++#define GENERATE_COMPARE_AND_SWAP(type, size) \
++ type __sync_val_compare_and_swap_ ## size (type *p, type o, type n) \
++ { \
++ unsigned long aligned_addr = ((unsigned long) p) & ~3UL; \
++ int shift = (((unsigned long) p) & 3) * 8; \
++ unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift; \
++ unsigned old, tmp1; \
++ \
++ asm volatile ("1:\n\t" \
++ "lr.w.aq %[old], %[mem]\n\t" \
++ "and %[tmp1], %[old], %[mask]\n\t" \
++ "bne %[tmp1], %[o], 1f\n\t" \
++ "and %[tmp1], %[old], %[not_mask]\n\t" \
++ "or %[tmp1], %[tmp1], %[n]\n\t" \
++ "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t" \
++ "bnez %[tmp1], 1b\n\t" \
++ "1:" \
++ : [old] "=&r" (old), \
++ [mem] "+A" (*(volatile unsigned*) aligned_addr), \
++ [tmp1] "=&r" (tmp1) \
++ : [o] "r" ((((unsigned) o) << shift) & mask), \
++ [n] "r" ((((unsigned) n) << shift) & mask), \
++ [mask] "r" (mask), \
++ [not_mask] "r" (~mask)); \
++ \
++ return (type) (old >> shift); \
++ } \
++ bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n) \
++ { \
++ return __sync_val_compare_and_swap(p, o, n) == o; \
++ }
++
++#define GENERATE_ALL(type, size) \
++ GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v) \
++ GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v) \
++ GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v) \
++ GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v) \
++ GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v) \
++ GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v)) \
++ GENERATE_COMPARE_AND_SWAP(type, size)
++
++GENERATE_ALL(unsigned char, 1)
++GENERATE_ALL(unsigned short, 2)
++
++#endif
+diff --git original-gcc/libgcc/config/riscv/crti.S gcc-6.2.0/libgcc/config/riscv/crti.S
+new file mode 100644
+index 0000000..89bac70
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/crti.S
+@@ -0,0 +1 @@
++/* crti.S is empty because .init_array/.fini_array are used exclusively. */
+diff --git original-gcc/libgcc/config/riscv/crtn.S gcc-6.2.0/libgcc/config/riscv/crtn.S
+new file mode 100644
+index 0000000..ca6ee7b
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/crtn.S
+@@ -0,0 +1 @@
++/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
+diff --git original-gcc/libgcc/config/riscv/div.S gcc-6.2.0/libgcc/config/riscv/div.S
+new file mode 100644
+index 0000000..385634a
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/div.S
+@@ -0,0 +1,121 @@
++ .text
++ .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
++# define __udivdi3 __udivsi3
++# define __umoddi3 __umodsi3
++# define __divdi3 __divsi3
++# define __moddi3 __modsi3
++#else
++ .globl __udivsi3
++__udivsi3:
++ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
++ sll a0, a0, 32
++ sll a1, a1, 32
++ move t0, ra
++ jal __udivdi3
++ sext.w a0, a0
++ jr t0
++
++ .globl __umodsi3
++__umodsi3:
++ /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */
++ sll a0, a0, 32
++ sll a1, a1, 32
++ srl a0, a0, 32
++ srl a1, a1, 32
++ move t0, ra
++ jal __udivdi3
++ sext.w a0, a1
++ jr t0
++
++ .globl __modsi3
++ __modsi3 = __moddi3
++
++ .globl __divsi3
++__divsi3:
++ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
++ li t0, -1
++ beq a1, t0, .L20
++#endif
++
++ .globl __divdi3
++__divdi3:
++ bltz a0, .L10
++ bltz a1, .L11
++ /* Since the quotient is positive, fall into __udivdi3. */
++
++ .globl __udivdi3
++__udivdi3:
++ mv a2, a1
++ mv a1, a0
++ li a0, -1
++ beqz a2, .L5
++ li a3, 1
++ bgeu a2, a1, .L2
++.L1:
++ blez a2, .L2
++ slli a2, a2, 1
++ slli a3, a3, 1
++ bgtu a1, a2, .L1
++.L2:
++ li a0, 0
++.L3:
++ bltu a1, a2, .L4
++ sub a1, a1, a2
++ or a0, a0, a3
++.L4:
++ srli a3, a3, 1
++ srli a2, a2, 1
++ bnez a3, .L3
++.L5:
++ ret
++
++ .globl __umoddi3
++__umoddi3:
++ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
++ move t0, ra
++ jal __udivdi3
++ move a0, a1
++ jr t0
++
++ /* Handle negative arguments to __divdi3. */
++.L10:
++ neg a0, a0
++ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
++ neg a1, a1
++ j __udivdi3 /* Compute __udivdi3(-a0, -a1). */
++.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
++ neg a1, a1
++.L12:
++ move t0, ra
++ jal __divdi3
++ neg a0, a0
++ jr t0
++
++ .globl __moddi3
++__moddi3:
++ move t0, ra
++ bltz a1, .L31
++ bltz a0, .L32
++.L30:
++ jal __udivdi3 /* The dividend is not negative. */
++ move a0, a1
++ jr t0
++.L31:
++ neg a1, a1
++ bgez a0, .L30
++.L32:
++ neg a0, a0
++ jal __udivdi3 /* The dividend is negative. */
++ neg a0, a1
++ jr t0
++
++#ifdef __riscv64
++ /* continuation of __divsi3 */
++.L20:
++ sll t0, t0, 31
++ bne a0, t0, __divdi3
++ ret
++#endif
+diff --git original-gcc/libgcc/config/riscv/muldi3.S gcc-6.2.0/libgcc/config/riscv/muldi3.S
+new file mode 100644
+index 0000000..f5061b9
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/muldi3.S
+@@ -0,0 +1,21 @@
++ .text
++ .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
++# define __muldi3 __mulsi3
++#endif
++
++ .globl __muldi3
++__muldi3:
++ mv a2, a0
++ li a0, 0
++.L1:
++ andi a3, a1, 1
++ beqz a3, .L2
++ add a0, a0, a2
++.L2:
++ srli a1, a1, 1
++ slli a2, a2, 1
++ bnez a1, .L1
++ ret
+diff --git original-gcc/libgcc/config/riscv/multi3.S gcc-6.2.0/libgcc/config/riscv/multi3.S
+new file mode 100644
+index 0000000..849951e
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/multi3.S
+@@ -0,0 +1,56 @@
++ .text
++ .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
++# define __multi3 __muldi3
++#endif
++
++ .globl __multi3
++__multi3:
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
++# define __muldi3 __mulsi3
++#endif
++
++/* We rely on the fact that __muldi3 doesn't clobber the t-registers. */
++
++ mv t0, ra
++ mv t5, a0
++ mv a0, a1
++ mv t6, a3
++ mv a1, t5
++ mv a4, a2
++ li a5, 0
++ li t2, 0
++ li t4, 0
++.L1:
++ add a6, t2, a1
++ andi t3, a4, 1
++ slli a7, a5, 1
++ slti t1, a1, 0
++ srli a4, a4, 1
++ add a5, t4, a5
++ beqz t3, .L2
++ sltu t3, a6, t2
++ mv t2, a6
++ add t4, t3, a5
++.L2:
++ slli a1, a1, 1
++ or a5, t1, a7
++ bnez a4, .L1
++ beqz a0, .L3
++ mv a1, a2
++ call __muldi3
++ add t4, t4, a0
++.L3:
++ beqz t6, .L4
++ mv a1, t6
++ mv a0, t5
++ call __muldi3
++ add t4, t4, a0
++.L4:
++ mv a0, t2
++ mv a1, t4
++ jr t0
+diff --git original-gcc/libgcc/config/riscv/save-restore.S gcc-6.2.0/libgcc/config/riscv/save-restore.S
+new file mode 100644
+index 0000000..bbf0e33
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/save-restore.S
+@@ -0,0 +1,220 @@
++ .text
++
++ .globl __riscv_save_12
++ .globl __riscv_save_11
++ .globl __riscv_save_10
++ .globl __riscv_save_9
++ .globl __riscv_save_8
++ .globl __riscv_save_7
++ .globl __riscv_save_6
++ .globl __riscv_save_5
++ .globl __riscv_save_4
++ .globl __riscv_save_3
++ .globl __riscv_save_2
++ .globl __riscv_save_1
++ .globl __riscv_save_0
++
++ .globl __riscv_restore_12
++ .globl __riscv_restore_11
++ .globl __riscv_restore_10
++ .globl __riscv_restore_9
++ .globl __riscv_restore_8
++ .globl __riscv_restore_7
++ .globl __riscv_restore_6
++ .globl __riscv_restore_5
++ .globl __riscv_restore_4
++ .globl __riscv_restore_3
++ .globl __riscv_restore_2
++ .globl __riscv_restore_1
++ .globl __riscv_restore_0
++
++#ifdef __riscv64
++
++__riscv_save_12:
++ addi sp, sp, -112
++ li t1, 0
++ sd s11, 8(sp)
++ j .Ls10
++
++__riscv_save_11:
++__riscv_save_10:
++ addi sp, sp, -112
++ li t1, -16
++.Ls10:
++ sd s10, 16(sp)
++ sd s9, 24(sp)
++ j .Ls8
++
++__riscv_save_9:
++__riscv_save_8:
++ addi sp, sp, -112
++ li t1, -32
++.Ls8:
++ sd s8, 32(sp)
++ sd s7, 40(sp)
++ j .Ls6
++
++__riscv_save_7:
++__riscv_save_6:
++ addi sp, sp, -112
++ li t1, -48
++.Ls6:
++ sd s6, 48(sp)
++ sd s5, 56(sp)
++ j .Ls4
++
++__riscv_save_5:
++__riscv_save_4:
++ addi sp, sp, -112
++ li t1, -64
++.Ls4:
++ sd s4, 64(sp)
++ sd s3, 72(sp)
++ j .Ls2
++
++__riscv_save_3:
++__riscv_save_2:
++ addi sp, sp, -112
++ li t1, -80
++.Ls2:
++ sd s2, 80(sp)
++ sd s1, 88(sp)
++ sd s0, 96(sp)
++ sd ra, 104(sp)
++ sub sp, sp, t1
++ jr t0
++
++__riscv_save_1:
++__riscv_save_0:
++ addi sp, sp, -16
++ sd s0, 0(sp)
++ sd ra, 8(sp)
++ jr t0
++
++__riscv_restore_12:
++ ld s11, 8(sp)
++ addi sp, sp, 16
++
++__riscv_restore_11:
++__riscv_restore_10:
++ ld s10, 0(sp)
++ ld s9, 8(sp)
++ addi sp, sp, 16
++
++__riscv_restore_9:
++__riscv_restore_8:
++ ld s8, 0(sp)
++ ld s7, 8(sp)
++ addi sp, sp, 16
++
++__riscv_restore_7:
++__riscv_restore_6:
++ ld s6, 0(sp)
++ ld s5, 8(sp)
++ addi sp, sp, 16
++
++__riscv_restore_5:
++__riscv_restore_4:
++ ld s4, 0(sp)
++ ld s3, 8(sp)
++ addi sp, sp, 16
++
++__riscv_restore_3:
++__riscv_restore_2:
++ ld s2, 0(sp)
++ ld s1, 8(sp)
++ addi sp, sp, 16
++
++__riscv_restore_1:
++__riscv_restore_0:
++ ld s0, 0(sp)
++ ld ra, 8(sp)
++ addi sp, sp, 16
++ ret
++
++#else
++
++__riscv_save_12:
++ addi sp, sp, -64
++ li t1, 0
++ sw s11, 12(sp)
++ j .Ls10
++
++__riscv_save_11:
++__riscv_save_10:
++__riscv_save_9:
++__riscv_save_8:
++ addi sp, sp, -64
++ li t1, -16
++.Ls10:
++ sw s10, 16(sp)
++ sw s9, 20(sp)
++ sw s8, 24(sp)
++ sw s7, 28(sp)
++ j .Ls6
++
++__riscv_save_7:
++__riscv_save_6:
++__riscv_save_5:
++__riscv_save_4:
++ addi sp, sp, -64
++ li t1, -32
++.Ls6:
++ sw s6, 32(sp)
++ sw s5, 36(sp)
++ sw s4, 40(sp)
++ sw s3, 44(sp)
++ sw s2, 48(sp)
++ sw s1, 52(sp)
++ sw s0, 56(sp)
++ sw ra, 60(sp)
++ sub sp, sp, t1
++ jr t0
++
++__riscv_save_3:
++__riscv_save_2:
++__riscv_save_1:
++__riscv_save_0:
++ addi sp, sp, -16
++ sw s2, 0(sp)
++ sw s1, 4(sp)
++ sw s0, 8(sp)
++ sw ra, 12(sp)
++ jr t0
++
++__riscv_restore_12:
++ lw s11, 12(sp)
++ addi sp, sp, 16
++
++__riscv_restore_11:
++__riscv_restore_10:
++__riscv_restore_9:
++__riscv_restore_8:
++ lw s10, 0(sp)
++ lw s9, 4(sp)
++ lw s8, 8(sp)
++ lw s7, 12(sp)
++ addi sp, sp, 16
++
++__riscv_restore_7:
++__riscv_restore_6:
++__riscv_restore_5:
++__riscv_restore_4:
++ lw s6, 0(sp)
++ lw s5, 4(sp)
++ lw s4, 8(sp)
++ lw s3, 12(sp)
++ addi sp, sp, 16
++
++__riscv_restore_3:
++__riscv_restore_2:
++__riscv_restore_1:
++__riscv_restore_0:
++ lw s2, 0(sp)
++ lw s1, 4(sp)
++ lw s0, 8(sp)
++ lw ra, 12(sp)
++ addi sp, sp, 16
++ ret
++
++#endif
+diff --git original-gcc/libgcc/config/riscv/sfp-machine.h gcc-6.2.0/libgcc/config/riscv/sfp-machine.h
+new file mode 100644
+index 0000000..c1f90c4
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/sfp-machine.h
+@@ -0,0 +1,100 @@
++
++#ifdef __riscv32
++
++#define _FP_W_TYPE_SIZE 32
++#define _FP_W_TYPE unsigned long
++#define _FP_WS_TYPE signed long
++#define _FP_I_TYPE long
++
++#define _FP_MUL_MEAT_S(R,X,Y) \
++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_D(R,X,Y) \
++ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_Q(R,X,Y) \
++ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
++
++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
++
++#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
++#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
++#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
++
++#else
++
++#define _FP_W_TYPE_SIZE 64
++#define _FP_W_TYPE unsigned long long
++#define _FP_WS_TYPE signed long long
++#define _FP_I_TYPE long long
++
++#define _FP_MUL_MEAT_S(R,X,Y) \
++ _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
++#define _FP_MUL_MEAT_D(R,X,Y) \
++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_Q(R,X,Y) \
++ _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
++
++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
++
++#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
++#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1)
++#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1
++
++#endif
++
++#ifdef __riscv64
++typedef int TItype __attribute__ ((mode (TI)));
++typedef unsigned int UTItype __attribute__ ((mode (TI)));
++#define TI_BITS (__CHAR_BIT__ * (int)sizeof(TItype))
++#endif
++
++/* The type of the result of a floating point comparison. This must
++ match __libgcc_cmp_return__ in GCC for the target. */
++typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
++#define CMPtype __gcc_CMPtype
++
++#define _FP_NANSIGN_S 0
++#define _FP_NANSIGN_D 0
++#define _FP_NANSIGN_Q 0
++
++#define _FP_KEEPNANFRACP 1
++#define _FP_QNANNEGATEDP 0
++
++
++/* From my experiments it seems X is chosen unless one of the
++ NaNs is sNaN, in which case the result is NANSIGN/NANFRAC. */
++#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
++ do { \
++ if ((_FP_FRAC_HIGH_RAW_##fs(X) | \
++ _FP_FRAC_HIGH_RAW_##fs(Y)) & _FP_QNANBIT_##fs) \
++ { \
++ R##_s = _FP_NANSIGN_##fs; \
++ _FP_FRAC_SET_##wc(R,_FP_NANFRAC_##fs); \
++ } \
++ else \
++ { \
++ R##_s = X##_s; \
++ _FP_FRAC_COPY_##wc(R,X); \
++ } \
++ R##_c = FP_CLS_NAN; \
++ } while (0)
++
++#define _FP_TININESS_AFTER_ROUNDING 0
++
++#define __LITTLE_ENDIAN 1234
++#define __BIG_ENDIAN 4321
++
++#if defined __big_endian__
++# define __BYTE_ORDER __BIG_ENDIAN
++#else
++# define __BYTE_ORDER __LITTLE_ENDIAN
++#endif
++
++
++/* Define ALIASNAME as a strong alias for NAME. */
++# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
++# define _strong_alias(name, aliasname) \
++ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+diff --git original-gcc/libgcc/config/riscv/t-elf gcc-6.2.0/libgcc/config/riscv/t-elf
+new file mode 100644
+index 0000000..01d5eba
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-elf
+@@ -0,0 +1,6 @@
++LIB2ADD += $(srcdir)/config/riscv/save-restore.S \
++ $(srcdir)/config/riscv/muldi3.S \
++ $(srcdir)/config/riscv/multi3.S \
++ $(srcdir)/config/riscv/div.S \
++ $(srcdir)/config/riscv/atomic.c \
++
+diff --git original-gcc/libgcc/config/riscv/t-elf32 gcc-6.2.0/libgcc/config/riscv/t-elf32
+new file mode 100644
+index 0000000..83363ce
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-elf32
+@@ -0,0 +1,4 @@
++LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
++
++HOST_LIBGCC2_CFLAGS += -m32
++CRTSTUFF_CFLAGS += -m32
+diff --git original-gcc/libgcc/config/riscv/t-elf64 gcc-6.2.0/libgcc/config/riscv/t-elf64
+new file mode 100644
+index 0000000..f375123
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-elf64
+@@ -0,0 +1 @@
++LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
+diff --git original-gcc/libgcc/config/riscv/t-softfp32 gcc-6.2.0/libgcc/config/riscv/t-softfp32
+new file mode 100644
+index 0000000..e69de29
+diff --git original-gcc/libgcc/config/riscv/t-softfp64 gcc-6.2.0/libgcc/config/riscv/t-softfp64
+new file mode 100644
+index 0000000..61a8bff
+--- /dev/null
++++ gcc-6.2.0/libgcc/config/riscv/t-softfp64
+@@ -0,0 +1,4 @@
++softfp_float_modes += tf
++softfp_int_modes += ti
++softfp_extensions += sftf dftf
++softfp_truncations += tfsf tfdf
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_linux.cc
+index 2cefa20..76dd411 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_linux.cc
+@@ -1136,6 +1136,11 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.gregs[30];
+ *sp = ucontext->uc_mcontext.gregs[29];
++# elif defined(__riscv__)
++ ucontext_t *ucontext = (ucontext_t*)context;
++ *pc = ucontext->uc_mcontext.gregs[REG_PC];
++ *bp = ucontext->uc_mcontext.gregs[REG_S0];
++ *sp = ucontext->uc_mcontext.gregs[REG_SP];
+ #else
+ # error "Unsupported arch"
+ #endif
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_platform.h gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform.h
+index 7d0ff28..cdd62d9 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform.h
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform.h
+@@ -113,9 +113,9 @@
+
+ // The AArch64 linux port uses the canonical syscall set as mandated by
+ // the upstream linux community for all new ports. Other ports may still
+-// use legacy syscalls.
++// use legacy syscalls. The RISC-V port also uses the canonical syscall set.
+ #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+-# if defined(__aarch64__) && SANITIZER_LINUX
++# if (defined(__aarch64__) || defined(__riscv__)) && SANITIZER_LINUX
+ # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+ # else
+ # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+index a1f0432..6c25901 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+@@ -61,7 +61,8 @@ namespace __sanitizer {
+ } // namespace __sanitizer
+
+ #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+- && !defined(__mips__) && !defined(__sparc__)
++ && !defined(__mips__) && !defined(__sparc__)\
++ && !defined(__riscv__)
+ COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
+ #endif
+
+diff --git original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+index b6f90eb..3aa9338 100644
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
++++ gcc-6.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+@@ -81,6 +81,9 @@ namespace __sanitizer {
+ const unsigned struct_kernel_stat_sz = 144;
+ #endif
+ const unsigned struct_kernel_stat64_sz = 104;
++#elif defined(__riscv__)
++ const unsigned struct_kernel_stat_sz = 128;
++ const unsigned struct_kernel_stat64_sz = 128;
+ #elif defined(__sparc__) && defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 104;
+@@ -109,7 +112,7 @@ namespace __sanitizer {
+
+ #if SANITIZER_LINUX || SANITIZER_FREEBSD
+
+-#if defined(__powerpc64__)
++#if defined(__powerpc64__) || defined(__riscv__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ #elif !defined(__sparc__)
+ const unsigned struct___old_kernel_stat_sz = 32;
+@@ -532,7 +535,7 @@ namespace __sanitizer {
+ typedef long __sanitizer___kernel_off_t;
+ #endif
+
+-#if defined(__powerpc__) || defined(__mips__)
++#if defined(__powerpc__) || defined(__mips__) || defined(__riscv__)
+ typedef unsigned int __sanitizer___kernel_old_uid_t;
+ typedef unsigned int __sanitizer___kernel_old_gid_t;
+ #else
diff --git a/util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum b/util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum
deleted file mode 100644
index bb05e39..0000000
--- a/util/crossgcc/sum/gcc-5.3.0.tar.bz2.cksum
+++ /dev/null
@@ -1 +0,0 @@
-0612270b103941da08376df4d0ef4e5662a2e9eb tarballs/gcc-5.3.0.tar.bz2
diff --git a/util/crossgcc/sum/gcc-6.2.0.tar.bz2.cksum b/util/crossgcc/sum/gcc-6.2.0.tar.bz2.cksum
new file mode 100644
index 0000000..669ad1e
--- /dev/null
+++ b/util/crossgcc/sum/gcc-6.2.0.tar.bz2.cksum
@@ -0,0 +1 @@
+583e29c7fe69d9a1031a89752c2551ab5aeacb91 tarballs/gcc-6.2.0.tar.bz2
diff --git a/util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum b/util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum
deleted file mode 100644
index 348b80f..0000000
--- a/util/crossgcc/sum/gmp-6.1.0.tar.xz.cksum
+++ /dev/null
@@ -1 +0,0 @@
-99d691607613e749aa5d7c0c2a89aeab38fec070 tarballs/gmp-6.1.0.tar.xz
diff --git a/util/crossgcc/sum/gmp-6.1.1.tar.xz.cksum b/util/crossgcc/sum/gmp-6.1.1.tar.xz.cksum
new file mode 100644
index 0000000..bf50891
--- /dev/null
+++ b/util/crossgcc/sum/gmp-6.1.1.tar.xz.cksum
@@ -0,0 +1 @@
+4da491d63ef850a7662f41da27ad1ba99c2dbaa1 tarballs/gmp-6.1.1.tar.xz
diff --git a/util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum b/util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum
deleted file mode 100644
index 90f90eb..0000000
--- a/util/crossgcc/sum/mpfr-3.1.4.tar.xz.cksum
+++ /dev/null
@@ -1 +0,0 @@
-cedc0055d55b6ee4cd17e1e6119ed412520ff81a tarballs/mpfr-3.1.4.tar.xz
diff --git a/util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum b/util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum
new file mode 100644
index 0000000..c2ae697
--- /dev/null
+++ b/util/crossgcc/sum/mpfr-3.1.5.tar.xz.cksum
@@ -0,0 +1 @@
+c0fab77c6da4cb710c81cc04092fb9bea11a9403 tarballs/mpfr-3.1.5.tar.xz
Iru Cai (mytbk920423(a)gmail.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/17189
-gerrit
commit 25a6eab7c1ae8760c977f75922efa09f0d9cfd83
Author: Iru Cai <mytbk920423(a)gmail.com>
Date: Sat Oct 29 23:19:36 2016 +0800
buildgcc: Update binutils to 2.27
There is a build error for MIPS gold, so I added a patch for it.
The binutils RISC-V patch was generated with the following command, run with the
official binutils git source and riscv-binutils-gdb (branch riscv-binutils-2.27,
rebased onto binutils-2_27) in the same directory:
git diff --src-prefix=original-binutils/ --dst-prefix=binutils-2_27/ \
binutils-2_27 bfd gas binutils ld opcodes include/{*.h,elf,opcode}
Change-Id: I20fea838d798c430d8c4d2cc6b07614d967c60c5
Signed-off-by: Iru Cai <mytbk920423(a)gmail.com>
---
util/crossgcc/buildgcc | 2 +-
util/crossgcc/patches/binutils-2.26.1_aarch.patch | 92 -
.../patches/binutils-2.26.1_no-bfd-doc.patch | 12 -
util/crossgcc/patches/binutils-2.26.1_riscv.patch | 9780 ------------------
util/crossgcc/patches/binutils-2.27_aarch.patch | 92 +
.../crossgcc/patches/binutils-2.27_mips-gold.patch | 11 +
.../patches/binutils-2.27_no-bfd-doc.patch | 12 +
util/crossgcc/patches/binutils-2.27_riscv.patch | 10251 +++++++++++++++++++
util/crossgcc/sum/binutils-2.26.1.tar.bz2.cksum | 1 -
util/crossgcc/sum/binutils-2.27.tar.bz2.cksum | 1 +
10 files changed, 10368 insertions(+), 9886 deletions(-)
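A minimal sketch of the workflow the commit message describes, assuming the rebased
riscv-binutils-gdb checkout is the current directory and that the output is redirected
into the patch file added by this commit (both assumptions for illustration, not taken
from the commit message itself):
    # Rebase the RISC-V branch onto the binutils-2_27 tag, then diff that tag
    # against the rebased tree for the listed paths only.
    git checkout riscv-binutils-2.27
    git rebase binutils-2_27
    git diff --src-prefix=original-binutils/ --dst-prefix=binutils-2_27/ \
        binutils-2_27 bfd gas binutils ld opcodes include/{*.h,elf,opcode} \
        > ../binutils-2.27_riscv.patch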
diff --git a/util/crossgcc/buildgcc b/util/crossgcc/buildgcc
index 4883754..0bd344b 100755
--- a/util/crossgcc/buildgcc
+++ b/util/crossgcc/buildgcc
@@ -40,7 +40,7 @@ MPC_VERSION=1.0.3
LIBELF_VERSION=0.8.13
GCC_VERSION=5.3.0
GCC_AUTOCONF_VERSION=2.69
-BINUTILS_VERSION=2.26.1
+BINUTILS_VERSION=2.27
GDB_VERSION=7.11
IASL_VERSION=20160831
PYTHON_VERSION=3.5.1
diff --git a/util/crossgcc/patches/binutils-2.26.1_aarch.patch b/util/crossgcc/patches/binutils-2.26.1_aarch.patch
deleted file mode 100644
index 4a04418..0000000
--- a/util/crossgcc/patches/binutils-2.26.1_aarch.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
-index 2d491f6..e221ef4 100644
---- a/gas/config/tc-aarch64.c
-+++ b/gas/config/tc-aarch64.c
-@@ -1736,13 +1736,13 @@ s_ltorg (int ignored ATTRIBUTE_UNUSED)
- if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
- continue;
-
-- mapping_state (MAP_DATA);
--
- /* Align pool as you have word accesses.
- Only make a frag if we have to. */
- if (!need_pass_2)
- frag_align (align, 0, 0);
-
-+ mapping_state (MAP_DATA);
-+
- record_alignment (now_seg, align);
-
- sprintf (sym_name, "$$lit_\002%x", pool->id);
-@@ -6373,11 +6373,15 @@ aarch64_init_frag (fragS * fragP, int max_chars)
-
- switch (fragP->fr_type)
- {
-- case rs_align:
- case rs_align_test:
- case rs_fill:
- mapping_state_2 (MAP_DATA, max_chars);
- break;
-+ case rs_align:
-+ /* PR 20364: We can get alignment frags in code sections,
-+ so do not just assume that we should use the MAP_DATA state. */
-+ mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
-+ break;
- case rs_align_code:
- mapping_state_2 (MAP_INSN, max_chars);
- break;
-diff --git a/gas/testsuite/gas/aarch64/pr20364.d b/gas/testsuite/gas/aarch64/pr20364.d
-new file mode 100644
-index 0000000..babcff1
---- /dev/null
-+++ b/gas/testsuite/gas/aarch64/pr20364.d
-@@ -0,0 +1,13 @@
-+# Check that ".align <size>, <fill>" does not set the mapping state to DATA, causing unnecessary frag generation.
-+#name: PR20364
-+#objdump: -d
-+
-+.*: file format .*
-+
-+Disassembly of section \.vectors:
-+
-+0+000 <.*>:
-+ 0: d2800000 mov x0, #0x0 // #0
-+ 4: 94000000 bl 0 <plat_report_exception>
-+ 8: 17fffffe b 0 <bl1_exceptions>
-+
-diff --git a/gas/testsuite/gas/aarch64/pr20364.s b/gas/testsuite/gas/aarch64/pr20364.s
-new file mode 100644
-index 0000000..594ad7c
---- /dev/null
-+++ b/gas/testsuite/gas/aarch64/pr20364.s
-@@ -0,0 +1,28 @@
-+ .macro vector_base label
-+ .section .vectors, "ax"
-+ .align 11, 0
-+ \label:
-+ .endm
-+
-+ .macro vector_entry label
-+ .section .vectors, "ax"
-+ .align 7, 0
-+ \label:
-+ .endm
-+
-+ .macro check_vector_size since
-+ .if (. - \since) > (32 * 4)
-+ .error "Vector exceeds 32 instructions"
-+ .endif
-+ .endm
-+
-+ .globl bl1_exceptions
-+
-+vector_base bl1_exceptions
-+
-+vector_entry SynchronousExceptionSP0
-+ mov x0, #0x0
-+ bl plat_report_exception
-+ b SynchronousExceptionSP0
-+ check_vector_size SynchronousExceptionSP0
-+
---
-1.7.1
diff --git a/util/crossgcc/patches/binutils-2.26.1_no-bfd-doc.patch b/util/crossgcc/patches/binutils-2.26.1_no-bfd-doc.patch
deleted file mode 100644
index 35c22ff..0000000
--- a/util/crossgcc/patches/binutils-2.26.1_no-bfd-doc.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -ur binutils-2.26.1/bfd/Makefile.in binutils-2.26.1.patched/bfd/Makefile.in
---- binutils-2.26.1/bfd/Makefile.in 2015-11-13 16:27:40.000000000 +0800
-+++ binutils-2.26.1.patched/bfd/Makefile.in 2016-04-02 11:05:43.398422394 +0800
-@@ -341,7 +341,7 @@
- ACLOCAL_AMFLAGS = -I . -I .. -I ../config
- INCDIR = $(srcdir)/../include
- CSEARCH = -I. -I$(srcdir) -I$(INCDIR)
--SUBDIRS = doc po
-+SUBDIRS = po
- bfddocdir = doc
- libbfd_la_LDFLAGS = $(am__append_1) -release `cat libtool-soversion` \
- @SHARED_LDFLAGS@ $(am__empty)
diff --git a/util/crossgcc/patches/binutils-2.26.1_riscv.patch b/util/crossgcc/patches/binutils-2.26.1_riscv.patch
deleted file mode 100644
index 248ee40..0000000
--- a/util/crossgcc/patches/binutils-2.26.1_riscv.patch
+++ /dev/null
@@ -1,9780 +0,0 @@
-diff -urN empty/bfd/cpu-riscv.c binutils-2.26.1/bfd/cpu-riscv.c
---- empty/bfd/cpu-riscv.c 1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/cpu-riscv.c 2016-04-03 10:33:12.058793036 +0800
-@@ -0,0 +1,76 @@
-+/* BFD backend for RISC-V
-+ Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target.
-+
-+ This file is part of BFD, the Binary File Descriptor library.
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; see the file COPYING3. If not,
-+ see <http://www.gnu.org/licenses/>. */
-+
-+#include "sysdep.h"
-+#include "bfd.h"
-+#include "libbfd.h"
-+
-+/* This routine is provided two arch_infos and returns an arch_info
-+ that is compatible with both, or NULL if none exists. */
-+
-+static const bfd_arch_info_type *
-+riscv_compatible (const bfd_arch_info_type *a, const bfd_arch_info_type *b)
-+{
-+ if (a->arch != b->arch)
-+ return NULL;
-+
-+ /* Machine compatibility is checked in
-+ _bfd_riscv_elf_merge_private_bfd_data. */
-+
-+ return a;
-+}
-+
-+#define N(BITS_WORD, BITS_ADDR, NUMBER, PRINT, DEFAULT, NEXT) \
-+ { \
-+ BITS_WORD, /* bits in a word */ \
-+ BITS_ADDR, /* bits in an address */ \
-+ 8, /* 8 bits in a byte */ \
-+ bfd_arch_riscv, \
-+ NUMBER, \
-+ "riscv", \
-+ PRINT, \
-+ 3, \
-+ DEFAULT, \
-+ riscv_compatible, \
-+ bfd_default_scan, \
-+ bfd_arch_default_fill, \
-+ NEXT, \
-+ }
-+
-+enum
-+{
-+ I_riscv64,
-+ I_riscv32
-+};
-+
-+#define NN(index) (&arch_info_struct[(index) + 1])
-+
-+static const bfd_arch_info_type arch_info_struct[] =
-+{
-+ N (64, 64, bfd_mach_riscv64, "riscv:rv64", FALSE, NN (I_riscv64)),
-+ N (32, 32, bfd_mach_riscv32, "riscv:rv32", FALSE, 0)
-+};
-+
-+/* The default architecture is riscv:rv64. */
-+
-+const bfd_arch_info_type bfd_riscv_arch =
-+ N (64, 64, 0, "riscv", TRUE, &arch_info_struct[0]);
-diff -urN empty/bfd/elfnn-riscv.c binutils-2.26.1/bfd/elfnn-riscv.c
---- empty/bfd/elfnn-riscv.c 1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/elfnn-riscv.c 2016-04-03 10:33:12.062126369 +0800
-@@ -0,0 +1,3022 @@
-+/* RISC-V-specific support for NN-bit ELF.
-+ Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on TILE-Gx and MIPS targets.
-+
-+ This file is part of BFD, the Binary File Descriptor library.
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; see the file COPYING3. If not,
-+ see <http://www.gnu.org/licenses/>. */
-+
-+/* This file handles RISC-V ELF targets. */
-+
-+#include "sysdep.h"
-+#include "bfd.h"
-+#include "libbfd.h"
-+#include "bfdlink.h"
-+#include "genlink.h"
-+#include "elf-bfd.h"
-+#include "elfxx-riscv.h"
-+#include "elf/riscv.h"
-+#include "opcode/riscv.h"
-+
-+#define ARCH_SIZE NN
-+
-+#define MINUS_ONE ((bfd_vma)0 - 1)
-+
-+#define RISCV_ELF_LOG_WORD_BYTES (ARCH_SIZE == 32 ? 2 : 3)
-+
-+#define RISCV_ELF_WORD_BYTES (1 << RISCV_ELF_LOG_WORD_BYTES)
-+
-+/* The name of the dynamic interpreter. This is put in the .interp
-+ section. */
-+
-+#define ELF64_DYNAMIC_INTERPRETER "/lib/ld.so.1"
-+#define ELF32_DYNAMIC_INTERPRETER "/lib32/ld.so.1"
-+
-+#define ELF_ARCH bfd_arch_riscv
-+#define ELF_TARGET_ID RISCV_ELF_DATA
-+#define ELF_MACHINE_CODE EM_RISCV
-+#define ELF_MAXPAGESIZE 0x1000
-+#define ELF_COMMONPAGESIZE 0x1000
-+
-+/* The RISC-V linker needs to keep track of the number of relocs that it
-+ decides to copy as dynamic relocs in check_relocs for each symbol.
-+ This is so that it can later discard them if they are found to be
-+ unnecessary. We store the information in a field extending the
-+ regular ELF linker hash table. */
-+
-+struct riscv_elf_dyn_relocs
-+{
-+ struct riscv_elf_dyn_relocs *next;
-+
-+ /* The input section of the reloc. */
-+ asection *sec;
-+
-+ /* Total number of relocs copied for the input section. */
-+ bfd_size_type count;
-+
-+ /* Number of pc-relative relocs copied for the input section. */
-+ bfd_size_type pc_count;
-+};
-+
-+/* RISC-V ELF linker hash entry. */
-+
-+struct riscv_elf_link_hash_entry
-+{
-+ struct elf_link_hash_entry elf;
-+
-+ /* Track dynamic relocs copied for this symbol. */
-+ struct riscv_elf_dyn_relocs *dyn_relocs;
-+
-+#define GOT_UNKNOWN 0
-+#define GOT_NORMAL 1
-+#define GOT_TLS_GD 2
-+#define GOT_TLS_IE 4
-+#define GOT_TLS_LE 8
-+ char tls_type;
-+};
-+
-+#define riscv_elf_hash_entry(ent) \
-+ ((struct riscv_elf_link_hash_entry *)(ent))
-+
-+struct _bfd_riscv_elf_obj_tdata
-+{
-+ struct elf_obj_tdata root;
-+
-+ /* tls_type for each local got entry. */
-+ char *local_got_tls_type;
-+};
-+
-+#define _bfd_riscv_elf_tdata(abfd) \
-+ ((struct _bfd_riscv_elf_obj_tdata *) (abfd)->tdata.any)
-+
-+#define _bfd_riscv_elf_local_got_tls_type(abfd) \
-+ (_bfd_riscv_elf_tdata (abfd)->local_got_tls_type)
-+
-+#define _bfd_riscv_elf_tls_type(abfd, h, symndx) \
-+ (*((h) != NULL ? &riscv_elf_hash_entry (h)->tls_type \
-+ : &_bfd_riscv_elf_local_got_tls_type (abfd) [symndx]))
-+
-+#define is_riscv_elf(bfd) \
-+ (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
-+ && elf_tdata (bfd) != NULL \
-+ && elf_object_id (bfd) == RISCV_ELF_DATA)
-+
-+#include "elf/common.h"
-+#include "elf/internal.h"
-+
-+struct riscv_elf_link_hash_table
-+{
-+ struct elf_link_hash_table elf;
-+
-+ /* Short-cuts to get to dynamic linker sections. */
-+ asection *sdynbss;
-+ asection *srelbss;
-+ asection *sdyntdata;
-+
-+ /* Small local sym to section mapping cache. */
-+ struct sym_cache sym_cache;
-+};
-+
-+
-+/* Get the RISC-V ELF linker hash table from a link_info structure. */
-+#define riscv_elf_hash_table(p) \
-+ (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
-+ == RISCV_ELF_DATA ? ((struct riscv_elf_link_hash_table *) ((p)->hash)) : NULL)
-+
-+static void
-+riscv_info_to_howto_rela (bfd *abfd ATTRIBUTE_UNUSED,
-+ arelent *cache_ptr,
-+ Elf_Internal_Rela *dst)
-+{
-+ cache_ptr->howto = riscv_elf_rtype_to_howto (ELFNN_R_TYPE (dst->r_info));
-+}
-+
-+static void
-+riscv_elf_append_rela (bfd *abfd, asection *s, Elf_Internal_Rela *rel)
-+{
-+ const struct elf_backend_data *bed;
-+ bfd_byte *loc;
-+
-+ bed = get_elf_backend_data (abfd);
-+ loc = s->contents + (s->reloc_count++ * bed->s->sizeof_rela);
-+ bed->s->swap_reloca_out (abfd, rel, loc);
-+}
-+
-+/* PLT/GOT stuff */
-+
-+#define PLT_HEADER_INSNS 8
-+#define PLT_ENTRY_INSNS 4
-+#define PLT_HEADER_SIZE (PLT_HEADER_INSNS * 4)
-+#define PLT_ENTRY_SIZE (PLT_ENTRY_INSNS * 4)
-+
-+#define GOT_ENTRY_SIZE RISCV_ELF_WORD_BYTES
-+
-+#define GOTPLT_HEADER_SIZE (2 * GOT_ENTRY_SIZE)
-+
-+#define sec_addr(sec) ((sec)->output_section->vma + (sec)->output_offset)
-+
-+static bfd_vma
-+riscv_elf_got_plt_val (bfd_vma plt_index, struct bfd_link_info *info)
-+{
-+ return sec_addr (riscv_elf_hash_table (info)->elf.sgotplt)
-+ + GOTPLT_HEADER_SIZE + (plt_index * GOT_ENTRY_SIZE);
-+}
-+
-+#if ARCH_SIZE == 32
-+# define MATCH_LREG MATCH_LW
-+#else
-+# define MATCH_LREG MATCH_LD
-+#endif
-+
-+/* Generate a PLT header. */
-+
-+static void
-+riscv_make_plt_header (bfd_vma gotplt_addr, bfd_vma addr, uint32_t *entry)
-+{
-+ bfd_vma gotplt_offset_high = RISCV_PCREL_HIGH_PART (gotplt_addr, addr);
-+ bfd_vma gotplt_offset_low = RISCV_PCREL_LOW_PART (gotplt_addr, addr);
-+
-+ /* auipc t2, %hi(.got.plt)
-+ sub t1, t1, t3 # shifted .got.plt offset + hdr size + 12
-+ l[w|d] t3, %lo(.got.plt)(t2) # _dl_runtime_resolve
-+ addi t1, t1, -(hdr size + 12) # shifted .got.plt offset
-+ addi t0, t2, %lo(.got.plt) # &.got.plt
-+ srli t1, t1, log2(16/PTRSIZE) # .got.plt offset
-+ l[w|d] t0, PTRSIZE(t0) # link map
-+ jr t3 */
-+
-+ entry[0] = RISCV_UTYPE (AUIPC, X_T2, gotplt_offset_high);
-+ entry[1] = RISCV_RTYPE (SUB, X_T1, X_T1, X_T3);
-+ entry[2] = RISCV_ITYPE (LREG, X_T3, X_T2, gotplt_offset_low);
-+ entry[3] = RISCV_ITYPE (ADDI, X_T1, X_T1, -(PLT_HEADER_SIZE + 12));
-+ entry[4] = RISCV_ITYPE (ADDI, X_T0, X_T2, gotplt_offset_low);
-+ entry[5] = RISCV_ITYPE (SRLI, X_T1, X_T1, 4 - RISCV_ELF_LOG_WORD_BYTES);
-+ entry[6] = RISCV_ITYPE (LREG, X_T0, X_T0, RISCV_ELF_WORD_BYTES);
-+ entry[7] = RISCV_ITYPE (JALR, 0, X_T3, 0);
-+}
-+
-+/* Generate a PLT entry. */
-+
-+static void
-+riscv_make_plt_entry (bfd_vma got, bfd_vma addr, uint32_t *entry)
-+{
-+ /* auipc t3, %hi(.got.plt entry)
-+ l[w|d] t3, %lo(.got.plt entry)(t3)
-+ jalr t1, t3
-+ nop */
-+
-+ entry[0] = RISCV_UTYPE (AUIPC, X_T3, RISCV_PCREL_HIGH_PART (got, addr));
-+ entry[1] = RISCV_ITYPE (LREG, X_T3, X_T3, RISCV_PCREL_LOW_PART(got, addr));
-+ entry[2] = RISCV_ITYPE (JALR, X_T1, X_T3, 0);
-+ entry[3] = RISCV_NOP;
-+}
-+
-+/* Create an entry in an RISC-V ELF linker hash table. */
-+
-+static struct bfd_hash_entry *
-+link_hash_newfunc (struct bfd_hash_entry *entry,
-+ struct bfd_hash_table *table, const char *string)
-+{
-+ /* Allocate the structure if it has not already been allocated by a
-+ subclass. */
-+ if (entry == NULL)
-+ {
-+ entry =
-+ bfd_hash_allocate (table,
-+ sizeof (struct riscv_elf_link_hash_entry));
-+ if (entry == NULL)
-+ return entry;
-+ }
-+
-+ /* Call the allocation method of the superclass. */
-+ entry = _bfd_elf_link_hash_newfunc (entry, table, string);
-+ if (entry != NULL)
-+ {
-+ struct riscv_elf_link_hash_entry *eh;
-+
-+ eh = (struct riscv_elf_link_hash_entry *) entry;
-+ eh->dyn_relocs = NULL;
-+ eh->tls_type = GOT_UNKNOWN;
-+ }
-+
-+ return entry;
-+}
-+
-+/* Create a RISC-V ELF linker hash table. */
-+
-+static struct bfd_link_hash_table *
-+riscv_elf_link_hash_table_create (bfd *abfd)
-+{
-+ struct riscv_elf_link_hash_table *ret;
-+ bfd_size_type amt = sizeof (struct riscv_elf_link_hash_table);
-+
-+ ret = (struct riscv_elf_link_hash_table *) bfd_zmalloc (amt);
-+ if (ret == NULL)
-+ return NULL;
-+
-+ if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, link_hash_newfunc,
-+ sizeof (struct riscv_elf_link_hash_entry),
-+ RISCV_ELF_DATA))
-+ {
-+ free (ret);
-+ return NULL;
-+ }
-+
-+ return &ret->elf.root;
-+}
-+
-+/* Create the .got section. */
-+
-+static bfd_boolean
-+riscv_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
-+{
-+ flagword flags;
-+ asection *s, *s_got;
-+ struct elf_link_hash_entry *h;
-+ const struct elf_backend_data *bed = get_elf_backend_data (abfd);
-+ struct elf_link_hash_table *htab = elf_hash_table (info);
-+
-+ /* This function may be called more than once. */
-+ s = bfd_get_linker_section (abfd, ".got");
-+ if (s != NULL)
-+ return TRUE;
-+
-+ flags = bed->dynamic_sec_flags;
-+
-+ s = bfd_make_section_anyway_with_flags (abfd,
-+ (bed->rela_plts_and_copies_p
-+ ? ".rela.got" : ".rel.got"),
-+ (bed->dynamic_sec_flags
-+ | SEC_READONLY));
-+ if (s == NULL
-+ || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
-+ return FALSE;
-+ htab->srelgot = s;
-+
-+ s = s_got = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
-+ if (s == NULL
-+ || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
-+ return FALSE;
-+ htab->sgot = s;
-+
-+ /* The first bit of the global offset table is the header. */
-+ s->size += bed->got_header_size;
-+
-+ if (bed->want_got_plt)
-+ {
-+ s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
-+ if (s == NULL
-+ || !bfd_set_section_alignment (abfd, s,
-+ bed->s->log_file_align))
-+ return FALSE;
-+ htab->sgotplt = s;
-+
-+ /* Reserve room for the header. */
-+ s->size += GOTPLT_HEADER_SIZE;
-+ }
-+
-+ if (bed->want_got_sym)
-+ {
-+ /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
-+ section. We don't do this in the linker script because we don't want
-+ to define the symbol if we are not creating a global offset
-+ table. */
-+ h = _bfd_elf_define_linkage_sym (abfd, info, s_got,
-+ "_GLOBAL_OFFSET_TABLE_");
-+ elf_hash_table (info)->hgot = h;
-+ if (h == NULL)
-+ return FALSE;
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
-+ .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
-+ hash table. */
-+
-+static bfd_boolean
-+riscv_elf_create_dynamic_sections (bfd *dynobj,
-+ struct bfd_link_info *info)
-+{
-+ struct riscv_elf_link_hash_table *htab;
-+
-+ htab = riscv_elf_hash_table (info);
-+ BFD_ASSERT (htab != NULL);
-+
-+ if (!riscv_elf_create_got_section (dynobj, info))
-+ return FALSE;
-+
-+ if (!_bfd_elf_create_dynamic_sections (dynobj, info))
-+ return FALSE;
-+
-+ htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
-+ if (!bfd_link_pic (info))
-+ {
-+ htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
-+ htab->sdyntdata =
-+ bfd_make_section_anyway_with_flags (dynobj, ".tdata.dyn",
-+ SEC_ALLOC | SEC_THREAD_LOCAL);
-+ }
-+
-+ if (!htab->elf.splt || !htab->elf.srelplt || !htab->sdynbss
-+ || (!bfd_link_pic (info) && (!htab->srelbss || !htab->sdyntdata)))
-+ abort ();
-+
-+ return TRUE;
-+}
-+
-+/* Copy the extra info we tack onto an elf_link_hash_entry. */
-+
-+static void
-+riscv_elf_copy_indirect_symbol (struct bfd_link_info *info,
-+ struct elf_link_hash_entry *dir,
-+ struct elf_link_hash_entry *ind)
-+{
-+ struct riscv_elf_link_hash_entry *edir, *eind;
-+
-+ edir = (struct riscv_elf_link_hash_entry *) dir;
-+ eind = (struct riscv_elf_link_hash_entry *) ind;
-+
-+ if (eind->dyn_relocs != NULL)
-+ {
-+ if (edir->dyn_relocs != NULL)
-+ {
-+ struct riscv_elf_dyn_relocs **pp;
-+ struct riscv_elf_dyn_relocs *p;
-+
-+ /* Add reloc counts against the indirect sym to the direct sym
-+ list. Merge any entries against the same section. */
-+ for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
-+ {
-+ struct riscv_elf_dyn_relocs *q;
-+
-+ for (q = edir->dyn_relocs; q != NULL; q = q->next)
-+ if (q->sec == p->sec)
-+ {
-+ q->pc_count += p->pc_count;
-+ q->count += p->count;
-+ *pp = p->next;
-+ break;
-+ }
-+ if (q == NULL)
-+ pp = &p->next;
-+ }
-+ *pp = edir->dyn_relocs;
-+ }
-+
-+ edir->dyn_relocs = eind->dyn_relocs;
-+ eind->dyn_relocs = NULL;
-+ }
-+
-+ if (ind->root.type == bfd_link_hash_indirect
-+ && dir->got.refcount <= 0)
-+ {
-+ edir->tls_type = eind->tls_type;
-+ eind->tls_type = GOT_UNKNOWN;
-+ }
-+ _bfd_elf_link_hash_copy_indirect (info, dir, ind);
-+}
-+
-+static bfd_boolean
-+riscv_elf_record_tls_type (bfd *abfd, struct elf_link_hash_entry *h,
-+ unsigned long symndx, char tls_type)
-+{
-+ char *new_tls_type = &_bfd_riscv_elf_tls_type (abfd, h, symndx);
-+ *new_tls_type |= tls_type;
-+ if ((*new_tls_type & GOT_NORMAL) && (*new_tls_type & ~GOT_NORMAL))
-+ {
-+ (*_bfd_error_handler)
-+ (_("%B: `%s' accessed both as normal and thread local symbol"),
-+ abfd, h ? h->root.root.string : "<local>");
-+ return FALSE;
-+ }
-+ return TRUE;
-+}
-+
-+static bfd_boolean
-+riscv_elf_record_got_reference (bfd *abfd, struct bfd_link_info *info,
-+ struct elf_link_hash_entry *h, long symndx)
-+{
-+ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
-+
-+ if (htab->elf.sgot == NULL)
-+ {
-+ if (!riscv_elf_create_got_section (htab->elf.dynobj, info))
-+ return FALSE;
-+ }
-+
-+ if (h != NULL)
-+ {
-+ h->got.refcount += 1;
-+ return TRUE;
-+ }
-+
-+ /* This is a global offset table entry for a local symbol. */
-+ if (elf_local_got_refcounts (abfd) == NULL)
-+ {
-+ bfd_size_type size = symtab_hdr->sh_info * (sizeof (bfd_vma) + 1);
-+ if (!(elf_local_got_refcounts (abfd) = bfd_zalloc (abfd, size)))
-+ return FALSE;
-+ _bfd_riscv_elf_local_got_tls_type (abfd)
-+ = (char *) (elf_local_got_refcounts (abfd) + symtab_hdr->sh_info);
-+ }
-+ elf_local_got_refcounts (abfd) [symndx] += 1;
-+
-+ return TRUE;
-+}
-+
-+static bfd_boolean
-+bad_static_reloc (bfd *abfd, unsigned r_type, struct elf_link_hash_entry *h)
-+{
-+ (*_bfd_error_handler)
-+ (_("%B: relocation %s against `%s' can not be used when making a shared "
-+ "object; recompile with -fPIC"),
-+ abfd, riscv_elf_rtype_to_howto (r_type)->name,
-+ h != NULL ? h->root.root.string : "a local symbol");
-+ bfd_set_error (bfd_error_bad_value);
-+ return FALSE;
-+}
-+/* Look through the relocs for a section during the first phase, and
-+ allocate space in the global offset table or procedure linkage
-+ table. */
-+
-+static bfd_boolean
-+riscv_elf_check_relocs (bfd *abfd, struct bfd_link_info *info,
-+ asection *sec, const Elf_Internal_Rela *relocs)
-+{
-+ struct riscv_elf_link_hash_table *htab;
-+ Elf_Internal_Shdr *symtab_hdr;
-+ struct elf_link_hash_entry **sym_hashes;
-+ const Elf_Internal_Rela *rel;
-+ asection *sreloc = NULL;
-+
-+ if (bfd_link_relocatable (info))
-+ return TRUE;
-+
-+ htab = riscv_elf_hash_table (info);
-+ symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
-+ sym_hashes = elf_sym_hashes (abfd);
-+
-+ if (htab->elf.dynobj == NULL)
-+ htab->elf.dynobj = abfd;
-+
-+ for (rel = relocs; rel < relocs + sec->reloc_count; rel++)
-+ {
-+ unsigned int r_type;
-+ unsigned long r_symndx;
-+ struct elf_link_hash_entry *h;
-+
-+ r_symndx = ELFNN_R_SYM (rel->r_info);
-+ r_type = ELFNN_R_TYPE (rel->r_info);
-+
-+ if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
-+ {
-+ (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
-+ abfd, r_symndx);
-+ return FALSE;
-+ }
-+
-+ if (r_symndx < symtab_hdr->sh_info)
-+ h = NULL;
-+ else
-+ {
-+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
-+ while (h->root.type == bfd_link_hash_indirect
-+ || h->root.type == bfd_link_hash_warning)
-+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
-+
-+ /* PR15323, ref flags aren't set for references in the same
-+ object. */
-+ h->root.non_ir_ref = 1;
-+ }
-+
-+ switch (r_type)
-+ {
-+ case R_RISCV_TLS_GD_HI20:
-+ if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
-+ || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_GD))
-+ return FALSE;
-+ break;
-+
-+ case R_RISCV_TLS_GOT_HI20:
-+ if (bfd_link_pic (info))
-+ info->flags |= DF_STATIC_TLS;
-+ if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
-+ || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_IE))
-+ return FALSE;
-+ break;
-+
-+ case R_RISCV_GOT_HI20:
-+ if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
-+ || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_NORMAL))
-+ return FALSE;
-+ break;
-+
-+ case R_RISCV_CALL_PLT:
-+ /* This symbol requires a procedure linkage table entry. We
-+ actually build the entry in adjust_dynamic_symbol,
-+ because this might be a case of linking PIC code without
-+ linking in any dynamic objects, in which case we don't
-+ need to generate a procedure linkage table after all. */
-+
-+ if (h != NULL)
-+ {
-+ h->needs_plt = 1;
-+ h->plt.refcount += 1;
-+ }
-+ break;
-+
-+ case R_RISCV_CALL:
-+ case R_RISCV_JAL:
-+ case R_RISCV_BRANCH:
-+ case R_RISCV_RVC_BRANCH:
-+ case R_RISCV_RVC_JUMP:
-+ case R_RISCV_PCREL_HI20:
-+ /* In shared libraries, these relocs are known to bind locally. */
-+ if (bfd_link_pic (info))
-+ break;
-+ goto static_reloc;
-+
-+ case R_RISCV_TPREL_HI20:
-+ if (!bfd_link_executable (info))
-+ return bad_static_reloc (abfd, r_type, h);
-+ if (h != NULL)
-+ riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_LE);
-+ goto static_reloc;
-+
-+ case R_RISCV_HI20:
-+ if (bfd_link_pic (info))
-+ return bad_static_reloc (abfd, r_type, h);
-+ /* Fall through. */
-+
-+ case R_RISCV_COPY:
-+ case R_RISCV_JUMP_SLOT:
-+ case R_RISCV_RELATIVE:
-+ case R_RISCV_64:
-+ case R_RISCV_32:
-+ /* Fall through. */
-+
-+ static_reloc:
-+ /* This reloc might not bind locally. */
-+ if (h != NULL)
-+ h->non_got_ref = 1;
-+
-+ if (h != NULL && !bfd_link_pic (info))
-+ {
-+ /* We may need a .plt entry if the function this reloc
-+ refers to is in a shared lib. */
-+ h->plt.refcount += 1;
-+ }
-+
-+ /* If we are creating a shared library, and this is a reloc
-+ against a global symbol, or a non PC relative reloc
-+ against a local symbol, then we need to copy the reloc
-+ into the shared library. However, if we are linking with
-+ -Bsymbolic, we do not need to copy a reloc against a
-+ global symbol which is defined in an object we are
-+ including in the link (i.e., DEF_REGULAR is set). At
-+ this point we have not seen all the input files, so it is
-+ possible that DEF_REGULAR is not set now but will be set
-+ later (it is never cleared). In case of a weak definition,
-+ DEF_REGULAR may be cleared later by a strong definition in
-+ a shared library. We account for that possibility below by
-+ storing information in the relocs_copied field of the hash
-+ table entry. A similar situation occurs when creating
-+ shared libraries and symbol visibility changes render the
-+ symbol local.
-+
-+ If, on the other hand, we are creating an executable, we
-+ may need to keep relocations for symbols satisfied by a
-+ dynamic library if we manage to avoid copy relocs for the
-+ symbol. */
-+ if ((bfd_link_pic (info)
-+ && (sec->flags & SEC_ALLOC) != 0
-+ && (! riscv_elf_rtype_to_howto (r_type)->pc_relative
-+ || (h != NULL
-+ && (! info->symbolic
-+ || h->root.type == bfd_link_hash_defweak
-+ || !h->def_regular))))
-+ || (!bfd_link_pic (info)
-+ && (sec->flags & SEC_ALLOC) != 0
-+ && h != NULL
-+ && (h->root.type == bfd_link_hash_defweak
-+ || !h->def_regular)))
-+ {
-+ struct riscv_elf_dyn_relocs *p;
-+ struct riscv_elf_dyn_relocs **head;
-+
-+ /* When creating a shared object, we must copy these
-+ relocs into the output file. We create a reloc
-+ section in dynobj and make room for the reloc. */
-+ if (sreloc == NULL)
-+ {
-+ sreloc = _bfd_elf_make_dynamic_reloc_section
-+ (sec, htab->elf.dynobj, RISCV_ELF_LOG_WORD_BYTES,
-+ abfd, /*rela?*/ TRUE);
-+
-+ if (sreloc == NULL)
-+ return FALSE;
-+ }
-+
-+ /* If this is a global symbol, we count the number of
-+ relocations we need for this symbol. */
-+ if (h != NULL)
-+ head = &((struct riscv_elf_link_hash_entry *) h)->dyn_relocs;
-+ else
-+ {
-+ /* Track dynamic relocs needed for local syms too.
-+ We really need local syms available to do this
-+ easily. Oh well. */
-+
-+ asection *s;
-+ void *vpp;
-+ Elf_Internal_Sym *isym;
-+
-+ isym = bfd_sym_from_r_symndx (&htab->sym_cache,
-+ abfd, r_symndx);
-+ if (isym == NULL)
-+ return FALSE;
-+
-+ s = bfd_section_from_elf_index (abfd, isym->st_shndx);
-+ if (s == NULL)
-+ s = sec;
-+
-+ vpp = &elf_section_data (s)->local_dynrel;
-+ head = (struct riscv_elf_dyn_relocs **) vpp;
-+ }
-+
-+ p = *head;
-+ if (p == NULL || p->sec != sec)
-+ {
-+ bfd_size_type amt = sizeof *p;
-+ p = ((struct riscv_elf_dyn_relocs *)
-+ bfd_alloc (htab->elf.dynobj, amt));
-+ if (p == NULL)
-+ return FALSE;
-+ p->next = *head;
-+ *head = p;
-+ p->sec = sec;
-+ p->count = 0;
-+ p->pc_count = 0;
-+ }
-+
-+ p->count += 1;
-+ p->pc_count += riscv_elf_rtype_to_howto (r_type)->pc_relative;
-+ }
-+
-+ break;
-+
-+ case R_RISCV_GNU_VTINHERIT:
-+ if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
-+ return FALSE;
-+ break;
-+
-+ case R_RISCV_GNU_VTENTRY:
-+ if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
-+ return FALSE;
-+ break;
-+
-+ default:
-+ break;
-+ }
-+ }
-+
-+ return TRUE;
-+}
-+
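-+/* Return the section that should be marked against GC for a given
-+ relocation. */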
-+static asection *
-+riscv_elf_gc_mark_hook (asection *sec,
-+ struct bfd_link_info *info,
-+ Elf_Internal_Rela *rel,
-+ struct elf_link_hash_entry *h,
-+ Elf_Internal_Sym *sym)
-+{
-+ if (h != NULL)
-+ switch (ELFNN_R_TYPE (rel->r_info))
-+ {
-+ case R_RISCV_GNU_VTINHERIT:
-+ case R_RISCV_GNU_VTENTRY:
-+ return NULL;
-+ }
-+
-+ return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
-+}
-+
-+/* Update the got entry reference counts for the section being removed. */
-+static bfd_boolean
-+riscv_elf_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
-+ asection *sec, const Elf_Internal_Rela *relocs)
-+{
-+ const Elf_Internal_Rela *rel, *relend;
-+ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
-+ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
-+ bfd_signed_vma *local_got_refcounts = elf_local_got_refcounts (abfd);
-+
-+ if (bfd_link_relocatable (info))
-+ return TRUE;
-+
-+ elf_section_data (sec)->local_dynrel = NULL;
-+
-+ for (rel = relocs, relend = relocs + sec->reloc_count; rel < relend; rel++)
-+ {
-+ unsigned long r_symndx;
-+ struct elf_link_hash_entry *h = NULL;
-+
-+ r_symndx = ELFNN_R_SYM (rel->r_info);
-+ if (r_symndx >= symtab_hdr->sh_info)
-+ {
-+ struct riscv_elf_link_hash_entry *eh;
-+ struct riscv_elf_dyn_relocs **pp;
-+ struct riscv_elf_dyn_relocs *p;
-+
-+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
-+ while (h->root.type == bfd_link_hash_indirect
-+ || h->root.type == bfd_link_hash_warning)
-+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
-+ eh = (struct riscv_elf_link_hash_entry *) h;
-+ for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
-+ if (p->sec == sec)
-+ {
-+ /* Everything must go for SEC. */
-+ *pp = p->next;
-+ break;
-+ }
-+ }
-+
-+ switch (ELFNN_R_TYPE (rel->r_info))
-+ {
-+ case R_RISCV_GOT_HI20:
-+ case R_RISCV_TLS_GOT_HI20:
-+ case R_RISCV_TLS_GD_HI20:
-+ if (h != NULL)
-+ {
-+ if (h->got.refcount > 0)
-+ h->got.refcount--;
-+ }
-+ else
-+ {
-+ if (local_got_refcounts &&
-+ local_got_refcounts[r_symndx] > 0)
-+ local_got_refcounts[r_symndx]--;
-+ }
-+ break;
-+
-+ case R_RISCV_HI20:
-+ case R_RISCV_PCREL_HI20:
-+ case R_RISCV_COPY:
-+ case R_RISCV_JUMP_SLOT:
-+ case R_RISCV_RELATIVE:
-+ case R_RISCV_64:
-+ case R_RISCV_32:
-+ case R_RISCV_BRANCH:
-+ case R_RISCV_CALL:
-+ case R_RISCV_JAL:
-+ case R_RISCV_RVC_BRANCH:
-+ case R_RISCV_RVC_JUMP:
-+ if (bfd_link_pic (info))
-+ break;
-+ /* Fall through. */
-+
-+ case R_RISCV_CALL_PLT:
-+ if (h != NULL)
-+ {
-+ if (h->plt.refcount > 0)
-+ h->plt.refcount--;
-+ }
-+ break;
-+
-+ default:
-+ break;
-+ }
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Adjust a symbol defined by a dynamic object and referenced by a
-+ regular object. The current definition is in some section of the
-+ dynamic object, but we're not including those sections. We have to
-+ change the definition to something the rest of the link can
-+ understand. */
-+
-+static bfd_boolean
-+riscv_elf_adjust_dynamic_symbol (struct bfd_link_info *info,
-+ struct elf_link_hash_entry *h)
-+{
-+ struct riscv_elf_link_hash_table *htab;
-+ struct riscv_elf_link_hash_entry * eh;
-+ struct riscv_elf_dyn_relocs *p;
-+ bfd *dynobj;
-+ asection *s;
-+
-+ htab = riscv_elf_hash_table (info);
-+ BFD_ASSERT (htab != NULL);
-+
-+ dynobj = htab->elf.dynobj;
-+
-+ /* Make sure we know what is going on here. */
-+ BFD_ASSERT (dynobj != NULL
-+ && (h->needs_plt
-+ || h->type == STT_GNU_IFUNC
-+ || h->u.weakdef != NULL
-+ || (h->def_dynamic
-+ && h->ref_regular
-+ && !h->def_regular)));
-+
-+ /* If this is a function, put it in the procedure linkage table. We
-+ will fill in the contents of the procedure linkage table later
-+ (although we could actually do it here). */
-+ if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
-+ {
-+ if (h->plt.refcount <= 0
-+ || SYMBOL_CALLS_LOCAL (info, h)
-+ || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
-+ && h->root.type == bfd_link_hash_undefweak))
-+ {
-+ /* This case can occur if we saw a R_RISCV_CALL_PLT reloc in an
-+ input file, but the symbol was never referred to by a dynamic
-+ object, or if all references were garbage collected. In such
-+ a case, we don't actually need to build a PLT entry. */
-+ h->plt.offset = (bfd_vma) -1;
-+ h->needs_plt = 0;
-+ }
-+
-+ return TRUE;
-+ }
-+ else
-+ h->plt.offset = (bfd_vma) -1;
-+
-+ /* If this is a weak symbol, and there is a real definition, the
-+ processor independent code will have arranged for us to see the
-+ real definition first, and we can just use the same value. */
-+ if (h->u.weakdef != NULL)
-+ {
-+ BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
-+ || h->u.weakdef->root.type == bfd_link_hash_defweak);
-+ h->root.u.def.section = h->u.weakdef->root.u.def.section;
-+ h->root.u.def.value = h->u.weakdef->root.u.def.value;
-+ return TRUE;
-+ }
-+
-+ /* This is a reference to a symbol defined by a dynamic object which
-+ is not a function. */
-+
-+ /* If we are creating a shared library, we must presume that the
-+ only references to the symbol are via the global offset table.
-+ For such cases we need not do anything here; the relocations will
-+ be handled correctly by relocate_section. */
-+ if (bfd_link_pic (info))
-+ return TRUE;
-+
-+ /* If there are no references to this symbol that do not use the
-+ GOT, we don't need to generate a copy reloc. */
-+ if (!h->non_got_ref)
-+ return TRUE;
-+
-+ /* If -z nocopyreloc was given, we won't generate them either. */
-+ if (info->nocopyreloc)
-+ {
-+ h->non_got_ref = 0;
-+ return TRUE;
-+ }
-+
-+ eh = (struct riscv_elf_link_hash_entry *) h;
-+ for (p = eh->dyn_relocs; p != NULL; p = p->next)
-+ {
-+ s = p->sec->output_section;
-+ if (s != NULL && (s->flags & SEC_READONLY) != 0)
-+ break;
-+ }
-+
-+ /* If we didn't find any dynamic relocs in read-only sections, then
-+ we'll be keeping the dynamic relocs and avoiding the copy reloc. */
-+ if (p == NULL)
-+ {
-+ h->non_got_ref = 0;
-+ return TRUE;
-+ }
-+
-+ /* We must allocate the symbol in our .dynbss section, which will
-+ become part of the .bss section of the executable. There will be
-+ an entry for this symbol in the .dynsym section. The dynamic
-+ object will contain position independent code, so all references
-+ from the dynamic object to this symbol will go through the global
-+ offset table. The dynamic linker will use the .dynsym entry to
-+ determine the address it must put in the global offset table, so
-+ both the dynamic object and the regular object will refer to the
-+ same memory location for the variable. */
-+
-+ /* We must generate a R_RISCV_COPY reloc to tell the dynamic linker
-+ to copy the initial value out of the dynamic object and into the
-+ runtime process image. We need to remember the offset into the
-+ .rela.bss section we are going to use. */
-+ if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
-+ {
-+ htab->srelbss->size += sizeof (ElfNN_External_Rela);
-+ h->needs_copy = 1;
-+ }
-+
-+ if (eh->tls_type & ~GOT_NORMAL)
-+ return _bfd_elf_adjust_dynamic_copy (info, h, htab->sdyntdata);
-+
-+ return _bfd_elf_adjust_dynamic_copy (info, h, htab->sdynbss);
-+}
-+
-+/* Allocate space in .plt, .got and associated reloc sections for
-+ dynamic relocs. */
-+
-+static bfd_boolean
-+allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
-+{
-+ struct bfd_link_info *info;
-+ struct riscv_elf_link_hash_table *htab;
-+ struct riscv_elf_link_hash_entry *eh;
-+ struct riscv_elf_dyn_relocs *p;
-+
-+ if (h->root.type == bfd_link_hash_indirect)
-+ return TRUE;
-+
-+ info = (struct bfd_link_info *) inf;
-+ htab = riscv_elf_hash_table (info);
-+ BFD_ASSERT (htab != NULL);
-+
-+ if (htab->elf.dynamic_sections_created
-+ && h->plt.refcount > 0)
-+ {
-+ /* Make sure this symbol is output as a dynamic symbol.
-+ Undefined weak syms won't yet be marked as dynamic. */
-+ if (h->dynindx == -1
-+ && !h->forced_local)
-+ {
-+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+ return FALSE;
-+ }
-+
-+ if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, bfd_link_pic (info), h))
-+ {
-+ asection *s = htab->elf.splt;
-+
-+ if (s->size == 0)
-+ s->size = PLT_HEADER_SIZE;
-+
-+ h->plt.offset = s->size;
-+
-+ /* Make room for this entry. */
-+ s->size += PLT_ENTRY_SIZE;
-+
-+ /* We also need to make an entry in the .got.plt section. */
-+ htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
-+
-+ /* We also need to make an entry in the .rela.plt section. */
-+ htab->elf.srelplt->size += sizeof (ElfNN_External_Rela);
-+
-+ /* If this symbol is not defined in a regular file, and we are
-+ not generating a shared library, then set the symbol to this
-+ location in the .plt. This is required to make function
-+ pointers compare as equal between the normal executable and
-+ the shared library. */
-+ if (! bfd_link_pic (info)
-+ && !h->def_regular)
-+ {
-+ h->root.u.def.section = s;
-+ h->root.u.def.value = h->plt.offset;
-+ }
-+ }
-+ else
-+ {
-+ h->plt.offset = (bfd_vma) -1;
-+ h->needs_plt = 0;
-+ }
-+ }
-+ else
-+ {
-+ h->plt.offset = (bfd_vma) -1;
-+ h->needs_plt = 0;
-+ }
-+
-+ if (h->got.refcount > 0)
-+ {
-+ asection *s;
-+ bfd_boolean dyn;
-+ int tls_type = riscv_elf_hash_entry (h)->tls_type;
-+
-+ /* Make sure this symbol is output as a dynamic symbol.
-+ Undefined weak syms won't yet be marked as dynamic. */
-+ if (h->dynindx == -1
-+ && !h->forced_local)
-+ {
-+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+ return FALSE;
-+ }
-+
-+ s = htab->elf.sgot;
-+ h->got.offset = s->size;
-+ dyn = htab->elf.dynamic_sections_created;
-+ if (tls_type & (GOT_TLS_GD | GOT_TLS_IE))
-+ {
-+ /* TLS_GD needs two dynamic relocs and two GOT slots. */
-+ if (tls_type & GOT_TLS_GD)
-+ {
-+ s->size += 2 * RISCV_ELF_WORD_BYTES;
-+ htab->elf.srelgot->size += 2 * sizeof (ElfNN_External_Rela);
-+ }
-+
-+ /* TLS_IE needs one dynamic reloc and one GOT slot. */
-+ if (tls_type & GOT_TLS_IE)
-+ {
-+ s->size += RISCV_ELF_WORD_BYTES;
-+ htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
-+ }
-+ }
-+ else
-+ {
-+ s->size += RISCV_ELF_WORD_BYTES;
-+ if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h))
-+ htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
-+ }
-+ }
-+ else
-+ h->got.offset = (bfd_vma) -1;
-+
-+ eh = (struct riscv_elf_link_hash_entry *) h;
-+ if (eh->dyn_relocs == NULL)
-+ return TRUE;
-+
-+ /* In the shared -Bsymbolic case, discard space allocated for
-+ dynamic pc-relative relocs against symbols which turn out to be
-+ defined in regular objects. For the normal shared case, discard
-+ space for pc-relative relocs that have become local due to symbol
-+ visibility changes. */
-+
-+ if (bfd_link_pic (info))
-+ {
-+ if (SYMBOL_CALLS_LOCAL (info, h))
-+ {
-+ struct riscv_elf_dyn_relocs **pp;
-+
-+ for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
-+ {
-+ p->count -= p->pc_count;
-+ p->pc_count = 0;
-+ if (p->count == 0)
-+ *pp = p->next;
-+ else
-+ pp = &p->next;
-+ }
-+ }
-+
-+ /* Also discard relocs on undefined weak syms with non-default
-+ visibility. */
-+ if (eh->dyn_relocs != NULL
-+ && h->root.type == bfd_link_hash_undefweak)
-+ {
-+ if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
-+ eh->dyn_relocs = NULL;
-+
-+ /* Make sure undefined weak symbols are output as dynamic
-+ symbols in PIEs. */
-+ else if (h->dynindx == -1
-+ && !h->forced_local)
-+ {
-+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+ return FALSE;
-+ }
-+ }
-+ }
-+ else
-+ {
-+ /* For the non-shared case, discard space for relocs against
-+ symbols which turn out to need copy relocs or are not
-+ dynamic. */
-+
-+ if (!h->non_got_ref
-+ && ((h->def_dynamic
-+ && !h->def_regular)
-+ || (htab->elf.dynamic_sections_created
-+ && (h->root.type == bfd_link_hash_undefweak
-+ || h->root.type == bfd_link_hash_undefined))))
-+ {
-+ /* Make sure this symbol is output as a dynamic symbol.
-+ Undefined weak syms won't yet be marked as dynamic. */
-+ if (h->dynindx == -1
-+ && !h->forced_local)
-+ {
-+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
-+ return FALSE;
-+ }
-+
-+ /* If that succeeded, we know we'll be keeping all the
-+ relocs. */
-+ if (h->dynindx != -1)
-+ goto keep;
-+ }
-+
-+ eh->dyn_relocs = NULL;
-+
-+ keep: ;
-+ }
-+
-+ /* Finally, allocate space. */
-+ for (p = eh->dyn_relocs; p != NULL; p = p->next)
-+ {
-+ asection *sreloc = elf_section_data (p->sec)->sreloc;
-+ sreloc->size += p->count * sizeof (ElfNN_External_Rela);
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Find any dynamic relocs that apply to read-only sections. */
-+
-+static bfd_boolean
-+readonly_dynrelocs (struct elf_link_hash_entry *h, void *inf)
-+{
-+ struct riscv_elf_link_hash_entry *eh;
-+ struct riscv_elf_dyn_relocs *p;
-+
-+ eh = (struct riscv_elf_link_hash_entry *) h;
-+ for (p = eh->dyn_relocs; p != NULL; p = p->next)
-+ {
-+ asection *s = p->sec->output_section;
-+
-+ if (s != NULL && (s->flags & SEC_READONLY) != 0)
-+ {
-+ ((struct bfd_link_info *) inf)->flags |= DF_TEXTREL;
-+ return FALSE;
-+ }
-+ }
-+ return TRUE;
-+}
-+
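-+/* Set the sizes of the dynamic sections. */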
-+static bfd_boolean
-+riscv_elf_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info)
-+{
-+ struct riscv_elf_link_hash_table *htab;
-+ bfd *dynobj;
-+ asection *s;
-+ bfd *ibfd;
-+
-+ htab = riscv_elf_hash_table (info);
-+ BFD_ASSERT (htab != NULL);
-+ dynobj = htab->elf.dynobj;
-+ BFD_ASSERT (dynobj != NULL);
-+
-+ if (elf_hash_table (info)->dynamic_sections_created)
-+ {
-+ /* Set the contents of the .interp section to the interpreter. */
-+ if (bfd_link_executable (info) && !info->nointerp)
-+ {
-+ s = bfd_get_linker_section (dynobj, ".interp");
-+ BFD_ASSERT (s != NULL);
-+ s->size = strlen (ELFNN_DYNAMIC_INTERPRETER) + 1;
-+ s->contents = (unsigned char *) ELFNN_DYNAMIC_INTERPRETER;
-+ }
-+ }
-+
-+ /* Set up .got offsets for local syms, and space for local dynamic
-+ relocs. */
-+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
-+ {
-+ bfd_signed_vma *local_got;
-+ bfd_signed_vma *end_local_got;
-+ char *local_tls_type;
-+ bfd_size_type locsymcount;
-+ Elf_Internal_Shdr *symtab_hdr;
-+ asection *srel;
-+
-+ if (! is_riscv_elf (ibfd))
-+ continue;
-+
-+ for (s = ibfd->sections; s != NULL; s = s->next)
-+ {
-+ struct riscv_elf_dyn_relocs *p;
-+
-+ for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
-+ {
-+ if (!bfd_is_abs_section (p->sec)
-+ && bfd_is_abs_section (p->sec->output_section))
-+ {
-+ /* Input section has been discarded, either because
-+ it is a copy of a linkonce section or due to
-+ linker script /DISCARD/, so we'll be discarding
-+ the relocs too. */
-+ }
-+ else if (p->count != 0)
-+ {
-+ srel = elf_section_data (p->sec)->sreloc;
-+ srel->size += p->count * sizeof (ElfNN_External_Rela);
-+ if ((p->sec->output_section->flags & SEC_READONLY) != 0)
-+ info->flags |= DF_TEXTREL;
-+ }
-+ }
-+ }
-+
-+ local_got = elf_local_got_refcounts (ibfd);
-+ if (!local_got)
-+ continue;
-+
-+ symtab_hdr = &elf_symtab_hdr (ibfd);
-+ locsymcount = symtab_hdr->sh_info;
-+ end_local_got = local_got + locsymcount;
-+ local_tls_type = _bfd_riscv_elf_local_got_tls_type (ibfd);
-+ s = htab->elf.sgot;
-+ srel = htab->elf.srelgot;
-+ for (; local_got < end_local_got; ++local_got, ++local_tls_type)
-+ {
-+ if (*local_got > 0)
-+ {
-+ *local_got = s->size;
-+ s->size += RISCV_ELF_WORD_BYTES;
-+ if (*local_tls_type & GOT_TLS_GD)
-+ s->size += RISCV_ELF_WORD_BYTES;
-+ if (bfd_link_pic (info)
-+ || (*local_tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
-+ srel->size += sizeof (ElfNN_External_Rela);
-+ }
-+ else
-+ *local_got = (bfd_vma) -1;
-+ }
-+ }
-+
-+ /* Allocate global sym .plt and .got entries, and space for global
-+ sym dynamic relocs. */
-+ elf_link_hash_traverse (&htab->elf, allocate_dynrelocs, info);
-+
-+ if (htab->elf.sgotplt)
-+ {
-+ struct elf_link_hash_entry *got;
-+ got = elf_link_hash_lookup (elf_hash_table (info),
-+ "_GLOBAL_OFFSET_TABLE_",
-+ FALSE, FALSE, FALSE);
-+
-+ /* Don't allocate the .got.plt section if there are neither GOT nor PLT
-+ entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
-+ if ((got == NULL
-+ || !got->ref_regular_nonweak)
-+ && (htab->elf.sgotplt->size == GOTPLT_HEADER_SIZE)
-+ && (htab->elf.splt == NULL
-+ || htab->elf.splt->size == 0)
-+ && (htab->elf.sgot == NULL
-+ || (htab->elf.sgot->size
-+ == get_elf_backend_data (output_bfd)->got_header_size)))
-+ htab->elf.sgotplt->size = 0;
-+ }
-+
-+ /* The check_relocs and adjust_dynamic_symbol entry points have
-+ determined the sizes of the various dynamic sections. Allocate
-+ memory for them. */
-+ for (s = dynobj->sections; s != NULL; s = s->next)
-+ {
-+ if ((s->flags & SEC_LINKER_CREATED) == 0)
-+ continue;
-+
-+ if (s == htab->elf.splt
-+ || s == htab->elf.sgot
-+ || s == htab->elf.sgotplt
-+ || s == htab->sdynbss)
-+ {
-+ /* Strip this section if we don't need it; see the
-+ comment below. */
-+ }
-+ else if (strncmp (s->name, ".rela", 5) == 0)
-+ {
-+ if (s->size != 0)
-+ {
-+ /* We use the reloc_count field as a counter if we need
-+ to copy relocs into the output file. */
-+ s->reloc_count = 0;
-+ }
-+ }
-+ else
-+ {
-+ /* It's not one of our sections. */
-+ continue;
-+ }
-+
-+ if (s->size == 0)
-+ {
-+ /* If we don't need this section, strip it from the
-+ output file. This is mostly to handle .rela.bss and
-+ .rela.plt. We must create both sections in
-+ create_dynamic_sections, because they must be created
-+ before the linker maps input sections to output
-+ sections. The linker does that before
-+ adjust_dynamic_symbol is called, and it is that
-+ function which decides whether anything needs to go
-+ into these sections. */
-+ s->flags |= SEC_EXCLUDE;
-+ continue;
-+ }
-+
-+ if ((s->flags & SEC_HAS_CONTENTS) == 0)
-+ continue;
-+
-+ /* Allocate memory for the section contents. Zero the memory
-+ for the benefit of .rela.plt, which has 4 unused entries
-+ at the beginning, and we don't want garbage. */
-+ s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
-+ if (s->contents == NULL)
-+ return FALSE;
-+ }
-+
-+ if (elf_hash_table (info)->dynamic_sections_created)
-+ {
-+ /* Add some entries to the .dynamic section. We fill in the
-+ values later, in riscv_elf_finish_dynamic_sections, but we
-+ must add the entries now so that we get the correct size for
-+ the .dynamic section. The DT_DEBUG entry is filled in by the
-+ dynamic linker and used by the debugger. */
-+#define add_dynamic_entry(TAG, VAL) \
-+ _bfd_elf_add_dynamic_entry (info, TAG, VAL)
-+
-+ if (bfd_link_executable (info))
-+ {
-+ if (!add_dynamic_entry (DT_DEBUG, 0))
-+ return FALSE;
-+ }
-+
-+ if (htab->elf.srelplt->size != 0)
-+ {
-+ if (!add_dynamic_entry (DT_PLTGOT, 0)
-+ || !add_dynamic_entry (DT_PLTRELSZ, 0)
-+ || !add_dynamic_entry (DT_PLTREL, DT_RELA)
-+ || !add_dynamic_entry (DT_JMPREL, 0))
-+ return FALSE;
-+ }
-+
-+ if (!add_dynamic_entry (DT_RELA, 0)
-+ || !add_dynamic_entry (DT_RELASZ, 0)
-+ || !add_dynamic_entry (DT_RELAENT, sizeof (ElfNN_External_Rela)))
-+ return FALSE;
-+
-+ /* If any dynamic relocs apply to a read-only section,
-+ then we need a DT_TEXTREL entry. */
-+ if ((info->flags & DF_TEXTREL) == 0)
-+ elf_link_hash_traverse (&htab->elf, readonly_dynrelocs, info);
-+
-+ if (info->flags & DF_TEXTREL)
-+ {
-+ if (!add_dynamic_entry (DT_TEXTREL, 0))
-+ return FALSE;
-+ }
-+ }
-+#undef add_dynamic_entry
-+
-+ return TRUE;
-+}
-+
-+#define TP_OFFSET 0
-+#define DTP_OFFSET 0x800
-+
-+/* Return the relocation value for a TLS dtp-relative reloc. */
-+
-+static bfd_vma
-+dtpoff (struct bfd_link_info *info, bfd_vma address)
-+{
-+ /* If tls_sec is NULL, we should have signalled an error already. */
-+ if (elf_hash_table (info)->tls_sec == NULL)
-+ return 0;
-+ return address - elf_hash_table (info)->tls_sec->vma - DTP_OFFSET;
-+}
-+
-+/* Return the relocation value for a static TLS tp-relative relocation. */
-+
-+static bfd_vma
-+tpoff (struct bfd_link_info *info, bfd_vma address)
-+{
-+ /* If tls_sec is NULL, we should have signalled an error already. */
-+ if (elf_hash_table (info)->tls_sec == NULL)
-+ return 0;
-+ return address - elf_hash_table (info)->tls_sec->vma - TP_OFFSET;
-+}
-+
-+/* Return the global pointer's value, or 0 if it is not in use. */
-+
-+static bfd_vma
-+riscv_global_pointer_value (struct bfd_link_info *info)
-+{
-+ struct bfd_link_hash_entry *h;
-+
-+ h = bfd_link_hash_lookup (info->hash, "_gp", FALSE, FALSE, TRUE);
-+ if (h == NULL || h->type != bfd_link_hash_defined)
-+ return 0;
-+
-+ return h->u.def.value + sec_addr (h->u.def.section);
-+}
-+
-+/* Emplace a static relocation. */
-+
-+static bfd_reloc_status_type
-+perform_relocation (const reloc_howto_type *howto,
-+ const Elf_Internal_Rela *rel,
-+ bfd_vma value,
-+ asection *input_section,
-+ bfd *input_bfd,
-+ bfd_byte *contents)
-+{
-+ if (howto->pc_relative)
-+ value -= sec_addr (input_section) + rel->r_offset;
-+ value += rel->r_addend;
-+
-+ switch (ELFNN_R_TYPE (rel->r_info))
-+ {
-+ case R_RISCV_HI20:
-+ case R_RISCV_TPREL_HI20:
-+ case R_RISCV_PCREL_HI20:
-+ case R_RISCV_GOT_HI20:
-+ case R_RISCV_TLS_GOT_HI20:
-+ case R_RISCV_TLS_GD_HI20:
-+ if (ARCH_SIZE > 32 && !VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value));
-+ break;
-+
-+ case R_RISCV_LO12_I:
-+ case R_RISCV_GPREL_I:
-+ case R_RISCV_TPREL_LO12_I:
-+ case R_RISCV_PCREL_LO12_I:
-+ value = ENCODE_ITYPE_IMM (value);
-+ break;
-+
-+ case R_RISCV_LO12_S:
-+ case R_RISCV_GPREL_S:
-+ case R_RISCV_TPREL_LO12_S:
-+ case R_RISCV_PCREL_LO12_S:
-+ value = ENCODE_STYPE_IMM (value);
-+ break;
-+
-+ case R_RISCV_CALL:
-+ case R_RISCV_CALL_PLT:
-+ if (ARCH_SIZE > 32 && !VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value))
-+ | (ENCODE_ITYPE_IMM (value) << 32);
-+ break;
-+
-+ case R_RISCV_JAL:
-+ if (!VALID_UJTYPE_IMM (value))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_UJTYPE_IMM (value);
-+ break;
-+
-+ case R_RISCV_BRANCH:
-+ if (!VALID_SBTYPE_IMM (value))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_SBTYPE_IMM (value);
-+ break;
-+
-+ case R_RISCV_RVC_BRANCH:
-+ if (!VALID_RVC_B_IMM (value))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_RVC_B_IMM (value);
-+ break;
-+
-+ case R_RISCV_RVC_JUMP:
-+ if (!VALID_RVC_J_IMM (value))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_RVC_J_IMM (value);
-+ break;
-+
-+ case R_RISCV_RVC_LUI:
-+ if (!VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (value)))
-+ return bfd_reloc_overflow;
-+ value = ENCODE_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (value));
-+ break;
-+
-+ case R_RISCV_32:
-+ case R_RISCV_64:
-+ case R_RISCV_ADD8:
-+ case R_RISCV_ADD16:
-+ case R_RISCV_ADD32:
-+ case R_RISCV_ADD64:
-+ case R_RISCV_SUB8:
-+ case R_RISCV_SUB16:
-+ case R_RISCV_SUB32:
-+ case R_RISCV_SUB64:
-+ case R_RISCV_TLS_DTPREL32:
-+ case R_RISCV_TLS_DTPREL64:
-+ break;
-+
-+ default:
-+ return bfd_reloc_notsupported;
-+ }
-+
-+ bfd_vma word = bfd_get (howto->bitsize, input_bfd, contents + rel->r_offset);
-+ word = (word & ~howto->dst_mask) | (value & howto->dst_mask);
-+ bfd_put (howto->bitsize, input_bfd, word, contents + rel->r_offset);
-+
-+ return bfd_reloc_ok;
-+}
-+
-+/* Remember all PC-relative high-part relocs we've encountered to help us
-+ later resolve the corresponding low-part relocs. */
-+
-+typedef struct {
-+ bfd_vma address;
-+ bfd_vma value;
-+} riscv_pcrel_hi_reloc;
-+
-+typedef struct riscv_pcrel_lo_reloc {
-+ asection *input_section;
-+ struct bfd_link_info *info;
-+ reloc_howto_type *howto;
-+ const Elf_Internal_Rela *reloc;
-+ bfd_vma addr;
-+ const char *name;
-+ bfd_byte *contents;
-+ struct riscv_pcrel_lo_reloc *next;
-+} riscv_pcrel_lo_reloc;
-+
-+typedef struct {
-+ htab_t hi_relocs;
-+ riscv_pcrel_lo_reloc *lo_relocs;
-+} riscv_pcrel_relocs;
-+
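-+/* Hash a PC-relative high-part reloc record by the address it applies to. */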
-+static hashval_t
-+riscv_pcrel_reloc_hash (const void *entry)
-+{
-+ const riscv_pcrel_hi_reloc *e = entry;
-+ return (hashval_t)(e->address >> 2);
-+}
-+
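-+/* Two PC-relative high-part reloc records are equal when they apply to the
-+ same address. */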
-+static bfd_boolean
-+riscv_pcrel_reloc_eq (const void *entry1, const void *entry2)
-+{
-+ const riscv_pcrel_hi_reloc *e1 = entry1, *e2 = entry2;
-+ return e1->address == e2->address;
-+}
-+
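-+/* Initialize the bookkeeping used to pair PC-relative high-part and
-+ low-part relocs while relocating a section. */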
-+static bfd_boolean
-+riscv_init_pcrel_relocs (riscv_pcrel_relocs *p)
-+{
-+ p->lo_relocs = NULL;
-+ p->hi_relocs = htab_create (1024, riscv_pcrel_reloc_hash,
-+ riscv_pcrel_reloc_eq, free);
-+ return p->hi_relocs != NULL;
-+}
-+
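-+/* Free the PC-relative reloc bookkeeping. */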
-+static void
-+riscv_free_pcrel_relocs (riscv_pcrel_relocs *p)
-+{
-+ riscv_pcrel_lo_reloc *cur = p->lo_relocs;
-+ while (cur != NULL)
-+ {
-+ riscv_pcrel_lo_reloc *next = cur->next;
-+ free (cur);
-+ cur = next;
-+ }
-+
-+ htab_delete (p->hi_relocs);
-+}
-+
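-+/* Record a PC-relative high-part reloc at ADDR whose target is VALUE; the
-+ entry keeps the PC-relative offset for use by matching low-part relocs. */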
-+static bfd_boolean
-+riscv_record_pcrel_hi_reloc (riscv_pcrel_relocs *p, bfd_vma addr, bfd_vma value)
-+{
-+ riscv_pcrel_hi_reloc entry = {addr, value - addr};
-+ riscv_pcrel_hi_reloc **slot =
-+ (riscv_pcrel_hi_reloc **) htab_find_slot (p->hi_relocs, &entry, INSERT);
-+ BFD_ASSERT (*slot == NULL);
-+ *slot = (riscv_pcrel_hi_reloc *) bfd_malloc (sizeof (riscv_pcrel_hi_reloc));
-+ if (*slot == NULL)
-+ return FALSE;
-+ **slot = entry;
-+ return TRUE;
-+}
-+
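-+/* Queue a PC-relative low-part reloc; it is applied once all high-part
-+ relocs in the section have been recorded. */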
-+static bfd_boolean
-+riscv_record_pcrel_lo_reloc (riscv_pcrel_relocs *p,
-+ asection *input_section,
-+ struct bfd_link_info *info,
-+ reloc_howto_type *howto,
-+ const Elf_Internal_Rela *reloc,
-+ bfd_vma addr,
-+ const char *name,
-+ bfd_byte *contents)
-+{
-+ riscv_pcrel_lo_reloc *entry;
-+ entry = (riscv_pcrel_lo_reloc *) bfd_malloc (sizeof (riscv_pcrel_lo_reloc));
-+ if (entry == NULL)
-+ return FALSE;
-+ *entry = (riscv_pcrel_lo_reloc) {input_section, info, howto, reloc, addr,
-+ name, contents, p->lo_relocs};
-+ p->lo_relocs = entry;
-+ return TRUE;
-+}
-+
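-+/* Apply all queued low-part relocs using the values recorded for their
-+ corresponding high-part relocs, reporting an overflow when no matching
-+ high-part reloc is found. */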
-+static bfd_boolean
-+riscv_resolve_pcrel_lo_relocs (riscv_pcrel_relocs *p)
-+{
-+ riscv_pcrel_lo_reloc *r;
-+ for (r = p->lo_relocs; r != NULL; r = r->next)
-+ {
-+ bfd *input_bfd = r->input_section->owner;
-+ riscv_pcrel_hi_reloc search = {r->addr, 0};
-+ riscv_pcrel_hi_reloc *entry = htab_find (p->hi_relocs, &search);
-+ if (entry == NULL)
-+ return ((*r->info->callbacks->reloc_overflow)
-+ (r->info, NULL, r->name, r->howto->name, (bfd_vma) 0,
-+ input_bfd, r->input_section, r->reloc->r_offset));
-+
-+ perform_relocation (r->howto, r->reloc, entry->value, r->input_section,
-+ input_bfd, r->contents);
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Relocate a RISC-V ELF section.
-+
-+ The RELOCATE_SECTION function is called by the new ELF backend linker
-+ to handle the relocations for a section.
-+
-+ The relocs are always passed as Rela structures.
-+
-+ This function is responsible for adjusting the section contents as
-+ necessary, and (if generating a relocatable output file) adjusting
-+ the reloc addend as necessary.
-+
-+ This function does not have to worry about setting the reloc
-+ address or the reloc symbol index.
-+
-+ LOCAL_SYMS is a pointer to the swapped in local symbols.
-+
-+ LOCAL_SECTIONS is an array giving the section in the input file
-+ corresponding to the st_shndx field of each local symbol.
-+
-+ The global hash table entry for the global symbols can be found
-+ via elf_sym_hashes (input_bfd).
-+
-+ When generating relocatable output, this function must handle
-+ STB_LOCAL/STT_SECTION symbols specially. The output symbol is
-+ going to be the section symbol corresponding to the output
-+ section, which means that the addend must be adjusted
-+ accordingly. */
-+
-+static bfd_boolean
-+riscv_elf_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
-+ bfd *input_bfd, asection *input_section,
-+ bfd_byte *contents, Elf_Internal_Rela *relocs,
-+ Elf_Internal_Sym *local_syms,
-+ asection **local_sections)
-+{
-+ Elf_Internal_Rela *rel;
-+ Elf_Internal_Rela *relend;
-+ riscv_pcrel_relocs pcrel_relocs;
-+ bfd_boolean ret = FALSE;
-+ asection *sreloc = elf_section_data (input_section)->sreloc;
-+ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (input_bfd);
-+ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (input_bfd);
-+ bfd_vma *local_got_offsets = elf_local_got_offsets (input_bfd);
-+
-+ if (!riscv_init_pcrel_relocs (&pcrel_relocs))
-+ return FALSE;
-+
-+ relend = relocs + input_section->reloc_count;
-+ for (rel = relocs; rel < relend; rel++)
-+ {
-+ unsigned long r_symndx;
-+ struct elf_link_hash_entry *h;
-+ Elf_Internal_Sym *sym;
-+ asection *sec;
-+ bfd_vma relocation;
-+ bfd_reloc_status_type r = bfd_reloc_ok;
-+ const char *name;
-+ bfd_vma off, ie_off;
-+ bfd_boolean unresolved_reloc, is_ie = FALSE;
-+ bfd_vma pc = sec_addr (input_section) + rel->r_offset;
-+ int r_type = ELFNN_R_TYPE (rel->r_info), tls_type;
-+ reloc_howto_type *howto = riscv_elf_rtype_to_howto (r_type);
-+ const char *msg = NULL;
-+
-+ if (r_type == R_RISCV_GNU_VTINHERIT || r_type == R_RISCV_GNU_VTENTRY)
-+ continue;
-+
-+ /* This is a final link. */
-+ r_symndx = ELFNN_R_SYM (rel->r_info);
-+ h = NULL;
-+ sym = NULL;
-+ sec = NULL;
-+ unresolved_reloc = FALSE;
-+ if (r_symndx < symtab_hdr->sh_info)
-+ {
-+ sym = local_syms + r_symndx;
-+ sec = local_sections[r_symndx];
-+ relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
-+ }
-+ else
-+ {
-+ bfd_boolean warned, ignored;
-+
-+ RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
-+ r_symndx, symtab_hdr, sym_hashes,
-+ h, sec, relocation,
-+ unresolved_reloc, warned, ignored);
-+ if (warned)
-+ {
-+ /* To avoid generating warning messages about truncated
-+ relocations, set the relocation's address to be the same as
-+ the start of this section. */
-+ if (input_section->output_section != NULL)
-+ relocation = input_section->output_section->vma;
-+ else
-+ relocation = 0;
-+ }
-+ }
-+
-+ if (sec != NULL && discarded_section (sec))
-+ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
-+ rel, 1, relend, howto, 0, contents);
-+
-+ if (bfd_link_relocatable (info))
-+ continue;
-+
-+ if (h != NULL)
-+ name = h->root.root.string;
-+ else
-+ {
-+ name = (bfd_elf_string_from_elf_section
-+ (input_bfd, symtab_hdr->sh_link, sym->st_name));
-+ if (name == NULL || *name == '\0')
-+ name = bfd_section_name (input_bfd, sec);
-+ }
-+
-+ switch (r_type)
-+ {
-+ case R_RISCV_NONE:
-+ case R_RISCV_TPREL_ADD:
-+ case R_RISCV_COPY:
-+ case R_RISCV_JUMP_SLOT:
-+ case R_RISCV_RELATIVE:
-+ /* These require nothing of us at all. */
-+ continue;
-+
-+ case R_RISCV_HI20:
-+ case R_RISCV_BRANCH:
-+ case R_RISCV_RVC_BRANCH:
-+ case R_RISCV_RVC_LUI:
-+ case R_RISCV_LO12_I:
-+ case R_RISCV_LO12_S:
-+ /* These require no special handling beyond perform_relocation. */
-+ break;
-+
-+ case R_RISCV_GOT_HI20:
-+ if (h != NULL)
-+ {
-+ bfd_boolean dyn, pic;
-+
-+ off = h->got.offset;
-+ BFD_ASSERT (off != (bfd_vma) -1);
-+ dyn = elf_hash_table (info)->dynamic_sections_created;
-+ pic = bfd_link_pic (info);
-+
-+ if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, pic, h)
-+ || (pic && SYMBOL_REFERENCES_LOCAL (info, h)))
-+ {
-+ /* This is actually a static link, or it is a
-+ -Bsymbolic link and the symbol is defined
-+ locally, or the symbol was forced to be local
-+ because of a version file. We must initialize
-+ this entry in the global offset table. Since the
-+ offset must always be a multiple of the word size,
-+ we use the least significant bit to record whether
-+ we have initialized it already.
-+
-+ When doing a dynamic link, we create a .rela.got
-+ relocation entry to initialize the value. This
-+ is done in the finish_dynamic_symbol routine. */
-+ if ((off & 1) != 0)
-+ off &= ~1;
-+ else
-+ {
-+ bfd_put_NN (output_bfd, relocation,
-+ htab->elf.sgot->contents + off);
-+ h->got.offset |= 1;
-+ }
-+ }
-+ else
-+ unresolved_reloc = FALSE;
-+ }
-+ else
-+ {
-+ BFD_ASSERT (local_got_offsets != NULL
-+ && local_got_offsets[r_symndx] != (bfd_vma) -1);
-+
-+ off = local_got_offsets[r_symndx];
-+
-+ /* The offset must always be a multiple of the word size.
-+ So, we can use the least significant bit to record
-+ whether we have already processed this entry. */
-+ if ((off & 1) != 0)
-+ off &= ~1;
-+ else
-+ {
-+ if (bfd_link_pic (info))
-+ {
-+ asection *s;
-+ Elf_Internal_Rela outrel;
-+
-+ /* We need to generate a R_RISCV_RELATIVE reloc
-+ for the dynamic linker. */
-+ s = htab->elf.srelgot;
-+ BFD_ASSERT (s != NULL);
-+
-+ outrel.r_offset = sec_addr (htab->elf.sgot) + off;
-+ outrel.r_info =
-+ ELFNN_R_INFO (0, R_RISCV_RELATIVE);
-+ outrel.r_addend = relocation;
-+ relocation = 0;
-+ riscv_elf_append_rela (output_bfd, s, &outrel);
-+ }
-+
-+ bfd_put_NN (output_bfd, relocation,
-+ htab->elf.sgot->contents + off);
-+ local_got_offsets[r_symndx] |= 1;
-+ }
-+ }
-+ relocation = sec_addr (htab->elf.sgot) + off;
-+ if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
-+ r = bfd_reloc_overflow;
-+ break;
-+
-+ case R_RISCV_ADD8:
-+ case R_RISCV_ADD16:
-+ case R_RISCV_ADD32:
-+ case R_RISCV_ADD64:
-+ {
-+ bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
-+ contents + rel->r_offset);
-+ relocation = old_value + relocation;
-+ }
-+ break;
-+
-+ case R_RISCV_SUB8:
-+ case R_RISCV_SUB16:
-+ case R_RISCV_SUB32:
-+ case R_RISCV_SUB64:
-+ {
-+ bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
-+ contents + rel->r_offset);
-+ relocation = old_value - relocation;
-+ }
-+ break;
-+
-+ case R_RISCV_CALL_PLT:
-+ case R_RISCV_CALL:
-+ case R_RISCV_JAL:
-+ case R_RISCV_RVC_JUMP:
-+ if (bfd_link_pic (info) && h != NULL && h->plt.offset != MINUS_ONE)
-+ {
-+ /* Refer to the PLT entry. */
-+ relocation = sec_addr (htab->elf.splt) + h->plt.offset;
-+ unresolved_reloc = FALSE;
-+ }
-+ break;
-+
-+ case R_RISCV_TPREL_HI20:
-+ relocation = tpoff (info, relocation);
-+ break;
-+
-+ case R_RISCV_TPREL_LO12_I:
-+ case R_RISCV_TPREL_LO12_S:
-+ relocation = tpoff (info, relocation);
-+ if (VALID_ITYPE_IMM (relocation + rel->r_addend))
-+ {
-+ /* We can use tp as the base register. */
-+ bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
-+ insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
-+ insn |= X_TP << OP_SH_RS1;
-+ bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
-+ }
-+ break;
-+
-+ case R_RISCV_GPREL_I:
-+ case R_RISCV_GPREL_S:
-+ {
-+ bfd_vma gp = riscv_global_pointer_value (info);
-+ bfd_boolean x0_base = VALID_ITYPE_IMM (relocation + rel->r_addend);
-+ if (x0_base || VALID_ITYPE_IMM (relocation + rel->r_addend - gp))
-+ {
-+ /* We can use x0 or gp as the base register. */
-+ bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
-+ insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
-+ if (!x0_base)
-+ {
-+ rel->r_addend -= gp;
-+ insn |= X_GP << OP_SH_RS1;
-+ }
-+ bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
-+ }
-+ else
-+ r = bfd_reloc_overflow;
-+ break;
-+ }
-+
-+ case R_RISCV_PCREL_HI20:
-+ if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc,
-+ relocation + rel->r_addend))
-+ r = bfd_reloc_overflow;
-+ break;
-+
-+ case R_RISCV_PCREL_LO12_I:
-+ case R_RISCV_PCREL_LO12_S:
-+ if (riscv_record_pcrel_lo_reloc (&pcrel_relocs, input_section, info,
-+ howto, rel, relocation, name,
-+ contents))
-+ continue;
-+ r = bfd_reloc_overflow;
-+ break;
-+
-+ case R_RISCV_TLS_DTPREL32:
-+ case R_RISCV_TLS_DTPREL64:
-+ relocation = dtpoff (info, relocation);
-+ break;
-+
-+ case R_RISCV_32:
-+ case R_RISCV_64:
-+ if ((input_section->flags & SEC_ALLOC) == 0)
-+ break;
-+
-+ if ((bfd_link_pic (info)
-+ && (h == NULL
-+ || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
-+ || h->root.type != bfd_link_hash_undefweak)
-+ && (! howto->pc_relative
-+ || !SYMBOL_CALLS_LOCAL (info, h)))
-+ || (!bfd_link_pic (info)
-+ && h != NULL
-+ && h->dynindx != -1
-+ && !h->non_got_ref
-+ && ((h->def_dynamic
-+ && !h->def_regular)
-+ || h->root.type == bfd_link_hash_undefweak
-+ || h->root.type == bfd_link_hash_undefined)))
-+ {
-+ Elf_Internal_Rela outrel;
-+ bfd_boolean skip_static_relocation, skip_dynamic_relocation;
-+
-+ /* When generating a shared object, these relocations
-+ are copied into the output file to be resolved at run
-+ time. */
-+
-+ outrel.r_offset =
-+ _bfd_elf_section_offset (output_bfd, info, input_section,
-+ rel->r_offset);
-+ skip_static_relocation = outrel.r_offset != (bfd_vma) -2;
-+ skip_dynamic_relocation = outrel.r_offset >= (bfd_vma) -2;
-+ outrel.r_offset += sec_addr (input_section);
-+
-+ if (skip_dynamic_relocation)
-+ memset (&outrel, 0, sizeof outrel);
-+ else if (h != NULL && h->dynindx != -1
-+ && !(bfd_link_pic (info)
-+ && SYMBOLIC_BIND (info, h)
-+ && h->def_regular))
-+ {
-+ outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
-+ outrel.r_addend = rel->r_addend;
-+ }
-+ else
-+ {
-+ outrel.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
-+ outrel.r_addend = relocation + rel->r_addend;
-+ }
-+
-+ riscv_elf_append_rela (output_bfd, sreloc, &outrel);
-+ if (skip_static_relocation)
-+ continue;
-+ }
-+ break;
-+
-+ case R_RISCV_TLS_GOT_HI20:
-+ is_ie = TRUE;
-+ /* Fall through. */
-+
-+ case R_RISCV_TLS_GD_HI20:
-+ if (h != NULL)
-+ {
-+ off = h->got.offset;
-+ h->got.offset |= 1;
-+ }
-+ else
-+ {
-+ off = local_got_offsets[r_symndx];
-+ local_got_offsets[r_symndx] |= 1;
-+ }
-+
-+ tls_type = _bfd_riscv_elf_tls_type (input_bfd, h, r_symndx);
-+ BFD_ASSERT (tls_type & (GOT_TLS_IE | GOT_TLS_GD));
-+ /* If this symbol is referenced by both GD and IE TLS, the IE
-+ reference's GOT slot follows the GD reference's slots. */
-+ ie_off = 0;
-+ if ((tls_type & GOT_TLS_GD) && (tls_type & GOT_TLS_IE))
-+ ie_off = 2 * GOT_ENTRY_SIZE;
-+
-+ if ((off & 1) != 0)
-+ off &= ~1;
-+ else
-+ {
-+ Elf_Internal_Rela outrel;
-+ int indx = 0;
-+ bfd_boolean need_relocs = FALSE;
-+
-+ if (htab->elf.srelgot == NULL)
-+ abort ();
-+
-+ if (h != NULL)
-+ {
-+ bfd_boolean dyn, pic;
-+ dyn = htab->elf.dynamic_sections_created;
-+ pic = bfd_link_pic (info);
-+
-+ if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, pic, h)
-+ && (!pic || !SYMBOL_REFERENCES_LOCAL (info, h)))
-+ indx = h->dynindx;
-+ }
-+
-+ /* The GOT entries have not been initialized yet. Do it
-+ now, and emit any relocations. */
-+ if ((bfd_link_pic (info) || indx != 0)
-+ && (h == NULL
-+ || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
-+ || h->root.type != bfd_link_hash_undefweak))
-+ need_relocs = TRUE;
-+
-+ if (tls_type & GOT_TLS_GD)
-+ {
-+ if (need_relocs)
-+ {
-+ outrel.r_offset = sec_addr (htab->elf.sgot) + off;
-+ outrel.r_addend = 0;
-+ outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPMODNN);
-+ bfd_put_NN (output_bfd, 0,
-+ htab->elf.sgot->contents + off);
-+ riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
-+ if (indx == 0)
-+ {
-+ BFD_ASSERT (! unresolved_reloc);
-+ bfd_put_NN (output_bfd,
-+ dtpoff (info, relocation),
-+ (htab->elf.sgot->contents + off +
-+ RISCV_ELF_WORD_BYTES));
-+ }
-+ else
-+ {
-+ bfd_put_NN (output_bfd, 0,
-+ (htab->elf.sgot->contents + off +
-+ RISCV_ELF_WORD_BYTES));
-+ outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPRELNN);
-+ outrel.r_offset += RISCV_ELF_WORD_BYTES;
-+ riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
-+ }
-+ }
-+ else
-+ {
-+ /* If we are not emitting relocations for a
-+ general dynamic reference, then we must be in a
-+ static link or an executable link with the
-+ symbol binding locally. Mark it as belonging
-+ to module 1, the executable. */
-+ bfd_put_NN (output_bfd, 1,
-+ htab->elf.sgot->contents + off);
-+ bfd_put_NN (output_bfd,
-+ dtpoff (info, relocation),
-+ (htab->elf.sgot->contents + off +
-+ RISCV_ELF_WORD_BYTES));
-+ }
-+ }
-+
-+ if (tls_type & GOT_TLS_IE)
-+ {
-+ if (need_relocs)
-+ {
-+ bfd_put_NN (output_bfd, 0,
-+ htab->elf.sgot->contents + off + ie_off);
-+ outrel.r_offset = sec_addr (htab->elf.sgot)
-+ + off + ie_off;
-+ outrel.r_addend = 0;
-+ if (indx == 0)
-+ outrel.r_addend = tpoff (info, relocation);
-+ outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_TPRELNN);
-+ riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
-+ }
-+ else
-+ {
-+ bfd_put_NN (output_bfd, tpoff (info, relocation),
-+ htab->elf.sgot->contents + off + ie_off);
-+ }
-+ }
-+ }
-+
-+ BFD_ASSERT (off < (bfd_vma) -2);
-+ relocation = sec_addr (htab->elf.sgot) + off + (is_ie ? ie_off : 0);
-+ if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
-+ r = bfd_reloc_overflow;
-+ unresolved_reloc = FALSE;
-+ break;
-+
-+ default:
-+ r = bfd_reloc_notsupported;
-+ }
-+
-+ /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
-+ because such sections are not SEC_ALLOC and thus ld.so will
-+ not process them. */
-+ if (unresolved_reloc
-+ && !((input_section->flags & SEC_DEBUGGING) != 0
-+ && h->def_dynamic)
-+ && _bfd_elf_section_offset (output_bfd, info, input_section,
-+ rel->r_offset) != (bfd_vma) -1)
-+ {
-+ (*_bfd_error_handler)
-+ (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
-+ input_bfd,
-+ input_section,
-+ (long) rel->r_offset,
-+ howto->name,
-+ h->root.root.string);
-+ continue;
-+ }
-+
-+ if (r == bfd_reloc_ok)
-+ r = perform_relocation (howto, rel, relocation, input_section,
-+ input_bfd, contents);
-+
-+ switch (r)
-+ {
-+ case bfd_reloc_ok:
-+ continue;
-+
-+ case bfd_reloc_overflow:
-+ r = info->callbacks->reloc_overflow
-+ (info, (h ? &h->root : NULL), name, howto->name,
-+ (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
-+ break;
-+
-+ case bfd_reloc_undefined:
-+ r = info->callbacks->undefined_symbol
-+ (info, name, input_bfd, input_section, rel->r_offset,
-+ TRUE);
-+ break;
-+
-+ case bfd_reloc_outofrange:
-+ msg = _("internal error: out of range error");
-+ break;
-+
-+ case bfd_reloc_notsupported:
-+ msg = _("internal error: unsupported relocation error");
-+ break;
-+
-+ case bfd_reloc_dangerous:
-+ msg = _("internal error: dangerous relocation");
-+ break;
-+
-+ default:
-+ msg = _("internal error: unknown error");
-+ break;
-+ }
-+
-+ if (msg)
-+ r = info->callbacks->warning
-+ (info, msg, name, input_bfd, input_section, rel->r_offset);
-+ goto out;
-+ }
-+
-+ ret = riscv_resolve_pcrel_lo_relocs (&pcrel_relocs);
-+out:
-+ riscv_free_pcrel_relocs (&pcrel_relocs);
-+ return ret;
-+}
-+
-+/* Finish up dynamic symbol handling. We set the contents of various
-+ dynamic sections here. */
-+
-+static bfd_boolean
-+riscv_elf_finish_dynamic_symbol (bfd *output_bfd,
-+ struct bfd_link_info *info,
-+ struct elf_link_hash_entry *h,
-+ Elf_Internal_Sym *sym)
-+{
-+ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+ const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
-+
-+ if (h->plt.offset != (bfd_vma) -1)
-+ {
-+ /* We've decided to create a PLT entry for this symbol. */
-+ bfd_byte *loc;
-+ bfd_vma i, header_address, plt_idx, got_address;
-+ uint32_t plt_entry[PLT_ENTRY_INSNS];
-+ Elf_Internal_Rela rela;
-+
-+ BFD_ASSERT (h->dynindx != -1);
-+
-+ /* Calculate the address of the PLT header. */
-+ header_address = sec_addr (htab->elf.splt);
-+
-+ /* Calculate the index of the entry. */
-+ plt_idx = (h->plt.offset - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
-+
-+ /* Calculate the address of the .got.plt entry. */
-+ got_address = riscv_elf_got_plt_val (plt_idx, info);
-+
-+ /* Find out where the .plt entry should go. */
-+ loc = htab->elf.splt->contents + h->plt.offset;
-+
-+ /* Fill in the PLT entry itself. */
-+ riscv_make_plt_entry (got_address, header_address + h->plt.offset,
-+ plt_entry);
-+ for (i = 0; i < PLT_ENTRY_INSNS; i++)
-+ bfd_put_32 (output_bfd, plt_entry[i], loc + 4*i);
-+
-+ /* Fill in the initial value of the .got.plt entry. */
-+ loc = htab->elf.sgotplt->contents
-+ + (got_address - sec_addr (htab->elf.sgotplt));
-+ bfd_put_NN (output_bfd, sec_addr (htab->elf.splt), loc);
-+
-+ /* Fill in the entry in the .rela.plt section. */
-+ rela.r_offset = got_address;
-+ rela.r_addend = 0;
-+ rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_JUMP_SLOT);
-+
-+ loc = htab->elf.srelplt->contents + plt_idx * sizeof (ElfNN_External_Rela);
-+ bed->s->swap_reloca_out (output_bfd, &rela, loc);
-+
-+ if (!h->def_regular)
-+ {
-+ /* Mark the symbol as undefined, rather than as defined in
-+ the .plt section. Leave the value alone. */
-+ sym->st_shndx = SHN_UNDEF;
-+ /* If the symbol is weak, we do need to clear the value.
-+ Otherwise, the PLT entry would provide a definition for
-+ the symbol even if the symbol wasn't defined anywhere,
-+ and so the symbol would never be NULL. */
-+ if (!h->ref_regular_nonweak)
-+ sym->st_value = 0;
-+ }
-+ }
-+
-+ if (h->got.offset != (bfd_vma) -1
-+ && !(riscv_elf_hash_entry(h)->tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
-+ {
-+ asection *sgot;
-+ asection *srela;
-+ Elf_Internal_Rela rela;
-+
-+ /* This symbol has an entry in the GOT. Set it up. */
-+
-+ sgot = htab->elf.sgot;
-+ srela = htab->elf.srelgot;
-+ BFD_ASSERT (sgot != NULL && srela != NULL);
-+
-+ rela.r_offset = sec_addr (sgot) + (h->got.offset &~ (bfd_vma) 1);
-+
-+ /* If this is a -Bsymbolic link, and the symbol is defined
-+ locally, we just want to emit a RELATIVE reloc. Likewise if
-+ the symbol was forced to be local because of a version file.
-+ The entry in the global offset table will already have been
-+ initialized in the relocate_section function. */
-+ if (bfd_link_pic (info)
-+ && (info->symbolic || h->dynindx == -1)
-+ && h->def_regular)
-+ {
-+ asection *sec = h->root.u.def.section;
-+ rela.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
-+ rela.r_addend = (h->root.u.def.value
-+ + sec->output_section->vma
-+ + sec->output_offset);
-+ }
-+ else
-+ {
-+ BFD_ASSERT (h->dynindx != -1);
-+ rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_NN);
-+ rela.r_addend = 0;
-+ }
-+
-+ bfd_put_NN (output_bfd, 0,
-+ sgot->contents + (h->got.offset & ~(bfd_vma) 1));
-+ riscv_elf_append_rela (output_bfd, srela, &rela);
-+ }
-+
-+ if (h->needs_copy)
-+ {
-+ Elf_Internal_Rela rela;
-+
-+ /* This symbol needs a copy reloc. Set it up. */
-+ BFD_ASSERT (h->dynindx != -1);
-+
-+ rela.r_offset = sec_addr (h->root.u.def.section) + h->root.u.def.value;
-+ rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_COPY);
-+ rela.r_addend = 0;
-+ riscv_elf_append_rela (output_bfd, htab->srelbss, &rela);
-+ }
-+
-+ /* Mark some specially defined symbols as absolute. */
-+ if (h == htab->elf.hdynamic
-+ || (h == htab->elf.hgot || h == htab->elf.hplt))
-+ sym->st_shndx = SHN_ABS;
-+
-+ return TRUE;
-+}
-+
-+/* Fill in the address-dependent .dynamic tags (DT_PLTGOT, DT_JMPREL and
-+ DT_PLTRELSZ) now that the output sections have been laid out. */
-+
-+static bfd_boolean
-+riscv_finish_dyn (bfd *output_bfd, struct bfd_link_info *info,
-+ bfd *dynobj, asection *sdyn)
-+{
-+ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+ const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
-+ size_t dynsize = bed->s->sizeof_dyn;
-+ bfd_byte *dyncon, *dynconend;
-+
-+ dynconend = sdyn->contents + sdyn->size;
-+ for (dyncon = sdyn->contents; dyncon < dynconend; dyncon += dynsize)
-+ {
-+ Elf_Internal_Dyn dyn;
-+ asection *s;
-+
-+ bed->s->swap_dyn_in (dynobj, dyncon, &dyn);
-+
-+ switch (dyn.d_tag)
-+ {
-+ case DT_PLTGOT:
-+ s = htab->elf.sgotplt;
-+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
-+ break;
-+ case DT_JMPREL:
-+ s = htab->elf.srelplt;
-+ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
-+ break;
-+ case DT_PLTRELSZ:
-+ s = htab->elf.srelplt;
-+ dyn.d_un.d_val = s->size;
-+ break;
-+ default:
-+ continue;
-+ }
-+
-+ bed->s->swap_dyn_out (output_bfd, &dyn, dyncon);
-+ }
-+ return TRUE;
-+}
-+
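-+/* Finish the dynamic sections: patch the address-dependent .dynamic tags,
-+ emit the PLT header, and initialize the reserved .got.plt and .got
-+ entries. */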
-+static bfd_boolean
-+riscv_elf_finish_dynamic_sections (bfd *output_bfd,
-+ struct bfd_link_info *info)
-+{
-+ bfd *dynobj;
-+ asection *sdyn;
-+ struct riscv_elf_link_hash_table *htab;
-+
-+ htab = riscv_elf_hash_table (info);
-+ BFD_ASSERT (htab != NULL);
-+ dynobj = htab->elf.dynobj;
-+
-+ sdyn = bfd_get_linker_section (dynobj, ".dynamic");
-+
-+ if (elf_hash_table (info)->dynamic_sections_created)
-+ {
-+ asection *splt;
-+ bfd_boolean ret;
-+
-+ splt = htab->elf.splt;
-+ BFD_ASSERT (splt != NULL && sdyn != NULL);
-+
-+ ret = riscv_finish_dyn (output_bfd, info, dynobj, sdyn);
-+
-+ if (ret != TRUE)
-+ return ret;
-+
-+ /* Fill in the head and tail entries in the procedure linkage table. */
-+ if (splt->size > 0)
-+ {
-+ int i;
-+ uint32_t plt_header[PLT_HEADER_INSNS];
-+ riscv_make_plt_header (sec_addr (htab->elf.sgotplt),
-+ sec_addr (splt), plt_header);
-+
-+ for (i = 0; i < PLT_HEADER_INSNS; i++)
-+ bfd_put_32 (output_bfd, plt_header[i], splt->contents + 4*i);
-+ }
-+
-+ elf_section_data (splt->output_section)->this_hdr.sh_entsize
-+ = PLT_ENTRY_SIZE;
-+ }
-+
-+ if (htab->elf.sgotplt)
-+ {
-+ asection *output_section = htab->elf.sgotplt->output_section;
-+
-+ if (bfd_is_abs_section (output_section))
-+ {
-+ (*_bfd_error_handler)
-+ (_("discarded output section: `%A'"), htab->elf.sgotplt);
-+ return FALSE;
-+ }
-+
-+ if (htab->elf.sgotplt->size > 0)
-+ {
-+ /* Write the first two entries in .got.plt, needed for the dynamic
-+ linker. */
-+ bfd_put_NN (output_bfd, (bfd_vma) -1, htab->elf.sgotplt->contents);
-+ bfd_put_NN (output_bfd, (bfd_vma) 0,
-+ htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
-+ }
-+
-+ elf_section_data (output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE;
-+ }
-+
-+ if (htab->elf.sgot)
-+ {
-+ asection *output_section = htab->elf.sgot->output_section;
-+
-+ if (htab->elf.sgot->size > 0)
-+ {
-+ /* Set the first entry in the global offset table to the address of
-+ the dynamic section. */
-+ bfd_vma val = sdyn ? sec_addr (sdyn) : 0;
-+ bfd_put_NN (output_bfd, val, htab->elf.sgot->contents);
-+ }
-+
-+ elf_section_data (output_section)->this_hdr.sh_entsize = GOT_ENTRY_SIZE;
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Return the address of the Ith PLT stub in section PLT, for relocation REL,
-+ or (bfd_vma) -1 if it should not be included. */
-+
-+static bfd_vma
-+riscv_elf_plt_sym_val (bfd_vma i, const asection *plt,
-+ const arelent *rel ATTRIBUTE_UNUSED)
-+{
-+ return plt->vma + PLT_HEADER_SIZE + i * PLT_ENTRY_SIZE;
-+}
-+
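-+/* Classify a dynamic reloc so the dynamic linker can sort RELATIVE, PLT and
-+ COPY relocations appropriately. */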
-+static enum elf_reloc_type_class
-+riscv_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
-+ const asection *rel_sec ATTRIBUTE_UNUSED,
-+ const Elf_Internal_Rela *rela)
-+{
-+ switch (ELFNN_R_TYPE (rela->r_info))
-+ {
-+ case R_RISCV_RELATIVE:
-+ return reloc_class_relative;
-+ case R_RISCV_JUMP_SLOT:
-+ return reloc_class_plt;
-+ case R_RISCV_COPY:
-+ return reloc_class_copy;
-+ default:
-+ return reloc_class_normal;
-+ }
-+}
-+
-+/* Merge backend specific data from an object file to the output
-+ object file when linking. */
-+
-+static bfd_boolean
-+_bfd_riscv_elf_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
-+{
-+ flagword new_flags = elf_elfheader (ibfd)->e_flags;
-+ flagword old_flags = elf_elfheader (obfd)->e_flags;
-+
-+ if (!is_riscv_elf (ibfd) || !is_riscv_elf (obfd))
-+ return TRUE;
-+
-+ if (strcmp (bfd_get_target (ibfd), bfd_get_target (obfd)) != 0)
-+ {
-+ (*_bfd_error_handler)
-+ (_("%B: ABI is incompatible with that of the selected emulation"),
-+ ibfd);
-+ return FALSE;
-+ }
-+
-+ if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
-+ return FALSE;
-+
-+ if (! elf_flags_init (obfd))
-+ {
-+ elf_flags_init (obfd) = TRUE;
-+ elf_elfheader (obfd)->e_flags = new_flags;
-+ return TRUE;
-+ }
-+
-+ /* Disallow linking soft-float and hard-float. */
-+ if ((old_flags ^ new_flags) & EF_RISCV_SOFT_FLOAT)
-+ {
-+ (*_bfd_error_handler)
-+ (_("%B: can't link hard-float modules with soft-float modules"), ibfd);
-+ goto fail;
-+ }
-+
-+ /* Allow linking RVC and non-RVC, and keep the RVC flag. */
-+ elf_elfheader (obfd)->e_flags |= new_flags & EF_RISCV_RVC;
-+
-+ return TRUE;
-+
-+fail:
-+ bfd_set_error (bfd_error_bad_value);
-+ return FALSE;
-+}
-+
-+/* Delete some bytes from a section while relaxing. */
-+
-+static bfd_boolean
-+riscv_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, size_t count)
-+{
-+ unsigned int i, symcount;
-+ bfd_vma toaddr = sec->size;
-+ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
-+ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
-+ unsigned int sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
-+ struct bfd_elf_section_data *data = elf_section_data (sec);
-+ bfd_byte *contents = data->this_hdr.contents;
-+
-+ /* Actually delete the bytes. */
-+ sec->size -= count;
-+ memmove (contents + addr, contents + addr + count, toaddr - addr - count);
-+
-+ /* Adjust the location of all of the relocs. Note that we need not
-+ adjust the addends, since all PC-relative references must be against
-+ symbols, which we will adjust below. */
-+ for (i = 0; i < sec->reloc_count; i++)
-+ if (data->relocs[i].r_offset > addr && data->relocs[i].r_offset < toaddr)
-+ data->relocs[i].r_offset -= count;
-+
-+ /* Adjust the local symbols defined in this section. */
-+ for (i = 0; i < symtab_hdr->sh_info; i++)
-+ {
-+ Elf_Internal_Sym *sym = (Elf_Internal_Sym *) symtab_hdr->contents + i;
-+ if (sym->st_shndx == sec_shndx)
-+ {
-+ /* If the symbol is in the range of memory we just moved, we
-+ have to adjust its value. */
-+ if (sym->st_value > addr && sym->st_value <= toaddr)
-+ sym->st_value -= count;
-+
-+ /* If the symbol *spans* the bytes we just deleted (i.e. its
-+ *end* is in the moved bytes but its *start* isn't), then we
-+ must adjust its size. */
-+ if (sym->st_value <= addr
-+ && sym->st_value + sym->st_size > addr
-+ && sym->st_value + sym->st_size <= toaddr)
-+ sym->st_size -= count;
-+ }
-+ }
-+
-+ /* Now adjust the global symbols defined in this section. */
-+ symcount = ((symtab_hdr->sh_size / sizeof (ElfNN_External_Sym))
-+ - symtab_hdr->sh_info);
-+
-+ for (i = 0; i < symcount; i++)
-+ {
-+ struct elf_link_hash_entry *sym_hash = sym_hashes[i];
-+
-+ if ((sym_hash->root.type == bfd_link_hash_defined
-+ || sym_hash->root.type == bfd_link_hash_defweak)
-+ && sym_hash->root.u.def.section == sec)
-+ {
-+ /* As above, adjust the value if needed. */
-+ if (sym_hash->root.u.def.value > addr
-+ && sym_hash->root.u.def.value <= toaddr)
-+ sym_hash->root.u.def.value -= count;
-+
-+ /* As above, adjust the size if needed. */
-+ if (sym_hash->root.u.def.value <= addr
-+ && sym_hash->root.u.def.value + sym_hash->size > addr
-+ && sym_hash->root.u.def.value + sym_hash->size <= toaddr)
-+ sym_hash->size -= count;
-+ }
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Relax AUIPC + JALR into JAL. */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_call (bfd *abfd, asection *sec, asection *sym_sec,
-+ struct bfd_link_info *link_info,
-+ Elf_Internal_Rela *rel,
-+ bfd_vma symval,
-+ bfd_boolean *again)
-+{
-+ bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
-+ bfd_signed_vma foff = symval - (sec_addr (sec) + rel->r_offset);
-+ bfd_boolean near_zero = (symval + RISCV_IMM_REACH/2) < RISCV_IMM_REACH;
-+ bfd_vma auipc, jalr;
-+ int rd, r_type, len = 4, rvc = elf_elfheader (abfd)->e_flags & EF_RISCV_RVC;
-+
-+ /* If the call crosses section boundaries, an alignment directive could
-+ cause the PC-relative offset to later increase. Assume at most
-+ page-alignment, and account for this by adding some slop. */
-+ if (VALID_UJTYPE_IMM (foff) && sym_sec->output_section != sec->output_section)
-+ foff += (foff < 0 ? -ELF_MAXPAGESIZE : ELF_MAXPAGESIZE);
-+
-+ /* See if this function call can be shortened. */
-+ if (!VALID_UJTYPE_IMM (foff) && !(!bfd_link_pic (link_info) && near_zero))
-+ return TRUE;
-+
-+ /* Shorten the function call. */
-+ BFD_ASSERT (rel->r_offset + 8 <= sec->size);
-+
-+ auipc = bfd_get_32 (abfd, contents + rel->r_offset);
-+ jalr = bfd_get_32 (abfd, contents + rel->r_offset + 4);
-+ rd = (jalr >> OP_SH_RD) & OP_MASK_RD;
-+ rvc = rvc && VALID_RVC_J_IMM (foff) && ARCH_SIZE == 32;
-+
-+ if (rvc && (rd == 0 || rd == X_RA))
-+ {
-+ /* Relax to C.J[AL] rd, addr. */
-+ r_type = R_RISCV_RVC_JUMP;
-+ auipc = rd == 0 ? MATCH_C_J : MATCH_C_JAL;
-+ len = 2;
-+ }
-+ else if (VALID_UJTYPE_IMM (foff))
-+ {
-+ /* Relax to JAL rd, addr. */
-+ r_type = R_RISCV_JAL;
-+ auipc = MATCH_JAL | (rd << OP_SH_RD);
-+ }
-+ else /* near_zero */
-+ {
-+ /* Relax to JALR rd, x0, addr. */
-+ r_type = R_RISCV_LO12_I;
-+ auipc = MATCH_JALR | (rd << OP_SH_RD);
-+ }
-+
-+ /* Replace the R_RISCV_CALL reloc. */
-+ rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), r_type);
-+ /* Replace the AUIPC. */
-+ bfd_put (8 * len, abfd, auipc, contents + rel->r_offset);
-+
-+ /* Delete unnecessary JALR. */
-+ *again = TRUE;
-+ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + len, 8 - len);
-+}
-+
-+/* Relax non-PIC global variable references. */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_lui (bfd *abfd, asection *sec, asection *sym_sec,
-+ struct bfd_link_info *link_info,
-+ Elf_Internal_Rela *rel,
-+ bfd_vma symval,
-+ bfd_boolean *again)
-+{
-+ bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
-+ bfd_vma gp = riscv_global_pointer_value (link_info);
-+ int use_rvc = elf_elfheader (abfd)->e_flags & EF_RISCV_RVC;
-+
-+ /* Mergeable symbols might later move out of range. */
-+ if (sym_sec->flags & SEC_MERGE)
-+ return TRUE;
-+
-+ BFD_ASSERT (rel->r_offset + 4 <= sec->size);
-+
-+ /* Is the reference in range of x0 or gp? */
-+ if (VALID_ITYPE_IMM (symval) || VALID_ITYPE_IMM (symval - gp))
-+ {
-+ unsigned sym = ELFNN_R_SYM (rel->r_info);
-+ switch (ELFNN_R_TYPE (rel->r_info))
-+ {
-+ case R_RISCV_LO12_I:
-+ rel->r_info = ELFNN_R_INFO (sym, R_RISCV_GPREL_I);
-+ return TRUE;
-+
-+ case R_RISCV_LO12_S:
-+ rel->r_info = ELFNN_R_INFO (sym, R_RISCV_GPREL_S);
-+ return TRUE;
-+
-+ case R_RISCV_HI20:
-+ /* We can delete the unnecessary LUI and reloc. */
-+ rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
-+ *again = TRUE;
-+ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
-+
-+ default:
-+ abort ();
-+ }
-+ }
-+
-+ /* Can we relax LUI to C.LUI? Alignment might move the section forward;
-+ account for this assuming page alignment at worst. */
-+ if (use_rvc
-+ && ELFNN_R_TYPE (rel->r_info) == R_RISCV_HI20
-+ && VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (symval))
-+ && VALID_RVC_LUI_IMM (RISCV_CONST_HIGH_PART (symval + ELF_MAXPAGESIZE)))
-+ {
-+ /* Replace LUI with C.LUI if legal (i.e., rd != x2/sp). */
-+ bfd_vma lui = bfd_get_32 (abfd, contents + rel->r_offset);
-+ if (((lui >> OP_SH_RD) & OP_MASK_RD) == X_SP)
-+ return TRUE;
-+
-+ lui = (lui & (OP_MASK_RD << OP_SH_RD)) | MATCH_C_LUI;
-+ bfd_put_32 (abfd, lui, contents + rel->r_offset);
-+
-+ /* Replace the R_RISCV_HI20 reloc. */
-+ rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), R_RISCV_RVC_LUI);
-+
-+ *again = TRUE;
-+ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + 2, 2);
-+ }
-+
-+ return TRUE;
-+}
-+
-+/* Relax non-PIC TLS references. */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_tls_le (bfd *abfd, asection *sec,
-+ asection *sym_sec ATTRIBUTE_UNUSED,
-+ struct bfd_link_info *link_info,
-+ Elf_Internal_Rela *rel,
-+ bfd_vma symval,
-+ bfd_boolean *again)
-+{
-+ /* See if this symbol is in range of tp. */
-+ if (RISCV_CONST_HIGH_PART (tpoff (link_info, symval)) != 0)
-+ return TRUE;
-+
-+ /* We can delete the unnecessary LUI and tp add. The LO12 reloc will be
-+ made directly tp-relative. */
-+ BFD_ASSERT (rel->r_offset + 4 <= sec->size);
-+ rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
-+
-+ *again = TRUE;
-+ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
-+}
-+
-+/* Implement R_RISCV_ALIGN by deleting excess alignment NOPs. */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_align (bfd *abfd, asection *sec,
-+ asection *sym_sec ATTRIBUTE_UNUSED,
-+ struct bfd_link_info *link_info ATTRIBUTE_UNUSED,
-+ Elf_Internal_Rela *rel,
-+ bfd_vma symval,
-+ bfd_boolean *again ATTRIBUTE_UNUSED)
-+{
-+ bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
-+ bfd_vma alignment = 1, pos;
-+ while (alignment <= rel->r_addend)
-+ alignment *= 2;
-+
-+ symval -= rel->r_addend;
-+ bfd_vma aligned_addr = ((symval - 1) & ~(alignment - 1)) + alignment;
-+ bfd_vma nop_bytes = aligned_addr - symval;
-+
-+ /* Once we've handled an R_RISCV_ALIGN, we can't relax anything else. */
-+ sec->sec_flg0 = TRUE;
-+
-+ /* Make sure there are enough NOPs to actually achieve the alignment. */
-+ if (rel->r_addend < nop_bytes)
-+ return FALSE;
-+
-+ /* Delete the reloc. */
-+ rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
-+
-+ /* If the number of NOPs is already correct, there's nothing to do. */
-+ if (nop_bytes == rel->r_addend)
-+ return TRUE;
-+
-+ /* Write as many RISC-V NOPs as we need. */
-+ for (pos = 0; pos < (nop_bytes & -4); pos += 4)
-+ bfd_put_32 (abfd, RISCV_NOP, contents + rel->r_offset + pos);
-+
-+ /* Write a final RVC NOP if need be. */
-+ if (nop_bytes % 4 != 0)
-+ bfd_put_16 (abfd, RVC_NOP, contents + rel->r_offset + pos);
-+
-+ /* Delete the excess bytes. */
-+ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + nop_bytes,
-+ rel->r_addend - nop_bytes);
-+}
-+
-+/* Relax a section. Pass 0 shortens code sequences unless disabled.
-+ Pass 1, which cannot be disabled, handles code alignment directives. */
-+
-+static bfd_boolean
-+_bfd_riscv_relax_section (bfd *abfd, asection *sec,
-+ struct bfd_link_info *info, bfd_boolean *again)
-+{
-+ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
-+ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
-+ struct bfd_elf_section_data *data = elf_section_data (sec);
-+ Elf_Internal_Rela *relocs;
-+ bfd_boolean ret = FALSE;
-+ unsigned int i;
-+
-+ *again = FALSE;
-+
-+ if (bfd_link_relocatable (info)
-+ || sec->sec_flg0
-+ || (sec->flags & SEC_RELOC) == 0
-+ || sec->reloc_count == 0
-+ || (info->disable_target_specific_optimizations
-+ && info->relax_pass == 0))
-+ return TRUE;
-+
-+ /* Read this BFD's relocs if we haven't done so already. */
-+ if (data->relocs)
-+ relocs = data->relocs;
-+ else if (!(relocs = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL,
-+ info->keep_memory)))
-+ goto fail;
-+
-+ /* Examine and consider relaxing each reloc. */
-+ for (i = 0; i < sec->reloc_count; i++)
-+ {
-+ asection *sym_sec;
-+ Elf_Internal_Rela *rel = relocs + i;
-+ typeof (&_bfd_riscv_relax_call) relax_func = NULL;
-+ int type = ELFNN_R_TYPE (rel->r_info);
-+ bfd_vma symval;
-+
-+ if (info->relax_pass == 0)
-+ {
-+ if (type == R_RISCV_CALL || type == R_RISCV_CALL_PLT)
-+ relax_func = _bfd_riscv_relax_call;
-+ else if (type == R_RISCV_HI20
-+ || type == R_RISCV_LO12_I
-+ || type == R_RISCV_LO12_S)
-+ relax_func = _bfd_riscv_relax_lui;
-+ else if (type == R_RISCV_TPREL_HI20 || type == R_RISCV_TPREL_ADD)
-+ relax_func = _bfd_riscv_relax_tls_le;
-+ }
-+ else if (type == R_RISCV_ALIGN)
-+ relax_func = _bfd_riscv_relax_align;
-+
-+ if (!relax_func)
-+ continue;
-+
-+ data->relocs = relocs;
-+
-+ /* Read this BFD's contents if we haven't done so already. */
-+ if (!data->this_hdr.contents
-+ && !bfd_malloc_and_get_section (abfd, sec, &data->this_hdr.contents))
-+ goto fail;
-+
-+ /* Read this BFD's symbols if we haven't done so already. */
-+ if (symtab_hdr->sh_info != 0
-+ && !symtab_hdr->contents
-+ && !(symtab_hdr->contents =
-+ (unsigned char *) bfd_elf_get_elf_syms (abfd, symtab_hdr,
-+ symtab_hdr->sh_info,
-+ 0, NULL, NULL, NULL)))
-+ goto fail;
-+
-+ /* Get the value of the symbol referred to by the reloc. */
-+ if (ELFNN_R_SYM (rel->r_info) < symtab_hdr->sh_info)
-+ {
-+ /* A local symbol. */
-+ Elf_Internal_Sym *isym = ((Elf_Internal_Sym *) symtab_hdr->contents
-+ + ELFNN_R_SYM (rel->r_info));
-+
-+ if (isym->st_shndx == SHN_UNDEF)
-+ sym_sec = sec, symval = sec_addr (sec) + rel->r_offset;
-+ else
-+ {
-+ BFD_ASSERT (isym->st_shndx < elf_numsections (abfd));
-+ sym_sec = elf_elfsections (abfd)[isym->st_shndx]->bfd_section;
-+ if (sec_addr (sym_sec) == 0)
-+ continue;
-+ symval = sec_addr (sym_sec) + isym->st_value;
-+ }
-+ }
-+ else
-+ {
-+ unsigned long indx;
-+ struct elf_link_hash_entry *h;
-+
-+ indx = ELFNN_R_SYM (rel->r_info) - symtab_hdr->sh_info;
-+ h = elf_sym_hashes (abfd)[indx];
-+
-+ while (h->root.type == bfd_link_hash_indirect
-+ || h->root.type == bfd_link_hash_warning)
-+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
-+
-+ if (h->plt.offset != MINUS_ONE)
-+ symval = sec_addr (htab->elf.splt) + h->plt.offset;
-+ else if (h->root.u.def.section->output_section == NULL
-+ || (h->root.type != bfd_link_hash_defined
-+ && h->root.type != bfd_link_hash_defweak))
-+ continue;
-+ else
-+ symval = sec_addr (h->root.u.def.section) + h->root.u.def.value;
-+
-+ sym_sec = h->root.u.def.section;
-+ }
-+
-+ symval += rel->r_addend;
-+
-+ if (!relax_func (abfd, sec, sym_sec, info, rel, symval, again))
-+ goto fail;
-+ }
-+
-+ ret = TRUE;
-+
-+fail:
-+ if (relocs != data->relocs)
-+ free (relocs);
-+
-+ return ret;
-+}
-+
-+#define TARGET_LITTLE_SYM riscv_elfNN_vec
-+#define TARGET_LITTLE_NAME "elfNN-littleriscv"
-+
-+#define elf_backend_reloc_type_class riscv_reloc_type_class
-+
-+#define bfd_elfNN_bfd_reloc_name_lookup riscv_reloc_name_lookup
-+#define bfd_elfNN_bfd_link_hash_table_create riscv_elf_link_hash_table_create
-+#define bfd_elfNN_bfd_reloc_type_lookup riscv_reloc_type_lookup
-+#define bfd_elfNN_bfd_merge_private_bfd_data \
-+ _bfd_riscv_elf_merge_private_bfd_data
-+
-+#define elf_backend_copy_indirect_symbol riscv_elf_copy_indirect_symbol
-+#define elf_backend_create_dynamic_sections riscv_elf_create_dynamic_sections
-+#define elf_backend_check_relocs riscv_elf_check_relocs
-+#define elf_backend_adjust_dynamic_symbol riscv_elf_adjust_dynamic_symbol
-+#define elf_backend_size_dynamic_sections riscv_elf_size_dynamic_sections
-+#define elf_backend_relocate_section riscv_elf_relocate_section
-+#define elf_backend_finish_dynamic_symbol riscv_elf_finish_dynamic_symbol
-+#define elf_backend_finish_dynamic_sections riscv_elf_finish_dynamic_sections
-+#define elf_backend_gc_mark_hook riscv_elf_gc_mark_hook
-+#define elf_backend_gc_sweep_hook riscv_elf_gc_sweep_hook
-+#define elf_backend_plt_sym_val riscv_elf_plt_sym_val
-+#define elf_info_to_howto_rel NULL
-+#define elf_info_to_howto riscv_info_to_howto_rela
-+#define bfd_elfNN_bfd_relax_section _bfd_riscv_relax_section
-+
-+#define elf_backend_init_index_section _bfd_elf_init_1_index_section
-+
-+#define elf_backend_can_gc_sections 1
-+#define elf_backend_can_refcount 1
-+#define elf_backend_want_got_plt 1
-+#define elf_backend_plt_readonly 1
-+#define elf_backend_plt_alignment 4
-+#define elf_backend_want_plt_sym 1
-+#define elf_backend_got_header_size (ARCH_SIZE / 8)
-+#define elf_backend_rela_normal 1
-+#define elf_backend_default_execstack 0
-+
-+#include "elfNN-target.h"
-diff -urN empty/bfd/elfxx-riscv.c binutils-2.26.1/bfd/elfxx-riscv.c
---- empty/bfd/elfxx-riscv.c 1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/elfxx-riscv.c 2016-04-03 10:33:12.062126369 +0800
-@@ -0,0 +1,814 @@
-+/* RISC-V-specific support for ELF.
-+ Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on TILE-Gx and MIPS targets.
-+
-+ This file is part of BFD, the Binary File Descriptor library.
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; see the file COPYING3. If not,
-+ see <http://www.gnu.org/licenses/>. */
-+
-+#include "sysdep.h"
-+#include "bfd.h"
-+#include "libbfd.h"
-+#include "elf-bfd.h"
-+#include "elf/riscv.h"
-+#include "opcode/riscv.h"
-+#include "libiberty.h"
-+#include "elfxx-riscv.h"
-+#include <stdint.h>
-+
-+#define MINUS_ONE ((bfd_vma)0 - 1)
-+
-+/* The relocation table used for SHT_RELA sections. */
-+
-+static reloc_howto_type howto_table[] =
-+{
-+ /* No relocation. */
-+ HOWTO (R_RISCV_NONE, /* type */
-+ 0, /* rightshift */
-+ 3, /* size */
-+ 0, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_NONE", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 32 bit relocation. */
-+ HOWTO (R_RISCV_32, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_32", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0xffffffff, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 64 bit relocation. */
-+ HOWTO (R_RISCV_64, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_64", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Relocation against a local symbol in a shared object. */
-+ HOWTO (R_RISCV_RELATIVE, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_RELATIVE", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0xffffffff, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_COPY, /* type */
-+ 0, /* rightshift */
-+ 0, /* this one is variable size */
-+ 0, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_bitfield, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_COPY", /* name */
-+ FALSE, /* partial_inplace */
-+ 0x0, /* src_mask */
-+ 0x0, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_JUMP_SLOT, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_bitfield, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_JUMP_SLOT", /* name */
-+ FALSE, /* partial_inplace */
-+ 0x0, /* src_mask */
-+ 0x0, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Dynamic TLS relocations. */
-+ HOWTO (R_RISCV_TLS_DTPMOD32, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_DTPMOD32", /* name */
-+ FALSE, /* partial_inplace */
-+ MINUS_ONE, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_TLS_DTPMOD64, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_DTPMOD64", /* name */
-+ FALSE, /* partial_inplace */
-+ MINUS_ONE, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_TLS_DTPREL32, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_DTPREL32", /* name */
-+ TRUE, /* partial_inplace */
-+ MINUS_ONE, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_TLS_DTPREL64, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_DTPREL64", /* name */
-+ TRUE, /* partial_inplace */
-+ MINUS_ONE, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_TLS_TPREL32, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_TPREL32", /* name */
-+ FALSE, /* partial_inplace */
-+ MINUS_ONE, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ HOWTO (R_RISCV_TLS_TPREL64, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_TPREL64", /* name */
-+ FALSE, /* partial_inplace */
-+ MINUS_ONE, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Reserved for future relocs that the dynamic linker must understand. */
-+ EMPTY_HOWTO (12),
-+ EMPTY_HOWTO (13),
-+ EMPTY_HOWTO (14),
-+ EMPTY_HOWTO (15),
-+
-+ /* 12-bit PC-relative branch offset. */
-+ HOWTO (R_RISCV_BRANCH, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_signed, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_BRANCH", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_SBTYPE_IMM (-1U), /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* 20-bit PC-relative jump offset. */
-+ HOWTO (R_RISCV_JAL, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ /* This needs complex overflow
-+ detection, because the upper 36
-+ bits must match the PC + 4. */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_JAL", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UJTYPE_IMM (-1U), /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* 32-bit PC-relative function call (AUIPC/JALR). */
-+ HOWTO (R_RISCV_CALL, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 64, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_CALL", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U) | ((bfd_vma) ENCODE_ITYPE_IMM (-1U) << 32),
-+ /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* 32-bit PC-relative function call (AUIPC/JALR). */
-+ HOWTO (R_RISCV_CALL_PLT, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 64, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_CALL_PLT", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U) | ((bfd_vma) ENCODE_ITYPE_IMM (-1U) << 32),
-+ /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* High 20 bits of 32-bit PC-relative GOT access. */
-+ HOWTO (R_RISCV_GOT_HI20, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_GOT_HI20", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* High 20 bits of 32-bit PC-relative TLS IE GOT access. */
-+ HOWTO (R_RISCV_TLS_GOT_HI20, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_GOT_HI20", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* High 20 bits of 32-bit PC-relative TLS GD GOT reference. */
-+ HOWTO (R_RISCV_TLS_GD_HI20, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TLS_GD_HI20", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* High 20 bits of 32-bit PC-relative reference. */
-+ HOWTO (R_RISCV_PCREL_HI20, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_PCREL_HI20", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U), /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* Low 12 bits of a 32-bit PC-relative load or add. */
-+ HOWTO (R_RISCV_PCREL_LO12_I, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_PCREL_LO12_I", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_ITYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Low 12 bits of a 32-bit PC-relative store. */
-+ HOWTO (R_RISCV_PCREL_LO12_S, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_PCREL_LO12_S", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_STYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* High 20 bits of 32-bit absolute address. */
-+ HOWTO (R_RISCV_HI20, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_HI20", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Low 12 bits of 32-bit load or add. */
-+ HOWTO (R_RISCV_LO12_I, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_LO12_I", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_ITYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Low 12 bits of 32-bit store. */
-+ HOWTO (R_RISCV_LO12_S, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_LO12_S", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_STYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* High 20 bits of TLS LE thread pointer offset. */
-+ HOWTO (R_RISCV_TPREL_HI20, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_signed, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TPREL_HI20", /* name */
-+ TRUE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_UTYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Low 12 bits of TLS LE thread pointer offset for loads and adds. */
-+ HOWTO (R_RISCV_TPREL_LO12_I, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_signed, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TPREL_LO12_I", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_ITYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Low 12 bits of TLS LE thread pointer offset for stores. */
-+ HOWTO (R_RISCV_TPREL_LO12_S, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_signed, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TPREL_LO12_S", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_STYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* TLS LE thread pointer usage. */
-+ HOWTO (R_RISCV_TPREL_ADD, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_TPREL_ADD", /* name */
-+ TRUE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 8-bit in-place addition, for local label subtraction. */
-+ HOWTO (R_RISCV_ADD8, /* type */
-+ 0, /* rightshift */
-+ 0, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_ADD8", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 16-bit in-place addition, for local label subtraction. */
-+ HOWTO (R_RISCV_ADD16, /* type */
-+ 0, /* rightshift */
-+ 1, /* size */
-+ 16, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_ADD16", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 32-bit in-place addition, for local label subtraction. */
-+ HOWTO (R_RISCV_ADD32, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_ADD32", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 64-bit in-place addition, for local label subtraction. */
-+ HOWTO (R_RISCV_ADD64, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_ADD64", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 8-bit in-place subtraction, for local label subtraction. */
-+ HOWTO (R_RISCV_SUB8, /* type */
-+ 0, /* rightshift */
-+ 0, /* size */
-+ 8, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_SUB8", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 16-bit in-place subtraction, for local label subtraction. */
-+ HOWTO (R_RISCV_SUB16, /* type */
-+ 0, /* rightshift */
-+ 1, /* size */
-+ 16, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_SUB16", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 32-bit in-place subtraction, for local label subtraction. */
-+ HOWTO (R_RISCV_SUB32, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_SUB32", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 64-bit in-place subtraction, for local label subtraction. */
-+ HOWTO (R_RISCV_SUB64, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 64, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_SUB64", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ MINUS_ONE, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* GNU extension to record C++ vtable hierarchy */
-+ HOWTO (R_RISCV_GNU_VTINHERIT, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 0, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ NULL, /* special_function */
-+ "R_RISCV_GNU_VTINHERIT", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* GNU extension to record C++ vtable member usage */
-+ HOWTO (R_RISCV_GNU_VTENTRY, /* type */
-+ 0, /* rightshift */
-+ 4, /* size */
-+ 0, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ _bfd_elf_rel_vtable_reloc_fn, /* special_function */
-+ "R_RISCV_GNU_VTENTRY", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0, /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* Indicates an alignment statement. The addend field encodes how many
-+ bytes of NOPs follow the statement. The desired alignment is the
-+ addend rounded up to the next power of two. */
-+ HOWTO (R_RISCV_ALIGN, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 0, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_ALIGN", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ 0, /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* 8-bit PC-relative branch offset. */
-+ HOWTO (R_RISCV_RVC_BRANCH, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_signed, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_RVC_BRANCH", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_RVC_B_IMM (-1U), /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* 11-bit PC-relative jump offset. */
-+ HOWTO (R_RISCV_RVC_JUMP, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ TRUE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ /* This needs complex overflow
-+ detection, because the upper 36
-+ bits must match the PC + 4. */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_RVC_JUMP", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_RVC_J_IMM (-1U), /* dst_mask */
-+ TRUE), /* pcrel_offset */
-+
-+ /* High 6 bits of 18-bit absolute address. */
-+ HOWTO (R_RISCV_RVC_LUI, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_RVC_LUI", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_RVC_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 12-bit GP-relative reference, for loads and adds. */
-+ HOWTO (R_RISCV_GPREL_I, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_GPREL_I", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_ITYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+
-+ /* 12-bit GP-relative reference, for stores. */
-+ HOWTO (R_RISCV_GPREL_S, /* type */
-+ 0, /* rightshift */
-+ 2, /* size */
-+ 32, /* bitsize */
-+ FALSE, /* pc_relative */
-+ 0, /* bitpos */
-+ complain_overflow_dont, /* complain_on_overflow */
-+ bfd_elf_generic_reloc, /* special_function */
-+ "R_RISCV_GPREL_S", /* name */
-+ FALSE, /* partial_inplace */
-+ 0, /* src_mask */
-+ ENCODE_STYPE_IMM (-1U), /* dst_mask */
-+ FALSE), /* pcrel_offset */
-+};
-+
-+/* A mapping from BFD reloc types to RISC-V ELF reloc types. */
-+
-+struct elf_reloc_map {
-+ bfd_reloc_code_real_type bfd_val;
-+ enum elf_riscv_reloc_type elf_val;
-+};
-+
-+static const struct elf_reloc_map riscv_reloc_map[] =
-+{
-+ { BFD_RELOC_NONE, R_RISCV_NONE },
-+ { BFD_RELOC_32, R_RISCV_32 },
-+ { BFD_RELOC_64, R_RISCV_64 },
-+ { BFD_RELOC_RISCV_ADD8, R_RISCV_ADD8 },
-+ { BFD_RELOC_RISCV_ADD16, R_RISCV_ADD16 },
-+ { BFD_RELOC_RISCV_ADD32, R_RISCV_ADD32 },
-+ { BFD_RELOC_RISCV_ADD64, R_RISCV_ADD64 },
-+ { BFD_RELOC_RISCV_SUB8, R_RISCV_SUB8 },
-+ { BFD_RELOC_RISCV_SUB16, R_RISCV_SUB16 },
-+ { BFD_RELOC_RISCV_SUB32, R_RISCV_SUB32 },
-+ { BFD_RELOC_RISCV_SUB64, R_RISCV_SUB64 },
-+ { BFD_RELOC_CTOR, R_RISCV_64 },
-+ { BFD_RELOC_12_PCREL, R_RISCV_BRANCH },
-+ { BFD_RELOC_RISCV_HI20, R_RISCV_HI20 },
-+ { BFD_RELOC_RISCV_LO12_I, R_RISCV_LO12_I },
-+ { BFD_RELOC_RISCV_LO12_S, R_RISCV_LO12_S },
-+ { BFD_RELOC_RISCV_PCREL_LO12_I, R_RISCV_PCREL_LO12_I },
-+ { BFD_RELOC_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_S },
-+ { BFD_RELOC_RISCV_CALL, R_RISCV_CALL },
-+ { BFD_RELOC_RISCV_CALL_PLT, R_RISCV_CALL_PLT },
-+ { BFD_RELOC_RISCV_PCREL_HI20, R_RISCV_PCREL_HI20 },
-+ { BFD_RELOC_RISCV_JMP, R_RISCV_JAL },
-+ { BFD_RELOC_RISCV_GOT_HI20, R_RISCV_GOT_HI20 },
-+ { BFD_RELOC_RISCV_TLS_DTPMOD32, R_RISCV_TLS_DTPMOD32 },
-+ { BFD_RELOC_RISCV_TLS_DTPREL32, R_RISCV_TLS_DTPREL32 },
-+ { BFD_RELOC_RISCV_TLS_DTPMOD64, R_RISCV_TLS_DTPMOD64 },
-+ { BFD_RELOC_RISCV_TLS_DTPREL64, R_RISCV_TLS_DTPREL64 },
-+ { BFD_RELOC_RISCV_TLS_TPREL32, R_RISCV_TLS_TPREL32 },
-+ { BFD_RELOC_RISCV_TLS_TPREL64, R_RISCV_TLS_TPREL64 },
-+ { BFD_RELOC_RISCV_TPREL_HI20, R_RISCV_TPREL_HI20 },
-+ { BFD_RELOC_RISCV_TPREL_ADD, R_RISCV_TPREL_ADD },
-+ { BFD_RELOC_RISCV_TPREL_LO12_S, R_RISCV_TPREL_LO12_S },
-+ { BFD_RELOC_RISCV_TPREL_LO12_I, R_RISCV_TPREL_LO12_I },
-+ { BFD_RELOC_RISCV_TLS_GOT_HI20, R_RISCV_TLS_GOT_HI20 },
-+ { BFD_RELOC_RISCV_TLS_GD_HI20, R_RISCV_TLS_GD_HI20 },
-+ { BFD_RELOC_RISCV_ALIGN, R_RISCV_ALIGN },
-+ { BFD_RELOC_RISCV_RVC_BRANCH, R_RISCV_RVC_BRANCH },
-+ { BFD_RELOC_RISCV_RVC_JUMP, R_RISCV_RVC_JUMP },
-+ { BFD_RELOC_RISCV_RVC_LUI, R_RISCV_RVC_LUI },
-+ { BFD_RELOC_RISCV_GPREL_I, R_RISCV_GPREL_I },
-+ { BFD_RELOC_RISCV_GPREL_S, R_RISCV_GPREL_S },
-+};
-+
-+/* Given a BFD reloc type, return a howto structure. */
-+
-+reloc_howto_type *
-+riscv_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
-+ bfd_reloc_code_real_type code)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < ARRAY_SIZE (riscv_reloc_map); i++)
-+ if (riscv_reloc_map[i].bfd_val == code)
-+ return &howto_table[(int) riscv_reloc_map[i].elf_val];
-+
-+ bfd_set_error (bfd_error_bad_value);
-+ return NULL;
-+}
-+
-+reloc_howto_type *
-+riscv_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, const char *r_name)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < ARRAY_SIZE (howto_table); i++)
-+ if (howto_table[i].name && strcasecmp (howto_table[i].name, r_name) == 0)
-+ return &howto_table[i];
-+
-+ return NULL;
-+}
-+
-+reloc_howto_type *
-+riscv_elf_rtype_to_howto (unsigned int r_type)
-+{
-+ if (r_type >= ARRAY_SIZE (howto_table))
-+ {
-+ (*_bfd_error_handler) (_("unrecognized relocation (0x%x)"), r_type);
-+ bfd_set_error (bfd_error_bad_value);
-+ return NULL;
-+ }
-+ return &howto_table[r_type];
-+}
-diff -urN empty/bfd/elfxx-riscv.h binutils-2.26.1/bfd/elfxx-riscv.h
---- empty/bfd/elfxx-riscv.h 1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/bfd/elfxx-riscv.h 2016-04-03 10:12:57.122276559 +0800
-@@ -0,0 +1,33 @@
-+/* RISC-V ELF specific backend routines.
-+ Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target.
-+
-+ This file is part of BFD, the Binary File Descriptor library.
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; see the file COPYING3. If not,
-+ see <http://www.gnu.org/licenses/>. */
-+
-+#include "elf/common.h"
-+#include "elf/internal.h"
-+
-+extern reloc_howto_type *
-+riscv_reloc_name_lookup (bfd *, const char *);
-+
-+extern reloc_howto_type *
-+riscv_reloc_type_lookup (bfd *, bfd_reloc_code_real_type);
-+
-+extern reloc_howto_type *
-+riscv_elf_rtype_to_howto (unsigned int r_type);
-diff -urN empty/gas/config/tc-riscv.c binutils-2.26.1/gas/config/tc-riscv.c
---- empty/gas/config/tc-riscv.c 1970-01-01 08:00:00.000000000 +0800
-+++ binutils-2.26.1/gas/config/tc-riscv.c 2016-04-09 10:50:33.576657106 +0800
-@@ -0,0 +1,2434 @@
-+/* tc-riscv.c -- RISC-V assembler
-+ Copyright 2011-2015 Free Software Foundation, Inc.
-+
-+ Contributed by Andrew Waterman (waterman(a)cs.berkeley.edu) at UC Berkeley.
-+ Based on MIPS target.
-+
-+ This file is part of GAS.
-+
-+ GAS is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GAS is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; see the file COPYING3. If not,
-+ see <http://www.gnu.org/licenses/>. */
-+
-+#include "as.h"
-+#include "config.h"
-+#include "subsegs.h"
-+#include "safe-ctype.h"
-+
-+#include "itbl-ops.h"
-+#include "dwarf2dbg.h"
-+#include "dw2gencfi.h"
-+
-+#include "elf/riscv.h"
-+#include "opcode/riscv.h"
-+
-+#include <execinfo.h>
-+#include <stdint.h>
-+
-+/* Information about an instruction, including its format, operands
-+ and fixups. */
-+struct riscv_cl_insn
-+{
-+ /* The opcode's entry in riscv_opcodes. */
-+ const struct riscv_opcode *insn_mo;
-+
-+ /* The encoded instruction bits. */
-+ insn_t insn_opcode;
-+
-+ /* The frag that contains the instruction. */
-+ struct frag *frag;
-+
-+ /* The offset into FRAG of the first instruction byte. */
-+ long where;
-+
-+ /* The relocs associated with the instruction, if any. */
-+ fixS *fixp;
-+};
-+
-+/* The default architecture. */
-+#ifndef DEFAULT_ARCH
-+#define DEFAULT_ARCH "riscv64"
-+#endif
-+static const char default_arch[] = DEFAULT_ARCH;
-+
-+unsigned xlen = 0; /* width of an x-register */
-+#define LOAD_ADDRESS_INSN (xlen == 64 ? "ld" : "lw")
-+#define ADD32_INSN (xlen == 64 ? "addiw" : "addi")
-+
-+static unsigned elf_flags = 0;
-+
-+/* This is the set of options which the .option pseudo-op may modify. */
-+
-+struct riscv_set_options
-+{
-+ int pic; /* Generate position-independent code. */
-+ int rvc; /* Generate RVC code. */
-+};
-+
-+static struct riscv_set_options riscv_opts =
-+{
-+ 0, /* pic */
-+ 0, /* rvc */
-+};
-+
-+static void
-+riscv_set_rvc (bfd_boolean rvc_value)
-+{
-+ if (rvc_value)
-+ elf_flags |= EF_RISCV_RVC;
-+
-+ riscv_opts.rvc = rvc_value;
-+}
-+
-+struct riscv_subset
-+{
-+ const char *name;
-+ int version_major;
-+ int version_minor;
-+
-+ struct riscv_subset *next;
-+};
-+
-+static struct riscv_subset *riscv_subsets;
-+
-+static bfd_boolean
-+riscv_subset_supports (const char *feature)
-+{
-+ struct riscv_subset *s;
-+ char *p;
-+ unsigned xlen_required = strtoul (feature, &p, 10);
-+
-+ if (xlen_required && xlen != xlen_required)
-+ return FALSE;
-+
-+ for (s = riscv_subsets; s != NULL; s = s->next)
-+ if (strcasecmp (s->name, p) == 0)
-+ /* FIXME: once we support version numbers:
-+ return major == s->version_major && minor <= s->version_minor; */
-+ return TRUE;
-+
-+ return FALSE;
-+}
-+
-+static void
-+riscv_add_subset (const char *subset)
-+{
-+ struct riscv_subset *s = xmalloc (sizeof *s);
-+ s->name = xstrdup (subset);
-+ s->version_major = 2;
-+ s->version_minor = 0;
-+ s->next = riscv_subsets;
-+ riscv_subsets = s;
-+}
-+
-+/* Set which ISA and extensions are available. Formally, ISA strings must
-+ begin with RV32 or RV64, but we allow the prefix to be omitted.
-+
-+ FIXME: Version numbers are not supported yet. */
-+static void
-+riscv_set_arch (const char *arg)
-+{
-+ char *uppercase = xstrdup (arg);
-+ char *p = uppercase;
-+ const char *all_subsets = "IMAFDC";
-+ const char *extension = NULL;
-+ int rvc = 0;
-+ int i;
-+
-+ for (i = 0; uppercase[i]; i++)
-+ uppercase[i] = TOUPPER (uppercase[i]);
-+
-+ if (strncmp (p, "RV32", 4) == 0)
-+ {
-+ xlen = 32;
-+ p += 4;
-+ }
-+ else if (strncmp (p, "RV64", 4) == 0)
-+ {
-+ xlen = 64;
-+ p += 4;
-+ }
-+ else if (strncmp (p, "RV", 2) == 0)
-+ p += 2;
-+
-+ switch (*p)
-+ {
-+ case 'I':
-+ break;
-+
-+ case 'G':
-+ p++;
-+ /* Fall through. */
-+
-+ case '\0':
-+ for (i = 0; all_subsets[i] != '\0'; i++)
-+ {
-+ const char subset[] = {all_subsets[i], '\0'};
-+ riscv_add_subset (subset);
-+ }
-+ break;
-+
-+ default:
-+ as_fatal ("`I' must be the first ISA subset name specified (got %c)",
-+ *p);
-+ }
-+
-+ while (*p)
-+ {
-+ if (*p == 'X')
-+ {
-+ char *subset = xstrdup (p), *q = subset;
-+
-+ while (*++q != '\0' && *q != '_')
-+ ;
-+ *q = '\0';
-+
-+ if (extension)
-+ as_fatal ("only one eXtension is supported (found %s and %s)",
-+ extension, subset);
-+ extension = subset;
-+ riscv_add_subset (subset);
-+ p += strlen (subset);
-+ free (subset);
-+ }
-+ else if (*p == '_')
-+ p++;
-+ else if ((all_subsets = strchr (all_subsets, *p)) != NULL)
-+ {
-+ const char subset[] = {*p, 0};
-+ riscv_add_subset (subset);
-+ if (*p == 'C')
-+ rvc = 1;
-+ all_subsets++;
-+ p++;
-+ }
-+ else
-+ as_fatal ("unsupported ISA subset %c", *p);
-+ }
-+
-+ if (rvc)
-+ /* Override -m[no-]rvc setting if C was explicitly listed. */
-+ riscv_set_rvc (TRUE);
-+ else
-+ /* Add RVC anyway. -m[no-]rvc toggles its availability. */
-+ riscv_add_subset ("C");
-+
-+ free (uppercase);
-+}
-+
-+/* Handle of the OPCODE hash table. */
-+static struct hash_control *op_hash = NULL;
-+
-+/* This array holds the chars that always start a comment. If the
-+ pre-processor is disabled, these aren't very useful */
-+const char comment_chars[] = "#";
-+
-+/* This array holds the chars that only start a comment at the beginning of
-+ a line. If the line seems to have the form '# 123 filename'
-+ .line and .file directives will appear in the pre-processed output */
-+/* Note that input_file.c hand checks for '#' at the beginning of the
-+ first line of the input file. This is because the compiler outputs
-+ #NO_APP at the beginning of its output. */
-+/* Also note that C style comments are always supported. */
-+const char line_comment_chars[] = "#";
-+
-+/* This array holds machine specific line separator characters. */
-+const char line_separator_chars[] = ";";
-+
-+/* Chars that can be used to separate mant from exp in floating point nums */
-+const char EXP_CHARS[] = "eE";
-+
-+/* Chars that mean this number is a floating point constant */
-+/* As in 0f12.456 */
-+/* or 0d1.2345e12 */
-+const char FLT_CHARS[] = "rRsSfFdDxXpP";
-+
-+/* Macros for encoding relaxation state for RVC branches and far jumps. */
-+#define RELAX_BRANCH_ENCODE(uncond, rvc, length) \
-+ ((relax_substateT) \
-+ (0xc0000000 \
-+ | ((uncond) ? 1 : 0) \
-+ | ((rvc) ? 2 : 0) \
-+ | ((length) << 2)))
-+#define RELAX_BRANCH_P(i) (((i) & 0xf0000000) == 0xc0000000)
-+#define RELAX_BRANCH_LENGTH(i) (((i) >> 2) & 0xF)
-+#define RELAX_BRANCH_RVC(i) (((i) & 2) != 0)
-+#define RELAX_BRANCH_UNCOND(i) (((i) & 1) != 0)
-+
-+/* Is the given value a sign-extended 32-bit value? */
-+#define IS_SEXT_32BIT_NUM(x) \
-+ (((x) &~ (offsetT) 0x7fffffff) == 0 \
-+ || (((x) &~ (offsetT) 0x7fffffff) == ~ (offsetT) 0x7fffffff))
-+
-+/* Is the given value a zero-extended 32-bit value? Or a negated one? */
-+#define IS_ZEXT_32BIT_NUM(x) \
-+ (((x) &~ (offsetT) 0xffffffff) == 0 \
-+ || (((x) &~ (offsetT) 0xffffffff) == ~ (offsetT) 0xffffffff))
-+
-+/* Change INSN's opcode so that the operand given by FIELD has value VALUE.
-+ INSN is a riscv_cl_insn structure and VALUE is evaluated exactly once. */
-+#define INSERT_OPERAND(FIELD, INSN, VALUE) \
-+ INSERT_BITS ((INSN).insn_opcode, VALUE, OP_MASK_##FIELD, OP_SH_##FIELD)
-+
-+/* Determine if an instruction matches an opcode. */
-+#define OPCODE_MATCHES(OPCODE, OP) \
-+ (((OPCODE) & MASK_##OP) == MATCH_##OP)
-+
-+static char *expr_end;
-+
-+/* The default target format to use. */
-+
-+const char *
-+riscv_target_format (void)
-+{
-+ return xlen == 64 ? "elf64-littleriscv" : "elf32-littleriscv";
-+}
-+
-+/* Return the length of instruction INSN. */
-+
-+static inline unsigned int
-+insn_length (const struct riscv_cl_insn *insn)
-+{
-+ return riscv_insn_length (insn->insn_opcode);
-+}
-+
-+/* Initialise INSN from opcode entry MO. Leave its position unspecified. */
-+
-+static void
-+create_insn (struct riscv_cl_insn *insn, const struct riscv_opcode *mo)
-+{
-+ insn->insn_mo = mo;
-+ insn->insn_opcode = mo->match;
-+ insn->frag = NULL;
-+ insn->where = 0;
-+ insn->fixp = NULL;
-+}
-+
-+/* Install INSN at the location specified by its "frag" and "where" fields. */
-+
-+static void
-+install_insn (const struct riscv_cl_insn *insn)
-+{
-+ char *f = insn->frag->fr_literal + insn->where;
-+ md_number_to_chars (f, insn->insn_opcode, insn_length (insn));
-+}
-+
-+/* Move INSN to offset WHERE in FRAG. Adjust the fixups accordingly
-+ and install the opcode in the new location. */
-+
-+static void
-+move_insn (struct riscv_cl_insn *insn, fragS *frag, long where)
-+{
-+ insn->frag = frag;
-+ insn->where = where;
-+ if (insn->fixp != NULL)
-+ {
-+ insn->fixp->fx_frag = frag;
-+ insn->fixp->fx_where = where;
-+ }
-+ install_insn (insn);
-+}
-+
-+/* Add INSN to the end of the output. */
-+
-+static void
-+add_fixed_insn (struct riscv_cl_insn *insn)
-+{
-+ char *f = frag_more (insn_length (insn));
-+ move_insn (insn, frag_now, f - frag_now->fr_literal);
-+}
-+
-+static void
-+add_relaxed_insn (struct riscv_cl_insn *insn, int max_chars, int var,
-+ relax_substateT subtype, symbolS *symbol, offsetT offset)
-+{
-+ frag_grow (max_chars);
-+ move_insn (insn, frag_now, frag_more (0) - frag_now->fr_literal);
-+ frag_var (rs_machine_dependent, max_chars, var,
-+ subtype, symbol, offset, NULL);
-+}
-+
-+/* Compute the length of a branch sequence, and adjust the stored length
-+ accordingly. If FRAGP is NULL, the worst-case length is returned. */
-+
-+static int
-+relaxed_branch_length (fragS *fragp, asection *sec, int update)
-+{
-+ int jump, rvc, length = 8;
-+
-+ if (!fragp)
-+ return length;
-+
-+ jump = RELAX_BRANCH_UNCOND (fragp->fr_subtype);
-+ rvc = RELAX_BRANCH_RVC (fragp->fr_subtype);
-+ length = RELAX_BRANCH_LENGTH (fragp->fr_subtype);
-+
-+ /* Assume jumps are in range; the linker will catch any that aren't. */
-+ length = jump ? 4 : 8;
-+
-+ if (fragp->fr_symbol != NULL
-+ && S_IS_DEFINED (fragp->fr_symbol)
-+ && sec == S_GET_SEGMENT (fragp->fr_symbol))
-+ {
-+ offsetT val = S_GET_VALUE (fragp->fr_symbol) + fragp->fr_offset;
-+ bfd_vma rvc_range = jump ? RVC_JUMP_REACH : RVC_BRANCH_REACH;
-+ val -= fragp->fr_address + fragp->fr_fix;
-+
-+ if (rvc && (bfd_vma)(val + rvc_range/2) < rvc_range)
-+ length = 2;
-+ else if ((bfd_vma)(val + RISCV_BRANCH_REACH/2) < RISCV_BRANCH_REACH)
-+ length = 4;
-+ else if (!jump && rvc)
-+ length = 6;
-+ }
-+
-+ if (update)
-+ fragp->fr_subtype = RELAX_BRANCH_ENCODE (jump, rvc, length);
-+
-+ return length;
-+}
-+
-+struct regname {
-+ const char *name;
-+ unsigned int num;
-+};
-+
-+enum reg_class {
-+ RCLASS_GPR,
-+ RCLASS_FPR,
-+ RCLASS_CSR,
-+ RCLASS_MAX
-+};
-+
-+static struct hash_control *reg_names_hash = NULL;
-+
-+#define ENCODE_REG_HASH(cls, n) (void *)(uintptr_t)((n) * RCLASS_MAX + (cls) + 1)
-+#define DECODE_REG_CLASS(hash) (((uintptr_t)(hash) - 1) % RCLASS_MAX)
-+#define DECODE_REG_NUM(hash) (((uintptr_t)(hash) - 1) / RCLASS_MAX)
-+
-+static void
-+hash_reg_name (enum reg_class class, const char *name, unsigned n)
-+{
-+ void *hash = ENCODE_REG_HASH (class, n);
-+ const char *retval = hash_insert (reg_names_hash, name, hash);
-+
-+ if (retval != NULL)
-+ as_fatal (_("internal error: can't hash `%s': %s"), name, retval);
-+}
-+
-+static void
-+hash_reg_names (enum reg_class class, const char * const names[], unsigned n)
-+{
-+ unsigned i;
-+
-+ for (i = 0; i < n; i++)
-+ hash_reg_name (class, names[i], i);
-+}
-+
-+static unsigned int
-+reg_lookup_internal (const char *s, enum reg_class class)
-+{
-+ struct regname *r = (struct regname *) hash_find (reg_names_hash, s);
-+ if (r == NULL || DECODE_REG_CLASS (r) != class)
-+ return -1;
-+ return DECODE_REG_NUM (r);
-+}
-+
-+static int
-+reg_lookup (char **s, enum reg_class class, unsigned int *regnop)
-+{
-+ char *e;
-+ char save_c;
-+ int reg = -1;
-+
-+ /* Find end of name. */
-+ e = *s;
-+ if (is_name_beginner (*e))
-+ ++e;
-+ while (is_part_of_name (*e))
-+ ++e;
-+
-+ /* Terminate name. */
-+ save_c = *e;
-+ *e = '\0';
-+
-+ /* Look for the register. Advance to next token if one was recognized. */
-+ if ((reg = reg_lookup_internal (*s, class)) >= 0)
-+ *s = e;
-+
-+ *e = save_c;
-+ if (regnop)
-+ *regnop = reg;
-+ return reg >= 0;
-+}
-+
-+static int
-+arg_lookup (char **s, const char *const *array, size_t size, unsigned *regnop)
-+{
-+ const char *p = strchr (*s, ',');
-+ size_t i, len = p ? (size_t)(p - *s) : strlen (*s);
-+
-+ for (i = 0; i < size; i++)
-+ if (array[i] != NULL && strncmp (array[i], *s, len) == 0)
-+ {
-+ *regnop = i;
-+ *s += len;
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+/* For consistency checking, verify that all bits are specified either
-+ by the match/mask part of the instruction definition, or by the
-+ operand list. */
-+static int
-+validate_riscv_insn (const struct riscv_opcode *opc)
-+{
-+ const char *p = opc->args;
-+ char c;
-+ insn_t used_bits = opc->mask;
-+ int insn_width = 8 * riscv_insn_length (opc->match);
-+ insn_t required_bits = ~0ULL >> (64 - insn_width);
-+
-+ if ((used_bits & opc->match) != (opc->match & required_bits))
-+ {
-+ as_bad (_("internal: bad RISC-V opcode (mask error): %s %s"),
-+ opc->name, opc->args);
-+ return 0;
-+ }
-+
-+#define USE_BITS(mask,shift) (used_bits |= ((insn_t)(mask) << (shift)))
-+ while (*p)
-+ switch (c = *p++)
-+ {
-+ /* Xcustom */
-+ case '^':
-+ switch (c = *p++)
-+ {
-+ case 'd': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
-+ case 's': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
-+ case 't': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
-+ case 'j': USE_BITS (OP_MASK_CUSTOM_IMM, OP_SH_CUSTOM_IMM); break;
-+ }
-+ break;
-+ case 'C': /* RVC */
-+ switch (c = *p++)
-+ {
-+ case 'a': used_bits |= ENCODE_RVC_J_IMM(-1U); break;
-+ case 'c': break; /* RS1, constrained to equal sp */
-+ case 'i': used_bits |= ENCODE_RVC_SIMM3(-1U); break;
-+ case 'j': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+ case 'k': used_bits |= ENCODE_RVC_LW_IMM(-1U); break;
-+ case 'l': used_bits |= ENCODE_RVC_LD_IMM(-1U); break;
-+ case 'm': used_bits |= ENCODE_RVC_LWSP_IMM(-1U); break;
-+ case 'n': used_bits |= ENCODE_RVC_LDSP_IMM(-1U); break;
-+ case 'p': used_bits |= ENCODE_RVC_B_IMM(-1U); break;
-+ case 's': USE_BITS (OP_MASK_CRS1S, OP_SH_CRS1S); break;
-+ case 't': USE_BITS (OP_MASK_CRS2S, OP_SH_CRS2S); break;
-+ case 'u': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+ case 'v': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+ case 'w': break; /* RS1S, constrained to equal RD */
-+ case 'x': break; /* RS2S, constrained to equal RD */
-+ case 'K': used_bits |= ENCODE_RVC_ADDI4SPN_IMM(-1U); break;
-+ case 'L': used_bits |= ENCODE_RVC_ADDI16SP_IMM(-1U); break;
-+ case 'M': used_bits |= ENCODE_RVC_SWSP_IMM(-1U); break;
-+ case 'N': used_bits |= ENCODE_RVC_SDSP_IMM(-1U); break;
-+ case 'U': break; /* RS1, constrained to equal RD */
-+ case 'V': USE_BITS (OP_MASK_CRS2, OP_SH_CRS2); break;
-+ case '<': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+ case '>': used_bits |= ENCODE_RVC_IMM(-1U); break;
-+ case 'T': USE_BITS (OP_MASK_CRS2, OP_SH_CRS2); break;
-+ case 'D': USE_BITS (OP_MASK_CRS2S, OP_SH_CRS2S); break;
-+ default:
-+ as_bad (_("internal: bad RISC-V opcode (unknown operand type `C%c'): %s %s"),
-+ c, opc->name, opc->args);
-+ return 0;
-+ }
-+ break;
-+ case ',': break;
-+ case '(': break;
-+ case ')': break;
-+ case '<': USE_BITS (OP_MASK_SHAMTW, OP_SH_SHAMTW); break;
-+ case '>': USE_BITS (OP_MASK_SHAMT, OP_SH_SHAMT); break;
-+ case 'A': break;
-+ case 'D': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
-+ case 'Z': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
-+ case 'E': USE_BITS (OP_MASK_CSR, OP_SH_CSR); break;
-+ case 'I': break;
-+ case 'R': USE_BITS (OP_MASK_RS3, OP_SH_RS3); break;
-+ case 'S': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
-+ case 'U': USE_BITS (OP_MASK_RS1, OP_SH_RS1); /* fallthru */
-+ case 'T': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
-+ case 'd': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
-+ case 'm': USE_BITS (OP_MASK_RM, OP_SH_RM); break;
-+ case 's': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
-+ case 't': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
-+ case 'P': USE_BITS (OP_MASK_PRED, OP_SH_PRED); break;
-+ case 'Q': USE_BITS (OP_MASK_SUCC, OP_SH_SUCC); break;
-+ case 'o':
-+ case 'j': used_bits |= ENCODE_ITYPE_IMM(-1U); break;
-+ case 'a': used_bits |= ENCODE_UJTYPE_IMM(-1U); break;
-+ case 'p': used_bits |= ENCODE_SBTYPE_IMM(-1U); break;
-+ case 'q': used_bits |= ENCODE_STYPE_IMM(-1U); break;
-+ case 'u': used_bits |= ENCODE_UTYPE_IMM(-1U); break;
-+ case '[': break;
-+ case ']': break;
-+ case '0': break;
-+ default:
-+ as_bad (_("internal: bad RISC-V opcode (unknown operand type `%c'): %s %s"),
-+ c, opc->name, opc->args);
-+ return 0;
-+ }
-+#undef USE_BITS
-+ if (used_bits != required_bits)
-+ {
-+ as_bad (_("internal: bad RISC-V opcode (bits 0x%lx undefined): %s %s"),
-+ ~(long)(used_bits & required_bits), opc->name, opc->args);
-+ return 0;
-+ }
-+ return 1;
-+}
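
To make the bit-accounting above concrete: for a 32-bit R-type instruction such as add, the fixed opcode bits plus the rd/rs1/rs2 operand fields must together cover all 32 bits, otherwise the opcode table entry is rejected. A standalone check of that idea, where the 0xfe00707f/"add" mask value is assumed from the standard encoding rather than taken from this patch:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  uint64_t required_bits = ~0ULL >> (64 - 32);   /* a 32-bit instruction */
  uint64_t used_bits = 0xfe00707fULL;            /* assumed opcode mask for "add" */

  used_bits |= 0x1fULL << 7;                     /* 'd' : rd  field, bits 11..7  */
  used_bits |= 0x1fULL << 15;                    /* 's' : rs1 field, bits 19..15 */
  used_bits |= 0x1fULL << 20;                    /* 't' : rs2 field, bits 24..20 */

  assert (used_bits == required_bits);           /* every bit accounted for */
  return 0;
}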
-+
-+struct percent_op_match
-+{
-+ const char *str;
-+ bfd_reloc_code_real_type reloc;
-+};
-+
-+/* This function is called once, at assembler startup time. It should set up
-+ all the tables, etc. that the MD part of the assembler will need. */
-+
-+void
-+md_begin (void)
-+{
-+ const char *retval = NULL;
-+ int i = 0;
-+
-+ if (! bfd_set_arch_mach (stdoutput, bfd_arch_riscv, 0))
-+ as_warn (_("Could not set architecture and machine"));
-+
-+ op_hash = hash_new ();
-+
-+ for (i = 0; i < NUMOPCODES;)
-+ {
-+ const char *name = riscv_opcodes[i].name;
-+
-+ retval = hash_insert (op_hash, name, (void *) &riscv_opcodes[i]);
-+
-+ if (retval != NULL)
-+ {
-+ fprintf (stderr, _("internal error: can't hash `%s': %s\n"),
-+ riscv_opcodes[i].name, retval);
-+ /* Probably a memory allocation problem? Give up now. */
-+ as_fatal (_("Broken assembler. No assembly attempted."));
-+ }
-+ do
-+ {
-+ if (riscv_opcodes[i].pinfo != INSN_MACRO)
-+ {
-+ if (!validate_riscv_insn (&riscv_opcodes[i]))
-+ as_fatal (_("Broken assembler. No assembly attempted."));
-+ }
-+ ++i;
-+ }
-+ while ((i < NUMOPCODES) && !strcmp (riscv_opcodes[i].name, name));
-+ }
-+
-+ reg_names_hash = hash_new ();
-+ hash_reg_names (RCLASS_GPR, riscv_gpr_names_numeric, NGPR);
-+ hash_reg_names (RCLASS_GPR, riscv_gpr_names_abi, NGPR);
-+ hash_reg_names (RCLASS_FPR, riscv_fpr_names_numeric, NFPR);
-+ hash_reg_names (RCLASS_FPR, riscv_fpr_names_abi, NFPR);
-+
-+#define DECLARE_CSR(name, num) hash_reg_name (RCLASS_CSR, #name, num);
-+#include "opcode/riscv-opc.h"
-+#undef DECLARE_CSR
-+
-+ /* Set the default alignment for the text section. */
-+ record_alignment (text_section, riscv_opts.rvc ? 1 : 2);
-+}
-+
-+/* Output an instruction. IP is the instruction information.
-+ ADDRESS_EXPR is an operand of the instruction to be used with
-+ RELOC_TYPE. */
-+
-+static void
-+append_insn (struct riscv_cl_insn *ip, expressionS *address_expr,
-+ bfd_reloc_code_real_type reloc_type)
-+{
-+#ifdef OBJ_ELF
-+ dwarf2_emit_insn (0);
-+#endif
-+
-+ if (reloc_type != BFD_RELOC_UNUSED)
-+ {
-+ reloc_howto_type *howto;
-+
-+ gas_assert(address_expr);
-+ if (reloc_type == BFD_RELOC_12_PCREL
-+ || reloc_type == BFD_RELOC_RISCV_JMP)
-+ {
-+ int j = reloc_type == BFD_RELOC_RISCV_JMP;
-+ int best_case = riscv_insn_length (ip->insn_opcode);
-+ int worst_case = relaxed_branch_length (NULL, NULL, 0);
-+ add_relaxed_insn (ip, worst_case, best_case,
-+ RELAX_BRANCH_ENCODE (j, best_case == 2, worst_case),
-+ address_expr->X_add_symbol,
-+ address_expr->X_add_number);
-+ return;
-+ }
-+ else if (address_expr->X_op == O_constant)
-+ {
-+ switch (reloc_type)
-+ {
-+ case BFD_RELOC_32:
-+ ip->insn_opcode |= address_expr->X_add_number;
-+ goto append;
-+
-+ case BFD_RELOC_RISCV_HI20:
-+ {
-+ insn_t imm = RISCV_CONST_HIGH_PART (address_expr->X_add_number);
-+ ip->insn_opcode |= ENCODE_UTYPE_IMM (imm);
-+ goto append;
-+ }
-+
-+ case BFD_RELOC_RISCV_LO12_S:
-+ ip->insn_opcode |= ENCODE_STYPE_IMM (address_expr->X_add_number);
-+ goto append;
-+
-+ case BFD_RELOC_RISCV_LO12_I:
-+ ip->insn_opcode |= ENCODE_ITYPE_IMM (address_expr->X_add_number);
-+ goto append;
-+
-+ default:
-+ break;
-+ }
-+ }
-+
-+ howto = bfd_reloc_type_lookup (stdoutput, reloc_type);
-+ if (howto == NULL)
-+ as_bad (_("Unsupported RISC-V relocation number %d"), reloc_type);
-+
-+ ip->fixp = fix_new_exp (ip->frag, ip->where,
-+ bfd_get_reloc_size (howto),
-+ address_expr, FALSE, reloc_type);
-+ }
-+
-+append:
-+ add_fixed_insn (ip);
-+ install_insn (ip);
-+}
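
Regarding the O_constant HI20/LO12 handling above: the low part is the sign-extended low 12 bits, so the high part has to round by 0x800 before masking in order for the two pieces to sum back to the original constant. A small standalone check of that identity, with an arbitrary test value (sketch, not taken from the patch):

#include <assert.h>
#include <stdint.h>

int main (void)
{
  int32_t value = 0x12345FFF;            /* arbitrary test constant */

  int32_t lo = value & 0xFFF;            /* low 12 bits ...                     */
  if (lo >= 0x800)
    lo -= 0x1000;                        /* ... sign-extended, as addi sees them */

  int32_t hi = (value + 0x800) & ~0xFFF; /* RISCV_CONST_HIGH_PART-style rounding */

  assert (hi + lo == value);             /* lui/auipc + addi reconstructs value  */
  return 0;
}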
-+
-+/* Build an instruction created by a macro expansion. This is passed
-+ a pointer to the count of instructions created so far, an
-+ expression, the name of the instruction to build, an operand format
-+ string, and corresponding arguments. */
-+
-+static void
-+macro_build (expressionS *ep, const char *name, const char *fmt, ...)
-+{
-+ const struct riscv_opcode *mo;
-+ struct riscv_cl_insn insn;
-+ bfd_reloc_code_real_type r;
-+ va_list args;
-+
-+ va_start (args, fmt);
-+
-+ r = BFD_RELOC_UNUSED;
-+ mo = (struct riscv_opcode *) hash_find (op_hash, name);
-+ gas_assert (mo);
-+
-+ /* Find a non-RVC variant of the instruction. */
-+ while (riscv_insn_length (mo->match) < 4)
-+ mo++;
-+ gas_assert (strcmp (name, mo->name) == 0);
-+
-+ create_insn (&insn, mo);
-+ for (;;)
-+ {
-+ switch (*fmt++)
-+ {
-+ case 'd':
-+ INSERT_OPERAND (RD, insn, va_arg (args, int));
-+ continue;
-+
-+ case 's':
-+ INSERT_OPERAND (RS1, insn, va_arg (args, int));
-+ continue;
-+
-+ case 't':
-+ INSERT_OPERAND (RS2, insn, va_arg (args, int));
-+ continue;
-+
-+ case '>':
-+ INSERT_OPERAND (SHAMT, insn, va_arg (args, int));
-+ continue;
-+
-+ case 'j':
-+ case 'u':
-+ case 'q':
-+ gas_assert (ep != NULL);
-+ r = va_arg (args, int);
-+ continue;
-+
-+ case '\0':
-+ break;
-+ case ',':
-+ continue;
-+ default:
-+ as_fatal (_("internal error: invalid macro"));
-+ }
-+ break;
-+ }
-+ va_end (args);
-+ gas_assert (r == BFD_RELOC_UNUSED ? ep == NULL : ep != NULL);
-+
-+ append_insn (&insn, ep, r);
-+}
-+
-+/* Sign-extend 32-bit mode constants that have bit 31 set and all higher bits
-+ unset. */
-+static void
-+normalize_constant_expr (expressionS *ex)
-+{
-+ if (xlen > 32)
-+ return;
-+ if ((ex->X_op == O_constant || ex->X_op == O_symbol)
-+ && IS_ZEXT_32BIT_NUM (ex->X_add_number))
-+ ex->X_add_number = (((ex->X_add_number & 0xffffffff) ^ 0x80000000)
-+ - 0x80000000);
-+}
-+
-+/* Warn if an expression is not a constant. */
-+
-+static void
-+check_absolute_expr (struct riscv_cl_insn *ip, expressionS *ex)
-+{
-+ if (ex->X_op == O_big)
-+ as_bad (_("unsupported large constant"));
-+ else if (ex->X_op != O_constant)
-+ as_bad (_("Instruction %s requires absolute expression"),
-+ ip->insn_mo->name);
-+ normalize_constant_expr (ex);
-+}
-+
-+static symbolS *
-+make_internal_label (void)
-+{
-+ return (symbolS *) local_symbol_make (FAKE_LABEL_NAME, now_seg,
-+ (valueT) frag_now_fix(), frag_now);
-+}
-+
-+/* Load an entry from the GOT. */
-+static void
-+pcrel_access (int destreg, int tempreg, expressionS *ep,
-+ const char *lo_insn, const char *lo_pattern,
-+ bfd_reloc_code_real_type hi_reloc,
-+ bfd_reloc_code_real_type lo_reloc)
-+{
-+ expressionS ep2;
-+ ep2.X_op = O_symbol;
-+ ep2.X_add_symbol = make_internal_label ();
-+ ep2.X_add_number = 0;
-+
-+ macro_build (ep, "auipc", "d,u", tempreg, hi_reloc);
-+ macro_build (&ep2, lo_insn, lo_pattern, destreg, tempreg, lo_reloc);
-+}
-+
-+static void
-+pcrel_load (int destreg, int tempreg, expressionS *ep, const char *lo_insn,
-+ bfd_reloc_code_real_type hi_reloc,
-+ bfd_reloc_code_real_type lo_reloc)
-+{
-+ pcrel_access (destreg, tempreg, ep, lo_insn, "d,s,j", hi_reloc, lo_reloc);
-+}
-+
-+static void
-+pcrel_store (int srcreg, int tempreg, expressionS *ep, const char *lo_insn,
-+ bfd_reloc_code_real_type hi_reloc,
-+ bfd_reloc_code_real_type lo_reloc)
-+{
-+ pcrel_access (srcreg, tempreg, ep, lo_insn, "t,s,q", hi_reloc, lo_reloc);
-+}
-+
-+/* PC-relative function call using AUIPC/JALR, relaxed to JAL. */
-+static void
-+riscv_call (int destreg, int tempreg, expressionS *ep,
-+ bfd_reloc_code_real_type reloc)
-+{
-+ macro_build (ep, "auipc", "d,u", tempreg, reloc);
-+ macro_build (NULL, "jalr", "d,s", destreg, tempreg);
-+}
-+
-+/* Load an integer constant into a register. */
-+
-+static void
-+load_const (int reg, expressionS *ep)
-+{
-+ int shift = RISCV_IMM_BITS;
-+ expressionS upper = *ep, lower = *ep;
-+ lower.X_add_number = (int32_t) ep->X_add_number << (32-shift) >> (32-shift);
-+ upper.X_add_number -= lower.X_add_number;
-+
-+ if (ep->X_op != O_constant)
-+ {
-+ as_bad (_("unsupported large constant"));
-+ return;
-+ }
-+
-+ if (xlen > 32 && !IS_SEXT_32BIT_NUM(ep->X_add_number))
-+ {
-+ /* Reduce to a signed 32-bit constant using SLLI and ADDI, which
-+ is not optimal but also not so bad. */
-+ while (((upper.X_add_number >> shift) & 1) == 0)
-+ shift++;
-+
-+ upper.X_add_number = (int64_t) upper.X_add_number >> shift;
-+ load_const(reg, &upper);
-+
-+ macro_build (NULL, "slli", "d,s,>", reg, reg, shift);
-+ if (lower.X_add_number != 0)
-+ macro_build (&lower, "addi", "d,s,j", reg, reg, BFD_RELOC_RISCV_LO12_I);
-+ }
-+ else
-+ {
-+ int hi_reg = 0;
-+
-+ if (upper.X_add_number != 0)
-+ {
-+ macro_build (ep, "lui", "d,u", reg, BFD_RELOC_RISCV_HI20);
-+ hi_reg = reg;
-+ }
-+
-+ if (lower.X_add_number != 0 || hi_reg == 0)
-+ macro_build (ep, ADD32_INSN, "d,s,j", reg, hi_reg,
-+ BFD_RELOC_RISCV_LO12_I);
-+ }
-+}
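
The recursion in load_const can be paraphrased as: peel off the sign-extended low 12 bits for an addi, and if the remainder still does not fit in a signed 32-bit range, shift it right until bit 0 is set, load that recursively, then shift back with slli. A rough standalone sketch that prints one such expansion; the mnemonic text and register naming are illustrative only and not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Print one possible expansion of "li xREG, VALUE", following the same
   lower/upper split and recursion as load_const.  Illustration only.  */
static void
emit_li (int reg, int64_t value)
{
  int64_t lower = value & 0xFFF;
  if (lower >= 0x800)
    lower -= 0x1000;                       /* sign-extended low 12 bits */
  int64_t upper = value - lower;

  if (value >= INT32_MIN && value <= INT32_MAX)
    {
      if (upper != 0)
        printf ("lui  x%d, 0x%llx\n", reg,
                (unsigned long long) ((upper >> 12) & 0xFFFFF));
      if (lower != 0 || upper == 0)
        printf ("addi x%d, x%d, %lld\n", reg, upper ? reg : 0,
                (long long) lower);
      return;
    }

  int shift = 12;
  while (((upper >> shift) & 1) == 0)      /* normalize so the recursion terminates */
    shift++;
  emit_li (reg, upper >> shift);
  printf ("slli x%d, x%d, %d\n", reg, reg, shift);
  if (lower != 0)
    printf ("addi x%d, x%d, %lld\n", reg, reg, (long long) lower);
}

int main (void)
{
  emit_li (5, 0x123456789abLL);            /* a 64-bit constant needing recursion */
  return 0;
}

For values that fit in a signed 32-bit range the sketch degenerates to the usual lui/addi pair, which matches the else branch of load_const above.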
-+
-+/* Expand RISC-V assembly macros into one or more instructions. */
-+static void
-+macro (struct riscv_cl_insn *ip, expressionS *imm_expr,
-+ bfd_reloc_code_real_type *imm_reloc)
-+{
-+ int rd = (ip->insn_opcode >> OP_SH_RD) & OP_MASK_RD;
-+ int rs1 = (ip->insn_opcode >> OP_SH_RS1) & OP_MASK_RS1;
-+ int rs2 = (ip->insn_opcode >> OP_SH_RS2) & OP_MASK_RS2;
-+ int mask = ip->insn_mo->mask;
-+
-+ switch (mask)
-+ {
-+ case M_LI:
-+ load_const (rd, imm_expr);
-+ break;
-+
-+ case M_LA:
-+ case M_LLA:
-+ /* Load the address of a symbol into a register. */
-+ if (!IS_SEXT_32BIT_NUM (imm_expr->X_add_number))
-+ as_bad(_("offset too large"));
-+
-+ if (imm_expr->X_op == O_constant)
-+ load_const (rd, imm_expr);
-+ else if (riscv_opts.pic && mask == M_LA) /* Global PIC symbol */
-+ pcrel_load (rd, rd, imm_expr, LOAD_ADDRESS_INSN,
-+ BFD_RELOC_RISCV_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ else /* Local PIC symbol, or any non-PIC symbol */
-+ pcrel_load (rd, rd, imm_expr, "addi",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LA_TLS_GD:
-+ pcrel_load (rd, rd, imm_expr, "addi",
-+ BFD_RELOC_RISCV_TLS_GD_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LA_TLS_IE:
-+ pcrel_load (rd, rd, imm_expr, LOAD_ADDRESS_INSN,
-+ BFD_RELOC_RISCV_TLS_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LB:
-+ pcrel_load (rd, rd, imm_expr, "lb",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LBU:
-+ pcrel_load (rd, rd, imm_expr, "lbu",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LH:
-+ pcrel_load (rd, rd, imm_expr, "lh",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LHU:
-+ pcrel_load (rd, rd, imm_expr, "lhu",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LW:
-+ pcrel_load (rd, rd, imm_expr, "lw",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LWU:
-+ pcrel_load (rd, rd, imm_expr, "lwu",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_LD:
-+ pcrel_load (rd, rd, imm_expr, "ld",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_FLW:
-+ pcrel_load (rd, rs1, imm_expr, "flw",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_FLD:
-+ pcrel_load (rd, rs1, imm_expr, "fld",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
-+ break;
-+
-+ case M_SB:
-+ pcrel_store (rs2, rs1, imm_expr, "sb",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+ break;
-+
-+ case M_SH:
-+ pcrel_store (rs2, rs1, imm_expr, "sh",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+ break;
-+
-+ case M_SW:
-+ pcrel_store (rs2, rs1, imm_expr, "sw",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+ break;
-+
-+ case M_SD:
-+ pcrel_store (rs2, rs1, imm_expr, "sd",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+ break;
-+
-+ case M_FSW:
-+ pcrel_store (rs2, rs1, imm_expr, "fsw",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+ break;
-+
-+ case M_FSD:
-+ pcrel_store (rs2, rs1, imm_expr, "fsd",
-+ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
-+ break;
-+
-+ case M_CALL:
-+ riscv_call (rd, rs1, imm_expr, *imm_reloc);
-+ break;
-+
-+ default:
-+ as_bad (_("Macro %s not implemented"), ip->insn_mo->name);
-+ break;
-+ }
-+}
-+
-+static const struct percent_op_match percent_op_utype[] =
-+{
-+ {"%tprel_hi", BFD_RELOC_RISCV_TPREL_HI20},
-+ {"%pcrel_hi", BFD_RELOC_RISCV_PCREL_HI20},
-+ {"%tls_ie_pcrel_hi", BFD_RELOC_RISCV_TLS_GOT_HI20},
-+ {"%tls_gd_pcrel_hi", BFD_RELOC_RISCV_TLS_GD_HI20},
-+ {"%hi", BFD_RELOC_RISCV_HI20},
-+ {0, 0}
-+};
-+
-+static const struct percent_op_match percent_op_itype[] =
-+{
-+ {"%lo", BFD_RELOC_RISCV_LO12_I},
-+ {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_I},
-+ {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_I},
-+ {0, 0}
-+};
-+
-+static const struct percent_op_match percent_op_stype[] =
-+{
-+ {"%lo", BFD_RELOC_RISCV_LO12_S},
-+ {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_S},
-+ {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_S},
-+ {0, 0}
-+};
-+
-+static const struct percent_op_match percent_op_rtype[] =
-+{
-+ {"%tprel_add", BFD_RELOC_RISCV_TPREL_ADD},
-+ {0, 0}
-+};
-+
-+/* Return true if *STR points to a relocation operator. When returning true,
-+ move *STR over the operator and store its relocation code in *RELOC.
-+ Leave both *STR and *RELOC alone when returning false. */
-+
-+static bfd_boolean
-+parse_relocation (char **str, bfd_reloc_code_real_type *reloc,
-+ const struct percent_op_match *percent_op)
-+{
-+ for ( ; percent_op->str; percent_op++)
-+ if (strncasecmp (*str, percent_op->str, strlen (percent_op->str)) == 0)
-+ {
-+ int len = strlen (percent_op->str);
-+
-+ if (!ISSPACE ((*str)[len]) && (*str)[len] != '(')
-+ continue;
-+
-+ *str += strlen (percent_op->str);
-+ *reloc = percent_op->reloc;
-+
-+ /* Check whether the output BFD supports this relocation.
-+ If not, issue an error and fall back on something safe. */
-+ if (!bfd_reloc_type_lookup (stdoutput, percent_op->reloc))
-+ {
-+ as_bad ("relocation %s isn't supported by the current ABI",
-+ percent_op->str);
-+ *reloc = BFD_RELOC_UNUSED;
-+ }
-+ return TRUE;
-+ }
-+ return FALSE;
-+}
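
As a side note on parse_relocation: an operator from the tables above only matches when it is followed by whitespace or an opening parenthesis, which is what lets the longer %pcrel_hi/%tprel_hi forms coexist with plain %hi and %lo. A minimal standalone matcher in the same spirit; the table contents and reloc names here are just illustrative strings, not the patch's tables:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

struct op { const char *str; const char *reloc; };

/* Tiny stand-in for a percent_op table.  */
static const struct op ops[] = {
  { "%pcrel_hi", "R_RISCV_PCREL_HI20" },
  { "%hi",       "R_RISCV_HI20" },
  { NULL, NULL }
};

/* Advance *s past a matching operator and return its reloc name, else NULL.  */
static const char *
match_op (const char **s)
{
  const struct op *p;
  for (p = ops; p->str; p++)
    {
      size_t len = strlen (p->str);
      if (strncmp (*s, p->str, len) == 0
          && (isspace ((unsigned char) (*s)[len]) || (*s)[len] == '('))
        {
          *s += len;
          return p->reloc;
        }
    }
  return NULL;
}

int main (void)
{
  const char *str = "%hi(symbol)";
  const char *reloc = match_op (&str);
  printf ("matched %s, remainder \"%s\"\n", reloc ? reloc : "(none)", str);
  return 0;
}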
-+
-+static void
-+my_getExpression (expressionS *ep, char *str)
-+{
-+ char *save_in;
-+
-+ save_in = input_line_pointer;
-+ input_line_pointer = str;
-+ expression (ep);
-+ expr_end = input_line_pointer;
-+ input_line_pointer = save_in;
-+}
-+
-+/* Parse string STR as a 16-bit relocatable operand. Store the
-+ expression in *EP and the relocation, if any, in RELOC.
-+ Return the number of relocation operators used (0 or 1).
-+
-+ On exit, EXPR_END points to the first character after the expression. */
-+
-+static size_t
-+my_getSmallExpression (expressionS *ep, bfd_reloc_code_real_type *reloc,
-+ char *str, const struct percent_op_match *percent_op)
-+{
-+ size_t reloc_index;
-+ unsigned crux_depth, str_depth, regno;
-+ char *crux;
-+
-+ /* First, check for integer registers. */
-+ if (reg_lookup (&str, RCLASS_GPR, &regno))
-+ {
-+ ep->X_op = O_register;
-+ ep->X_add_number = regno;
-+ return 0;
-+ }
-+
-+ /* Search for the start of the main expression.
-+ End the loop with CRUX pointing to the start
-+ of the main expression and with CRUX_DEPTH containing the number
-+ of open brackets at that point. */
-+ reloc_index = -1;
-+ str_depth = 0;
-+ do
-+ {
-+ reloc_index++;
-+ crux = str;
-+ crux_depth = str_depth;
-+
-+ /* Skip over whitespace and brackets, keeping count of the number
-+ of brackets. */
-+ while (*str == ' ' || *str == '\t' || *str == '(')
-+ if (*str++ == '(')
-+ str_depth++;
-+ }
-+ while (*str == '%'
-+ && reloc_index < 1
-+ && parse_relocation (&str, reloc, percent_op));
-+
-+ my_getExpression (ep, crux);
-+ str = expr_end;
-+
-+ /* Match every open bracket. */
-+ while (crux_depth > 0 && (*str == ')' || *str == ' ' || *str == '\t'))
-+ if (*str++ == ')')
-+ crux_depth--;
-+
-+ if (crux_depth > 0)
-+ as_bad ("unclosed '('");
-+
-+ expr_end = str;
-+
-+ return reloc_index;
-+}
-+
-+/* This routine assembles an instruction into its binary format. As a
-+ side effect, it sets the global variable imm_reloc to the type of
-+ relocation to do if one of the operands is an address expression. */
-+
-+static const char *
-+riscv_ip (char *str, struct riscv_cl_insn *ip, expressionS *imm_expr,
-+ bfd_reloc_code_real_type *imm_reloc)
-+{
-+ char *s;
-+ const char *args;
-+ char c = 0;
-+ struct riscv_opcode *insn, *end = &riscv_opcodes[NUMOPCODES];
-+ char *argsStart;
-+ unsigned int regno;
-+ char save_c = 0;
-+ int argnum;
-+ const struct percent_op_match *p;
-+ const char *error = "unrecognized opcode";
-+
-+ /* Parse the name of the instruction. Terminate the string if whitespace
-+ is found so that hash_find only sees the name part of the string. */
-+ for (s = str; *s != '\0'; ++s)
-+ if (ISSPACE (*s))
-+ {
-+ save_c = *s;
-+ *s++ = '\0';
-+ break;
-+ }
-+
-+ insn = (struct riscv_opcode *) hash_find (op_hash, str);
-+
-+ argsStart = s;
-+ for ( ; insn && insn < end && strcmp (insn->name, str) == 0; insn++)
-+ {
-+ if (!riscv_subset_supports (insn->subset))
-+ continue;
-+
-+ create_insn (ip, insn);
-+ argnum = 1;
-+
-+ imm_expr->X_op = O_absent;
-+ *imm_reloc = BFD_RELOC_UNUSED;
-+ p = percent_op_itype;
-+
-+ for (args = insn->args;; ++args)
-+ {
-+ s += strspn (s, " \t");
-+ switch (*args)
-+ {
-+ case '\0': /* end of args */
-+ if (insn->pinfo != INSN_MACRO)
-+ {
-+ if (!insn->match_func (insn, ip->insn_opcode))
-+ break;
-+ if (riscv_insn_length (insn->match) == 2 && !riscv_opts.rvc)
-+ break;
-+ }
-+ if (*s != '\0')
-+ break;
-+ /* Successful assembly. */
-+ error = NULL;
-+ goto out;
-+ /* Xcustom */
-+ case '^':
-+ {
-+ unsigned long max = OP_MASK_RD;
-+ my_getExpression (imm_expr, s);
-+ check_absolute_expr (ip, imm_expr);
-+ switch (*++args)
-+ {
-+ case 'j':
-+ max = OP_MASK_CUSTOM_IMM;
-+ INSERT_OPERAND (CUSTOM_IMM, *ip, imm_expr->X_add_number);
-+ break;
-+ case 'd':
-+ INSERT_OPERAND (RD, *ip, imm_expr->X_add_number);
-+ break;
-+ case 's':
-+ INSERT_OPERAND (RS1, *ip, imm_expr->X_add_number);
-+ break;
-+ case 't':
-+ INSERT_OPERAND (RS2, *ip, imm_expr->X_add_number);
-+ break;
-+ }
-+ imm_expr->X_op = O_absent;
-+ s = expr_end;
-+ if ((unsigned long) imm_expr->X_add_number > max)
-+ as_warn ("Bad custom immediate (%lu), must be at most %lu",
-+ (unsigned long)imm_expr->X_add_number, max);
-+ continue;
-+ }
-+
-+ case 'C': /* RVC */
-+ switch (*++args)
-+ {
-+ case 's': /* RS1 x8-x15 */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+ || !(regno >= 8 && regno <= 15))
-+ break;
-+ INSERT_OPERAND (CRS1S, *ip, regno % 8);
-+ continue;
-+ case 'w': /* RS1 x8-x15, constrained to equal RD x8-x15 */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+ || EXTRACT_OPERAND (CRS1S, ip->insn_opcode) + 8 != regno)
-+ break;
-+ continue;
-+ case 't': /* RS2 x8-x15 */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+ || !(regno >= 8 && regno <= 15))
-+ break;
-+ INSERT_OPERAND (CRS2S, *ip, regno % 8);
-+ continue;
-+ case 'x': /* RS2 x8-x15, constrained to equal RD x8-x15 */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+ || EXTRACT_OPERAND (CRS2S, ip->insn_opcode) + 8 != regno)
-+ break;
-+ continue;
-+ case 'U': /* RS1, constrained to equal RD */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+ || EXTRACT_OPERAND (RD, ip->insn_opcode) != regno)
-+ break;
-+ continue;
-+ case 'V': /* RS2 */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno))
-+ break;
-+ INSERT_OPERAND (CRS2, *ip, regno);
-+ continue;
-+ case 'c': /* RS1, constrained to equal sp */
-+ if (!reg_lookup (&s, RCLASS_GPR, &regno)
-+ || regno != X_SP)
-+ break;
-+ continue;
-+ case '>':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || imm_expr->X_add_number <= 0
-+ || imm_expr->X_add_number >= 64)
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+rvc_imm_done:
-+ s = expr_end;
-+ imm_expr->X_op = O_absent;
-+ continue;
-+ case '<':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_IMM (imm_expr->X_add_number)
-+ || imm_expr->X_add_number <= 0
-+ || imm_expr->X_add_number >= 32)
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'i':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || imm_expr->X_add_number == 0
-+ || !VALID_RVC_SIMM3 (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_SIMM3 (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'j':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || imm_expr->X_add_number == 0
-+ || !VALID_RVC_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'k':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_LW_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_LW_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'l':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_LD_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_LD_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'm':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_LWSP_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |=
-+ ENCODE_RVC_LWSP_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'n':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_LDSP_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |=
-+ ENCODE_RVC_LDSP_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'K':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_ADDI4SPN_IMM (imm_expr->X_add_number)
-+ || imm_expr->X_add_number == 0)
-+ break;
-+ ip->insn_opcode |=
-+ ENCODE_RVC_ADDI4SPN_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'L':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_ADDI16SP_IMM (imm_expr->X_add_number)
-+ || imm_expr->X_add_number == 0)
-+ break;
-+ ip->insn_opcode |=
-+ ENCODE_RVC_ADDI16SP_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'M':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_SWSP_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |=
-+ ENCODE_RVC_SWSP_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'N':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || imm_expr->X_op != O_constant
-+ || !VALID_RVC_SDSP_IMM (imm_expr->X_add_number))
-+ break;
-+ ip->insn_opcode |=
-+ ENCODE_RVC_SDSP_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'u':
-+ p = percent_op_utype;
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p))
-+ break;
-+rvc_lui:
-+ if (imm_expr->X_op != O_constant
-+ || imm_expr->X_add_number <= 0
-+ || imm_expr->X_add_number >= RISCV_BIGIMM_REACH
-+ || (imm_expr->X_add_number >= RISCV_RVC_IMM_REACH / 2
-+ && imm_expr->X_add_number <
-+ RISCV_BIGIMM_REACH - RISCV_RVC_IMM_REACH / 2))
-+ break;
-+ ip->insn_opcode |= ENCODE_RVC_IMM (imm_expr->X_add_number);
-+ goto rvc_imm_done;
-+ case 'v':
-+ if (my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ || (imm_expr->X_add_number & (RISCV_IMM_REACH - 1))
-+ || (int32_t)imm_expr->X_add_number
-+ != imm_expr->X_add_number)
-+ break;
-+ imm_expr->X_add_number =
-+ ((uint32_t) imm_expr->X_add_number) >> RISCV_IMM_BITS;
-+ goto rvc_lui;
-+ case 'p':
-+ goto branch;
-+ case 'a':
-+ goto jump;
-+ case 'D': /* floating-point RS2 x8-x15 */
-+ if (!reg_lookup (&s, RCLASS_FPR, &regno)
-+ || !(regno >= 8 && regno <= 15))
-+ break;
-+ INSERT_OPERAND (CRS2S, *ip, regno % 8);
-+ continue;
-+ case 'T': /* floating-point RS2 */
-+ if (!reg_lookup (&s, RCLASS_FPR, &regno))
-+ break;
-+ INSERT_OPERAND (CRS2, *ip, regno);
-+ continue;
-+ default:
-+ as_bad (_("bad RVC field specifier 'C%c'\n"), *args);
-+ }
-+ break;
-+
-+ case ',':
-+ ++argnum;
-+ if (*s++ == *args)
-+ continue;
-+ s--;
-+ break;
-+
-+ case '(':
-+ case ')':
-+ case '[':
-+ case ']':
-+ if (*s++ == *args)
-+ continue;
-+ break;
-+
-+ case '<': /* shift amount, 0 - 31 */
-+ my_getExpression (imm_expr, s);
-+ check_absolute_expr (ip, imm_expr);
-+ if ((unsigned long) imm_expr->X_add_number > 31)
-+ as_warn (_("Improper shift amount (%lu)"),
-+ (unsigned long) imm_expr->X_add_number);
-+ INSERT_OPERAND (SHAMTW, *ip, imm_expr->X_add_number);
-+ imm_expr->X_op = O_absent;
-+ s = expr_end;
-+ continue;
-+
-+ case '>': /* shift amount, 0 - (XLEN-1) */
-+ my_getExpression (imm_expr, s);
-+ check_absolute_expr (ip, imm_expr);
-+ if ((unsigned long) imm_expr->X_add_number >= xlen)
-+ as_warn (_("Improper shift amount (%lu)"),
-+ (unsigned long) imm_expr->X_add_number);
-+ INSERT_OPERAND (SHAMT, *ip, imm_expr->X_add_number);
-+ imm_expr->X_op = O_absent;
-+ s = expr_end;
-+ continue;
-+
-+ case 'Z': /* CSRRxI immediate */
-+ my_getExpression (imm_expr, s);
-+ check_absolute_expr (ip, imm_expr);
-+ if ((unsigned long) imm_expr->X_add_number > 31)
-+ as_warn (_("Improper CSRxI immediate (%lu)"),
-+ (unsigned long) imm_expr->X_add_number);
-+ INSERT_OPERAND (RS1, *ip, imm_expr->X_add_number);
-+ imm_expr->X_op = O_absent;
-+ s = expr_end;
-+ continue;
-+
-+ case 'E': /* Control register. */
-+ if (reg_lookup (&s, RCLASS_CSR, &regno))
-+ INSERT_OPERAND (CSR, *ip, regno);
-+ else
-+ {
-+ my_getExpression (imm_expr, s);
-+ check_absolute_expr (ip, imm_expr);
-+ if ((unsigned long) imm_expr->X_add_number > 0xfff)
-+ as_warn(_("Improper CSR address (%lu)"),
-+ (unsigned long) imm_expr->X_add_number);
-+ INSERT_OPERAND (CSR, *ip, imm_expr->X_add_number);
-+ imm_expr->X_op = O_absent;
-+ s = expr_end;
-+ }
-+ continue;
-+
-+ case 'm': /* rounding mode */
-+ if (arg_lookup (&s, riscv_rm, ARRAY_SIZE (riscv_rm), &regno))
-+ {
-+ INSERT_OPERAND (RM, *ip, regno);
-+ continue;
-+ }
-+ break;
-+
-+ case 'P':
-+ case 'Q': /* fence predecessor/successor */
-+ if (arg_lookup (&s, riscv_pred_succ, ARRAY_SIZE (riscv_pred_succ),
-+ &regno))
-+ {
-+ if (*args == 'P')
-+ INSERT_OPERAND (PRED, *ip, regno);
-+ else
-+ INSERT_OPERAND (SUCC, *ip, regno);
-+ continue;
-+ }
-+ break;
-+
-+ case 'd': /* destination register */
-+ case 's': /* source register */
-+ case 't': /* target register */
-+ if (reg_lookup (&s, RCLASS_GPR, &regno))
-+ {
-+ c = *args;
-+ if (*s == ' ')
-+ ++s;
-+
-+ /* Now that we have assembled one operand, we use the args
-+ string to figure out where it goes in the instruction. */
-+ switch (c)
-+ {
-+ case 's':
-+ INSERT_OPERAND (RS1, *ip, regno);
-+ break;
-+ case 'd':
-+ INSERT_OPERAND (RD, *ip, regno);
-+ break;
-+ case 't':
-+ INSERT_OPERAND (RS2, *ip, regno);
-+ break;
-+ }
-+ continue;
-+ }
-+ break;
-+
-+ case 'D': /* floating point rd */
-+ case 'S': /* floating point rs1 */
-+ case 'T': /* floating point rs2 */
-+ case 'U': /* floating point rs1 and rs2 */
-+ case 'R': /* floating point rs3 */
-+ if (reg_lookup (&s, RCLASS_FPR, &regno))
-+ {
-+ c = *args;
-+ if (*s == ' ')
-+ ++s;
-+ switch (c)
-+ {
-+ case 'D':
-+ INSERT_OPERAND (RD, *ip, regno);
-+ break;
-+ case 'S':
-+ INSERT_OPERAND (RS1, *ip, regno);
-+ break;
-+ case 'U':
-+ INSERT_OPERAND (RS1, *ip, regno);
-+ /* fallthru */
-+ case 'T':
-+ INSERT_OPERAND (RS2, *ip, regno);
-+ break;
-+ case 'R':
-+ INSERT_OPERAND (RS3, *ip, regno);
-+ break;
-+ }
-+ continue;
-+ }
-+
-+ break;
-+
-+ case 'I':
-+ my_getExpression (imm_expr, s);
-+ if (imm_expr->X_op != O_big
-+ && imm_expr->X_op != O_constant)
-+ break;
-+ normalize_constant_expr (imm_expr);
-+ s = expr_end;
-+ continue;
-+
-+ case 'A':
-+ my_getExpression (imm_expr, s);
-+ normalize_constant_expr (imm_expr);
-+ /* The 'A' format specifier must be a symbol. */
-+ if (imm_expr->X_op != O_symbol)
-+ break;
-+ *imm_reloc = BFD_RELOC_32;
-+ s = expr_end;
-+ continue;
-+
-+ case 'j': /* sign-extended immediate */
-+ *imm_reloc = BFD_RELOC_RISCV_LO12_I;
-+ p = percent_op_itype;
-+ goto alu_op;
-+ case 'q': /* store displacement */
-+ p = percent_op_stype;
-+ *imm_reloc = BFD_RELOC_RISCV_LO12_S;
-+ goto load_store;
-+ case 'o': /* load displacement */
-+ p = percent_op_itype;
-+ *imm_reloc = BFD_RELOC_RISCV_LO12_I;
-+ goto load_store;
-+ case '0': /* AMO "displacement," which must be zero */
-+ p = percent_op_rtype;
-+ *imm_reloc = BFD_RELOC_UNUSED;
-+load_store:
-+ /* Check whether there is only a single bracketed expression
-+ left. If so, it must be the base register and the
-+ constant must be zero. */
-+ imm_expr->X_op = O_constant;
-+ imm_expr->X_add_number = 0;
-+ if (*s == '(' && strchr (s + 1, '(') == 0)
-+ continue;
-+alu_op:
-+ /* If this value won't fit into a 16 bit offset, then go
-+ find a macro that will generate the 32 bit offset
-+ code pattern. */
-+ if (!my_getSmallExpression (imm_expr, imm_reloc, s, p))
-+ {
-+ normalize_constant_expr (imm_expr);
-+ if (imm_expr->X_op != O_constant
-+ || (*args == '0' && imm_expr->X_add_number != 0)
-+ || imm_expr->X_add_number >= (signed)RISCV_IMM_REACH/2
-+ || imm_expr->X_add_number < -(signed)RISCV_IMM_REACH/2)
-+ break;
-+ }
-+
-+ s = expr_end;
-+ continue;
-+
-+ case 'p': /* pc relative offset */
-+branch:
-+ *imm_reloc = BFD_RELOC_12_PCREL;
-+ my_getExpression (imm_expr, s);
-+ s = expr_end;
-+ continue;
-+
-+ case 'u': /* upper 20 bits */
-+ p = percent_op_utype;
-+ if (!my_getSmallExpression (imm_expr, imm_reloc, s, p)
-+ && imm_expr->X_op == O_constant)
-+ {
-+ if (imm_expr->X_add_number < 0
-+ || imm_expr->X_add_number >= (signed)RISCV_BIGIMM_REACH)
-+ as_bad (_("lui expression not in range 0..1048575"));
-+
-+ *imm_reloc = BFD_RELOC_RISCV_HI20;
-+ imm_expr->X_add_number <<= RISCV_IMM_BITS;
-+ }
-+ s = expr_end;
-+ continue;
-+
-+ case 'a': /* 26 bit address */
-+jump:
-+ my_getExpression (imm_expr, s);
-+ s = expr_end;
-+ *imm_reloc = BFD_RELOC_RISCV_JMP;
-+ continue;
-+
-+ case 'c':
-+ my_getExpression (imm_expr, s);
-+ s = expr_end;
-+ *imm_reloc = BFD_RELOC_RISCV_CALL;
-+ if (*s == '@')
-+ *imm_reloc = BFD_RELOC_RISCV_CALL_PLT, s++;
-+ continue;
-+
-+ default:
-+ as_fatal (_("internal error: bad argument type %c"), *args);
-+ }
-+ break;
-+ }
-+ s = argsStart;
-+ error = _("illegal operands");
-+ }
-+
-+out:
-+ /* Restore the character we might have clobbered above. */
-+ if (save_c)
-+ *(argsStart - 1) = save_c;
-+
-+ return error;
-+}
-+
-+void
-+md_assemble (char *str)
-+{
-+ struct riscv_cl_insn insn;
-+ expressionS imm_expr;
-+ bfd_reloc_code_real_type imm_reloc = BFD_RELOC_UNUSED;
-+
-+ const char *error = riscv_ip (str, &insn, &imm_expr, &imm_reloc);
-+
-+ if (error)
-+ {
-+ as_bad ("%s `%s'", error, str);
-+ return;
-+ }
-+
-+ if (insn.insn_mo->pinfo == INSN_MACRO)
-+ macro (&insn, &imm_expr, &imm_reloc);
-+ else
-+ append_insn (&insn, &imm_expr, imm_reloc);
-+}
-+
-+char *
-+md_atof (int type, char *litP, int *sizeP)
-+{
-+ return ieee_md_atof (type, litP, sizeP, TARGET_BYTES_BIG_ENDIAN);
-+}
-+
-+void
-+md_number_to_chars (char *buf, valueT val, int n)
-+{
-+ number_to_chars_littleendian (buf, val, n);
-+}
-+
-+const char *md_shortopts = "O::g::G:";
-+
-+enum options
-+ {
-+ OPTION_M32 = OPTION_MD_BASE,
-+ OPTION_M64,
-+ OPTION_MARCH,
-+ OPTION_PIC,
-+ OPTION_NO_PIC,
-+ OPTION_MSOFT_FLOAT,
-+ OPTION_MHARD_FLOAT,
-+ OPTION_MRVC,
-+ OPTION_MNO_RVC,
-+ OPTION_END_OF_ENUM
-+ };
-+
-+struct option md_longopts[] =
-+{
-+ {"m32", no_argument, NULL, OPTION_M32},
-+ {"m64", no_argument, NULL, OPTION_M64},
-+ {"march", required_argument, NULL, OPTION_MARCH},
-+ {"fPIC", no_argument, NULL, OPTION_PIC},
-+ {"fpic", no_argument, NULL, OPTION_PIC},
-+ {"fno-pic", no_argument, NULL, OPTION_NO_PIC},
-+ {"mrvc", no_argument, NULL, OPTION_MRVC},
-+ {"mno-rvc", no_argument, NULL, OPTION_MNO_RVC},
-+ {"msoft-float", no_argument, NULL, OPTION_MSOFT_FLOAT},
-+ {"mhard-float", no_argument, NULL, OPTION_MHARD_FLOAT},
-+
-+ {NULL, no_argument, NULL, 0}
-+};
-+size_t md_longopts_size = sizeof (md_longopts);
-+
-+enum float_mode {
-+ FLOAT_MODE_DEFAULT,
-+ FLOAT_MODE_SOFT,
-+ FLOAT_MODE_HARD
-+};
-+static enum float_mode marg_float_mode = FLOAT_MODE_DEFAULT;
-+
-+int
-+md_parse_option (int c, char *arg)
-+{
-+ switch (c)
-+ {
-+ case OPTION_MRVC:
-+ riscv_set_rvc (TRUE);
-+ break;
-+
-+ case OPTION_MNO_RVC:
-+ riscv_set_rvc (FALSE);
-+ break;
-+
-+ case OPTION_MSOFT_FLOAT:
-+ marg_float_mode = FLOAT_MODE_SOFT;
-+ break;
-+
-+ case OPTION_MHARD_FLOAT:
-+ marg_float_mode = FLOAT_MODE_HARD;
-+ break;
-+
-+ case OPTION_M32:
-+ xlen = 32;
-+ break;
-+
-+ case OPTION_M64:
-+ xlen = 64;
-+ break;
-+
-+ case OPTION_MARCH:
-+ riscv_set_arch (arg);
-+ break;
-+
-+ case OPTION_NO_PIC:
-+ riscv_opts.pic = FALSE;
-+ break;
-+
-+ case OPTION_PIC:
-+ riscv_opts.pic = TRUE;
-+ break;
-+
-+ default:
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+
-+void
-+riscv_after_parse_args (void)
-+{
-+ struct riscv_subset *subset;
-+ enum float_mode isa_float_mode, elf_float_mode;
-+
-+ if (riscv_subsets == NULL)
-+ riscv_set_arch ("RVIMAFDXcustom");
-+
-+ if (xlen == 0)
-+ {
-+ if (strcmp (default_arch, "riscv32") == 0)
-+ xlen = 32;
-+ else if (strcmp (default_arch, "riscv64") == 0)
-+ xlen = 64;
-+ else
-+ as_bad ("unknown default architecture `%s'", default_arch);
-+ }
-+
-+ isa_float_mode = FLOAT_MODE_SOFT;
-+ for (subset = riscv_subsets; subset != NULL; subset = subset->next)
-+ {
-+ if (strcasecmp(subset->name, "F") == 0)
-+ isa_float_mode = FLOAT_MODE_HARD;
-+ if (strcasecmp(subset->name, "D") == 0)
-+ isa_float_mode = FLOAT_MODE_HARD;
-+ }
-+
-+ if (marg_float_mode == FLOAT_MODE_HARD && isa_float_mode == FLOAT_MODE_SOFT)
-+ as_bad ("Architecture doesn't allow hardfloat ABI");
-+
-+ elf_float_mode = (marg_float_mode == FLOAT_MODE_DEFAULT) ? isa_float_mode
-+ : marg_float_mode;
-+
-+ switch (elf_float_mode) {
-+ case FLOAT_MODE_DEFAULT:
-+ as_bad("a specific float mode must be specified for an ELF");
-+ break;
-+
-+ case FLOAT_MODE_SOFT:
-+ elf_flags |= EF_RISCV_SOFT_FLOAT;
-+ break;
-+
-+ case FLOAT_MODE_HARD:
-+ elf_flags &= ~EF_RISCV_SOFT_FLOAT;
-+ break;
-+ }
-+}
-+
-+void
-+riscv_init_after_args (void)
-+{
-+ /* initialize opcodes */
-+ bfd_riscv_num_opcodes = bfd_riscv_num_builtin_opcodes;
-+ riscv_opcodes = (struct riscv_opcode *) riscv_builtin_opcodes;
-+}
-+
-+long
-+md_pcrel_from (fixS *fixP)
-+{
-+ return fixP->fx_where + fixP->fx_frag->fr_address;
-+}
-+
-+/* Apply a fixup to the object file. */
-+
-+void
-+md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
-+{
-+ bfd_byte *buf = (bfd_byte *) (fixP->fx_frag->fr_literal + fixP->fx_where);
-+
-+ /* Remember value for tc_gen_reloc. */
-+ fixP->fx_addnumber = *valP;
-+
-+ switch (fixP->fx_r_type)
-+ {
-+ case BFD_RELOC_RISCV_TLS_GOT_HI20:
-+ case BFD_RELOC_RISCV_TLS_GD_HI20:
-+ case BFD_RELOC_RISCV_TLS_DTPREL32:
-+ case BFD_RELOC_RISCV_TLS_DTPREL64:
-+ case BFD_RELOC_RISCV_TPREL_HI20:
-+ case BFD_RELOC_RISCV_TPREL_LO12_I:
-+ case BFD_RELOC_RISCV_TPREL_LO12_S:
-+ case BFD_RELOC_RISCV_TPREL_ADD:
-+ S_SET_THREAD_LOCAL (fixP->fx_addsy);
-+ /* fall through */
-+
-+ case BFD_RELOC_RISCV_GOT_HI20:
-+ case BFD_RELOC_RISCV_PCREL_HI20:
-+ case BFD_RELOC_RISCV_HI20:
-+ case BFD_RELOC_RISCV_LO12_I:
-+ case BFD_RELOC_RISCV_LO12_S:
-+ case BFD_RELOC_RISCV_ADD8:
-+ case BFD_RELOC_RISCV_ADD16:
-+ case BFD_RELOC_RISCV_ADD32:
-+ case BFD_RELOC_RISCV_ADD64:
-+ case BFD_RELOC_RISCV_SUB8:
-+ case BFD_RELOC_RISCV_SUB16:
-+ case BFD_RELOC_RISCV_SUB32:
-+ case BFD_RELOC_RISCV_SUB64:
-+ gas_assert (fixP->fx_addsy != NULL);
-+ /* Nothing needed to do. The value comes from the reloc entry. */
-+ break;
-+
-+ case BFD_RELOC_64:
-+ case BFD_RELOC_32:
-+ case BFD_RELOC_16:
-+ case BFD_RELOC_8:
-+ if (fixP->fx_addsy && fixP->fx_subsy)
-+ {
-+ fixP->fx_next = xmemdup (fixP, sizeof (*fixP), sizeof (*fixP));
-+ fixP->fx_next->fx_addsy = fixP->fx_subsy;
-+ fixP->fx_next->fx_subsy = NULL;
-+ fixP->fx_next->fx_offset = 0;
-+ fixP->fx_subsy = NULL;
-+
-+ if (fixP->fx_r_type == BFD_RELOC_64)
-+ {
-+ fixP->fx_r_type = BFD_RELOC_RISCV_ADD64;
-+ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB64;
-+ }
-+ else if (fixP->fx_r_type == BFD_RELOC_32)
-+ {
-+ fixP->fx_r_type = BFD_RELOC_RISCV_ADD32;
-+ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB32;
-+ }
-+ else if (fixP->fx_r_type == BFD_RELOC_16)
-+ {
-+ fixP->fx_r_type = BFD_RELOC_RISCV_ADD16;
-+ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB16;
-+ }
-+ else
-+ {
-+ fixP->fx_r_type = BFD_RELOC_RISCV_ADD8;
-+ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB8;
-+ }
-+ }
-+ /* fall through */
-+
-+ case BFD_RELOC_RVA:
-+ /* If we are deleting this reloc entry, we must fill in the
-+ value now. This can happen if we have a .word which is not
-+ resolved when it appears but is later defined. */
-+ if (fixP->fx_addsy == NULL)
-+ {
-+ gas_assert (fixP->fx_size <= sizeof (valueT));
-+ md_number_to_chars ((char *) buf, *valP, fixP->fx_size);
-+ fixP->fx_done = 1;
-+ }
-+ break;
-+
-+ case BFD_RELOC_RISCV_JMP:
-+ if (fixP->fx_addsy)
-+ {
-+ /* Fill in a tentative value to improve objdump readability. */
-+ bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+ bfd_vma delta = target - md_pcrel_from (fixP);
-+ bfd_putl32 (bfd_getl32 (buf) | ENCODE_UJTYPE_IMM (delta), buf);
-+ }
-+ break;
-+
-+ case BFD_RELOC_12_PCREL:
-+ if (fixP->fx_addsy)
-+ {
-+ /* Fill in a tentative value to improve objdump readability. */
-+ bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+ bfd_vma delta = target - md_pcrel_from (fixP);
-+ bfd_putl32 (bfd_getl32 (buf) | ENCODE_SBTYPE_IMM (delta), buf);
-+ }
-+ break;
-+
-+ case BFD_RELOC_RISCV_RVC_BRANCH:
-+ if (fixP->fx_addsy)
-+ {
-+ /* Fill in a tentative value to improve objdump readability. */
-+ bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+ bfd_vma delta = target - md_pcrel_from (fixP);
-+ bfd_putl16 (bfd_getl16 (buf) | ENCODE_RVC_B_IMM (delta), buf);
-+ }
-+ break;
-+
-+ case BFD_RELOC_RISCV_RVC_JUMP:
-+ if (fixP->fx_addsy)
-+ {
-+ /* Fill in a tentative value to improve objdump readability. */
-+ bfd_vma target = S_GET_VALUE (fixP->fx_addsy) + *valP;
-+ bfd_vma delta = target - md_pcrel_from (fixP);
-+ bfd_putl16 (bfd_getl16 (buf) | ENCODE_RVC_J_IMM (delta), buf);
-+ }
-+ break;
-+
-+ case BFD_RELOC_RISCV_PCREL_LO12_S:
-+ case BFD_RELOC_RISCV_PCREL_LO12_I:
-+ case BFD_RELOC_RISCV_CALL:
-+ case BFD_RELOC_RISCV_CALL_PLT:
-+ case BFD_RELOC_RISCV_ALIGN:
-+ break;
-+
-+ default:
-+ /* We ignore generic BFD relocations we don't know about. */
-+ if (bfd_reloc_type_lookup (stdoutput, fixP->fx_r_type) != NULL)
-+ as_fatal (_("internal error: bad relocation #%d"), fixP->fx_r_type);
-+ }
-+}
-+
-+/* This structure is used to hold a stack of .option values. */
-+
-+struct riscv_option_stack
-+{
-+ struct riscv_option_stack *next;
-+ struct riscv_set_options options;
-+};
-+
-+static struct riscv_option_stack *riscv_opts_stack;
-+
-+/* Handle the .option pseudo-op. */
-+
-+static void
-+s_riscv_option (int x ATTRIBUTE_UNUSED)
-+{
-+ char *name = input_line_pointer, ch;
-+
-+ while (!is_end_of_line[(unsigned char) *input_line_pointer])
-+ ++input_line_pointer;
-+ ch = *input_line_pointer;
-+ *input_line_pointer = '\0';
-+
-+ if (strcmp (name, "rvc") == 0)
-+ riscv_set_rvc (TRUE);
-+ else if (strcmp (name, "norvc") == 0)
-+ riscv_set_rvc (FALSE);
-+ else if (strcmp (name, "push") == 0)
-+ {
-+ struct riscv_option_stack *s;
-+
-+ s = (struct riscv_option_stack *) xmalloc (sizeof *s);
-+ s->next = riscv_opts_stack;
-+ s->options = riscv_opts;
-+ riscv_opts_stack = s;
-+ }
-+ else if (strcmp (name, "pop") == 0)
-+ {
-+ struct riscv_option_stack *s;
-+
-+ s = riscv_opts_stack;
-+ if (s == NULL)
-+ as_bad (_(".option pop with no .option push"));
-+ else
-+ {
-+ riscv_opts = s->options;
-+ riscv_opts_stack = s->next;
-+ free (s);
-+ }
-+ }
-+ else
-+ {
-+ as_warn (_("Unrecognized .option directive: %s\n"), name);
-+ }
-+ *input_line_pointer = ch;
-+ demand_empty_rest_of_line ();
-+}
-+
-+/* Handle the .dtprelword and .dtpreldword pseudo-ops. They generate
-+ a 32-bit or 64-bit DTP-relative relocation (BYTES says which) for
-+ use in DWARF debug information. */
-+
-+static void
-+s_dtprel (int bytes)
-+{
-+ expressionS ex;
-+ char *p;
-+
-+ expression (&ex);
-+
-+ if (ex.X_op != O_symbol)
-+ {
-+ as_bad (_("Unsupported use of %s"), (bytes == 8
-+ ? ".dtpreldword"
-+ : ".dtprelword"));
-+ ignore_rest_of_line ();
-+ }
-+
-+ p = frag_more (bytes);
-+ md_number_to_chars (p, 0, bytes);
-+ fix_new_exp (frag_now, p - frag_now->fr_literal, bytes, &ex, FALSE,
-+ (bytes == 8
-+ ? BFD_RELOC_RISCV_TLS_DTPREL64
-+ : BFD_RELOC_RISCV_TLS_DTPREL32));
-+
-+ demand_empty_rest_of_line ();
-+}
-+
-+/* Handle the .bss pseudo-op. */
-+
-+static void
-+s_bss (int ignore ATTRIBUTE_UNUSED)
-+{
-+ subseg_set (bss_section, 0);
-+ demand_empty_rest_of_line ();
-+}
-+
-+/* Align to a given power of two. */
-+
-+static void
-+s_align (int bytes_p)
-+{
-+ int fill_value = 0, fill_value_specified = 0;
-+ int min_text_alignment = riscv_opts.rvc ? 2 : 4;
-+ int alignment = get_absolute_expression(), bytes;
-+
-+ if (bytes_p)
-+ {
-+ bytes = alignment;
-+ if (bytes < 1 || (bytes & (bytes-1)) != 0)
-+ as_bad (_("alignment not a power of 2: %d"), bytes);
-+ for (alignment = 0; bytes > 1; bytes >>= 1)
-+ alignment++;
-+ }
-+
-+ bytes = 1 << alignment;
-+
-+ if (alignment < 0 || alignment > 31)
-+ as_bad (_("unsatisfiable alignment: %d"), alignment);
-+
-+ if (*input_line_pointer == ',')
-+ {
-+ ++input_line_pointer;
-+ fill_value = get_absolute_expression ();
-+ fill_value_specified = 1;
-+ }
-+
-+ if (!fill_value_specified
-+ && subseg_text_p (now_seg)
-+ && bytes > min_text_alignment)
-+ {
-+ /* Emit the worst-case NOP string. The linker will delete any
-+ unnecessary NOPs. This allows us to support code alignment
-+ in spite of linker relaxations. */
-+ bfd_vma i, worst_case_bytes = bytes - min_text_alignment;
-+ char *nops = frag_more (worst_case_bytes);
-+ for (i = 0; i < worst_case_bytes - 2; i += 4)
-+ md_number_to_chars (nops + i, RISCV_NOP, 4);
-+ if (i < worst_case_bytes)
-+ md_number_to_chars (nops + i, RVC_NOP, 2);
-+
-+ expressionS ex;
-+ ex.X_op = O_constant;
-+ ex.X_add_number = worst_case_bytes;
-+
-+ fix_new_exp (frag_now, nops - frag_now->fr_literal, 0,
-+ &ex, FALSE, BFD_RELOC_RISCV_ALIGN);
-+ }
-+ else if (alignment)
-+ frag_align (alignment, fill_value, 0);
-+
-+ record_alignment (now_seg, alignment);
-+
-+ demand_empty_rest_of_line ();
-+}
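
To put numbers on the worst-case padding emitted above: aligning a text section to 2^alignment bytes reserves (2^alignment - min_text_alignment) bytes of NOPs together with an R_RISCV_ALIGN fixup whose addend is that byte count, and the linker later drops however many of those NOPs relaxation makes unnecessary. A standalone sketch of the byte budget, with constants mirroring s_align above (illustration only):

#include <stdio.h>

int main (void)
{
  int rvc = 1;                               /* .option rvc in effect */
  int min_text_alignment = rvc ? 2 : 4;      /* same rule as s_align */
  int alignment = 4;                         /* ".align 4" => 16-byte boundary */
  int bytes = 1 << alignment;
  int worst_case_bytes = bytes - min_text_alignment;

  /* The padding itself is written as 4-byte NOPs plus one trailing 2-byte
     RVC NOP when the remaining count is not a multiple of four.  */
  printf ("reserve %d NOP bytes; R_RISCV_ALIGN addend = %d\n",
          worst_case_bytes, worst_case_bytes);
  return 0;
}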
-+
-+int
-+md_estimate_size_before_relax (fragS *fragp, asection *segtype)
-+{
-+ return (fragp->fr_var = relaxed_branch_length (fragp, segtype, FALSE));
-+}
-+
-+/* Translate internal representation of relocation info to BFD target
-+ format. */
-+
-+arelent *
-+tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
-+{
-+ arelent *reloc = (arelent *) xmalloc (sizeof (arelent));
-+
-+ reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
-+ *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
-+ reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
-+ reloc->addend = fixp->fx_addnumber;
-+
-+ reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
-+ if (reloc->howto == NULL)
-+ {
-+ if ((fixp->fx_r_type == BFD_RELOC_16 || fixp->fx_r_type == BFD_RELOC_8)
-+ && fixp->fx_addsy != NULL && fixp->fx_subsy != NULL)
-+ {
-+ /* We don't have R_RISCV_8/16, but for this special case,
-+ we can use R_RISCV_ADD8/16 with R_RISCV_SUB8/16. */
-+ return reloc;
-+ }
-+
-+ as_bad_where (fixp->fx_file, fixp->fx_line,
-+ _("cannot represent %s relocation in object file"),
-+ bfd_get_reloc_code_name (fixp->fx_r_type));
-+ return NULL;
-+ }
-+
-+ return reloc;
-+}
-+
-+int
-+riscv_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
-+{
-+ if (RELAX_BRANCH_P (fragp->fr_subtype))
-+ {
-+ offsetT old_var = fragp->fr_var;
-+ fragp->fr_var = relaxed_branch_length (fragp, sec, TRUE);
-+ return fragp->fr_var - old_var;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Expand far branches to multi-instruction sequences. */
-+
-+static void
-+md_convert_frag_branch (fragS *fragp)
-+{
-+ bfd_byte *buf;
-+ expressionS exp;
-+ fixS *fixp;
-+ insn_t insn;
-+ int rs1, reloc;
-+
-+ buf = (bfd_byte *)fragp->fr_literal + fragp->fr_fix;
-+
-+ exp.X_op = O_symbol;
-+ exp.X_add_symbol = fragp->fr_symbol;
-+ exp.X_add_number = fragp->fr_offset;
-+
-+ gas_assert (fragp->fr_var == RELAX_BRANCH_LENGTH (fragp->fr_subtype));
-+
-+ if (RELAX_BRANCH_RVC (fragp->fr_subtype))
-+ {
-+ switch (RELAX_BRANCH_LENGTH (frag