summaryrefslogtreecommitdiffstats
path: root/patches
diff options
context:
space:
mode:
authorStephan Linz <linz@li-pro.net>2011-04-30 10:07:26 +0200
committerMichael Olbrich <m.olbrich@pengutronix.de>2011-05-03 21:21:36 +0200
commit5415182a3a956cebe5c600e0c327fad16e7c5ff7 (patch)
tree1e6da8a2ae6a86b8f21c84f415f9a2e7cd51312a /patches
parent76f4fbad4fc8aee215d65c8401b326af4f36c37f (diff)
downloadOSELAS.Toolchain-5415182a3a956cebe5c600e0c327fad16e7c5ff7.tar.gz
OSELAS.Toolchain-5415182a3a956cebe5c600e0c327fad16e7c5ff7.tar.xz
add Atmel AVR patches for 4.4.3
Signed-off-by: Stephan Linz <linz@li-pro.net>
Signed-off-by: Michael Olbrich <m.olbrich@pengutronix.de>
Diffstat (limited to 'patches')
-rw-r--r--patches/gcc-4.4.3/atmel/0001-Add-support-for-fixed-point-operations.patch2851
-rw-r--r--patches/gcc-4.4.3/atmel/0002-Fix-incomplete-check-in-RTL-for-pm-annotation.patch89
-rw-r--r--patches/gcc-4.4.3/atmel/0003-Fix-handling-of-empty-.data-or-.bss-section.patch216
-rw-r--r--patches/gcc-4.4.3/atmel/0004-Add-support-for-XMEGA-devices.patch897
-rw-r--r--patches/gcc-4.4.3/atmel/0005-Add-remove-support-for-devices.patch662
-rw-r--r--patches/gcc-4.4.3/atmel/0006-Add-support-for-more-XMEGA-devices.patch145
-rw-r--r--patches/gcc-4.4.3/atmel/0007-Add-support-for-devices-with-16-gp-registers.patch2520
-rw-r--r--patches/gcc-4.4.3/atmel/0008-Adds-OS_main-attribute-feature.patch78
-rw-r--r--patches/gcc-4.4.3/atmel/0009-Adds-AVR-builtin-functions.patch611
-rw-r--r--patches/gcc-4.4.3/atmel/0010-Disable-fixed-point-support-for-avrtiny10-family.patch84
-rw-r--r--patches/gcc-4.4.3/series12
11 files changed, 8165 insertions, 0 deletions
diff --git a/patches/gcc-4.4.3/atmel/0001-Add-support-for-fixed-point-operations.patch b/patches/gcc-4.4.3/atmel/0001-Add-support-for-fixed-point-operations.patch
new file mode 100644
index 0000000..f59f59e
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0001-Add-support-for-fixed-point-operations.patch
@@ -0,0 +1,2851 @@
+From fdd8239a3ee04c4926fc5e53338d397e12d96d8a Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Tue, 19 Apr 2011 19:55:44 +0200
+Subject: [PATCH 01/10] Add support for fixed-point operations
+
+This patch contains three patches contributed by Sean D'Epagnier
+that support fixed-point operations for ATMEL AVR micro controllers:
+ - support for fixed-point in avr backend
+ - fixes what Sean D'Epagnier believed was a bug but only for
+ fixedpoint on 8bit cpus but is not specific to avr
+ - support for fixed point in dwarf output
+
+Quote by Sean D'Epagnier (2009-01-10):
+http://old.nabble.com/-patch--patches-for-fixed-point-support-td21387529.html
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/30-gcc-4.4.3-fixedpoint-3-4-2010.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr-fixed.md | 338 +++++++++++++
+ gcc/config/avr/avr-modes.def | 34 ++
+ gcc/config/avr/avr-protos.h | 2 +
+ gcc/config/avr/avr.c | 219 ++++++++-
+ gcc/config/avr/avr.md | 438 ++++++++++-------
+ gcc/config/avr/libgcc-fixed.S | 1123 +++++++++++++++++++++++++++++++++++++++++
+ gcc/config/avr/libgcc.S | 59 ++-
+ gcc/config/avr/t-avr | 35 ++
+ gcc/cse.c | 5 +-
+ gcc/dwarf2out.c | 13 +
+ gcc/fold-const.c | 5 +
+ gcc/varasm.c | 2 +-
+ 12 files changed, 2085 insertions(+), 188 deletions(-)
+ create mode 100644 gcc/config/avr/avr-fixed.md
+ create mode 100644 gcc/config/avr/avr-modes.def
+ create mode 100644 gcc/config/avr/libgcc-fixed.S
+
+diff --git a/gcc/config/avr/avr-fixed.md b/gcc/config/avr/avr-fixed.md
+new file mode 100644
+index 0000000..117664c
+--- /dev/null
++++ b/gcc/config/avr/avr-fixed.md
+@@ -0,0 +1,338 @@
++;; -*- Mode: Scheme -*-
++;; This file contains instructions that support fixed-point operations
++;; for ATMEL AVR micro controllers.
++;; Copyright (C) 2009
++;; Free Software Foundation, Inc.
++;; Contributed by Sean D'Epagnier (sean@depagnier.com)
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_mode_iterator ALLQQ [(QQ "") (UQQ "")])
++(define_mode_iterator ALLHQ [(HQ "") (UHQ "")])
++(define_mode_iterator ALLHA [(HA "") (UHA "")])
++(define_mode_iterator ALLHQHA [(HQ "") (UHQ "") (HA "") (UHA "")])
++(define_mode_iterator ALLSA [(SA "") (USA "")])
++
++;;; Conversions
++
++(define_mode_iterator FIXED1 [(QQ "") (UQQ "") (HQ "") (UHQ "")
++ (SQ "") (USQ "") (DQ "") (UDQ "")
++ (HA "") (UHA "") (SA "") (USA "")
++ (DA "") (UDA "") (TA "") (UTA "")
++ (QI "") (HI "") (SI "") (DI "")])
++(define_mode_iterator FIXED2 [(QQ "") (UQQ "") (HQ "") (UHQ "")
++ (SQ "") (USQ "") (DQ "") (UDQ "")
++ (HA "") (UHA "") (SA "") (USA "")
++ (DA "") (UDA "") (TA "") (UTA "")
++ (QI "") (HI "") (SI "") (DI "")])
++
++(define_insn "fract<FIXED2:mode><FIXED1:mode>2"
++ [(set (match_operand:FIXED1 0 "register_operand" "=r")
++ (fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
++ ""
++ "* return fract_out (insn, operands, 1, NULL);"
++ [(set_attr "cc" "clobber")])
++
++(define_insn "fractuns<FIXED2:mode><FIXED1:mode>2"
++ [(set (match_operand:FIXED1 0 "register_operand" "=r")
++ (unsigned_fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
++ ""
++ "* return fract_out (insn, operands, 0, NULL);"
++ [(set_attr "cc" "clobber")])
++
++;;; Addition/Subtraction, mostly identical to integer versions
++
++(define_insn "add<ALLQQ:mode>3"
++ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
++ (plus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "%0,0")
++ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
++ ""
++ "@
++ add %0,%2
++ subi %0,lo8(-(%2))"
++ [(set_attr "length" "1,1")
++ (set_attr "cc" "set_czn,set_czn")])
++
++(define_insn "sub<ALLQQ:mode>3"
++ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
++ (minus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0,0")
++ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
++ ""
++ "@
++ sub %0,%2
++ subi %0,lo8(%2)"
++ [(set_attr "length" "1,1")
++ (set_attr "cc" "set_czn,set_czn")])
++
++
++(define_insn "add<ALLHQHA:mode>3"
++ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
++ (plus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "%0,0")
++ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
++ ""
++ "@
++ add %A0,%A2\;adc %B0,%B2
++ subi %A0,lo8(-(%2))\;sbci %B0,hi8(-(%2))"
++ [(set_attr "length" "2,2")
++ (set_attr "cc" "set_n,set_czn")])
++
++(define_insn "sub<ALLHQHA:mode>3"
++ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
++ (minus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "0,0")
++ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
++ ""
++ "@
++ sub %A0,%A2\;sbc %B0,%B2
++ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)"
++ [(set_attr "length" "2,2")
++ (set_attr "cc" "set_czn,set_czn")])
++
++(define_insn "add<ALLSA:mode>3"
++ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
++ (plus:ALLSA (match_operand:ALLSA 1 "register_operand" "%0,0")
++ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
++ ""
++ "@
++ add %A0,%A2\;adc %B0,%B2\;adc %C0,%C2\;adc %D0,%D2
++ subi %0,lo8(-(%2))\;sbci %B0,hi8(-(%2))\;sbci %C0,hlo8(-(%2))\;sbci %D0,hhi8(-(%2))"
++ [(set_attr "length" "4,4")
++ (set_attr "cc" "set_n,set_czn")])
++
++(define_insn "sub<ALLSA:mode>3"
++ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
++ (minus:ALLSA (match_operand:ALLSA 1 "register_operand" "0,0")
++ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
++ ""
++ "@
++ sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2\;sbc %D0,%D2
++ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)\;sbci %C0,hlo8(%2)\;sbci %D0,hhi8(%2)"
++ [(set_attr "length" "4,4")
++ (set_attr "cc" "set_czn,set_czn")])
++
++;******************************************************************************
++; mul
++
++(define_insn "mulqq3"
++ [(set (match_operand:QQ 0 "register_operand" "=r")
++ (mult:QQ (match_operand:QQ 1 "register_operand" "a")
++ (match_operand:QQ 2 "register_operand" "a")))]
++ "AVR_HAVE_MUL"
++ "fmuls %1,%2\;mov %0,r1\;clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++(define_insn "muluqq3"
++ [(set (match_operand:UQQ 0 "register_operand" "=r")
++ (mult:UQQ (match_operand:UQQ 1 "register_operand" "r")
++ (match_operand:UQQ 2 "register_operand" "r")))]
++ "AVR_HAVE_MUL"
++ "mul %1,%2\;mov %0,r1\;clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++;; (reg:ALLHQ 20) not clobbered on the enhanced core.
++;; use registers from 16-23 so we can use fmuls
++;; All call-used registers clobbered otherwise - normal library call.
++(define_expand "mul<ALLHQ:mode>3"
++ [(set (reg:ALLHQ 22) (match_operand:ALLHQ 1 "register_operand" ""))
++ (set (reg:ALLHQ 20) (match_operand:ALLHQ 2 "register_operand" ""))
++ (parallel [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
++ (clobber (reg:ALLHQ 22))])
++ (set (match_operand:ALLHQ 0 "register_operand" "") (reg:ALLHQ 18))]
++ "AVR_HAVE_MUL"
++ "")
++
++(define_insn "*mul<ALLHQ:mode>3_enh_call"
++ [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
++ (clobber (reg:ALLHQ 22))]
++ "AVR_HAVE_MUL"
++ "%~call __mul<ALLHQ:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++; Special calls for with and without mul.
++(define_expand "mul<ALLHA:mode>3"
++ [(set (reg:ALLHA 22) (match_operand:ALLHA 1 "register_operand" ""))
++ (set (reg:ALLHA 20) (match_operand:ALLHA 2 "register_operand" ""))
++ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
++ (clobber (reg:ALLHA 22))])
++ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
++ ""
++ "
++{
++ if (!AVR_HAVE_MUL)
++ {
++ emit_insn (gen_mul<ALLHA:mode>3_call (operands[0], operands[1], operands[2]));
++ DONE;
++ }
++}")
++
++(define_insn "*mul<ALLHA:mode>3_enh"
++ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
++ (clobber (reg:ALLHA 22))]
++ "AVR_HAVE_MUL"
++ "%~call __mul<ALLHA:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++; Without multiplier, clobbers both inputs, and needs a separate output register
++(define_expand "mul<ALLHA:mode>3_call"
++ [(set (reg:ALLHA 24) (match_operand:ALLHA 1 "register_operand" ""))
++ (set (reg:ALLHA 22) (match_operand:ALLHA 2 "register_operand" ""))
++ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
++ (clobber (reg:ALLHA 22))
++ (clobber (reg:ALLHA 24))])
++ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
++ "!AVR_HAVE_MUL"
++ "")
++
++(define_insn "*mul<ALLHA:mode>3_call"
++ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
++ (clobber (reg:ALLHA 22))
++ (clobber (reg:ALLHA 24))]
++ "!AVR_HAVE_MUL"
++ "%~call __mul<ALLHA:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++;; On the enhanced core, don't clobber either input, and use a separate output,
++;; r2 is needed as a zero register since r1 is used for mul
++(define_expand "mul<ALLSA:mode>3"
++ [(set (reg:ALLSA 16) (match_operand:ALLSA 1 "register_operand" ""))
++ (set (reg:ALLSA 20) (match_operand:ALLSA 2 "register_operand" ""))
++ (parallel [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
++ (clobber (reg:QI 15))])
++ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 24))]
++ ""
++ "
++{
++ if (!AVR_HAVE_MUL)
++ {
++ emit_insn (gen_mul<ALLSA:mode>3_call (operands[0], operands[1], operands[2]));
++ DONE;
++ }
++}")
++
++(define_insn "*mul<ALLSA:mode>3_enh"
++ [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
++ (clobber (reg:QI 15))]
++ "AVR_HAVE_MUL"
++ "%~call __mul<ALLSA:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++; Without multiplier, clobbers both inputs, needs a separate output, and also
++; needs two more scratch registers
++(define_expand "mul<ALLSA:mode>3_call"
++ [(set (reg:ALLSA 18) (match_operand:ALLSA 1 "register_operand" ""))
++ (set (reg:ALLSA 24) (match_operand:ALLSA 2 "register_operand" ""))
++ (parallel [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
++ (clobber (reg:ALLSA 18))
++ (clobber (reg:ALLSA 24))
++ (clobber (reg:HI 22))])
++ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 14))]
++ "!AVR_HAVE_MUL"
++ "")
++
++(define_insn "*mul<ALLSA:mode>3_call"
++ [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
++ (clobber (reg:ALLSA 18))
++ (clobber (reg:ALLSA 24))
++ (clobber (reg:HI 22))]
++ "!AVR_HAVE_MUL"
++ "%~call __mul<ALLSA:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++; / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
++; div
++
++(define_code_iterator usdiv [udiv div]) ; do signed and unsigned in one shot
++
++(define_expand "<usdiv:code><ALLQQ:mode>3"
++ [(set (reg:ALLQQ 25) (match_operand:ALLQQ 1 "register_operand" ""))
++ (set (reg:ALLQQ 22) (match_operand:ALLQQ 2 "register_operand" ""))
++ (parallel [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
++ (clobber (reg:ALLQQ 25))
++ (clobber (reg:QI 23))])
++ (set (match_operand:ALLQQ 0 "register_operand" "") (reg:ALLQQ 24))]
++ ""
++ "")
++
++(define_insn "*<usdiv:code><ALLQQ:mode>3_call"
++ [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
++ (clobber (reg:ALLQQ 25))
++ (clobber (reg:QI 23))]
++ ""
++ "%~call __<usdiv:code><ALLQQ:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++(define_expand "<usdiv:code><ALLHQHA:mode>3"
++ [(set (reg:ALLHQHA 26) (match_operand:ALLHQHA 1 "register_operand" ""))
++ (set (reg:ALLHQHA 22) (match_operand:ALLHQHA 2 "register_operand" ""))
++ (parallel [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
++ (clobber (reg:ALLHQHA 26))
++ (clobber (reg:QI 21))])
++ (set (match_operand:ALLHQHA 0 "register_operand" "") (reg:ALLHQHA 24))]
++ ""
++ "")
++
++(define_insn "*<usdiv:code><ALLHQHA:mode>3_call"
++ [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
++ (clobber (reg:ALLHQHA 26))
++ (clobber (reg:QI 21))]
++ ""
++ "%~call __<usdiv:code><ALLHQHA:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++; note the first parameter gets passed in already offset by 2 bytes
++(define_expand "<usdiv:code><ALLSA:mode>3"
++ [(set (reg:ALLSA 24) (match_operand:ALLSA 1 "register_operand" ""))
++ (set (reg:ALLSA 18) (match_operand:ALLSA 2 "register_operand" ""))
++ (parallel [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
++ (clobber (reg:HI 26))
++ (clobber (reg:HI 30))])
++ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 22))]
++ ""
++ "")
++
++(define_insn "*<usdiv:code><ALLSA:mode>3_call"
++ [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
++ (clobber (reg:HI 26))
++ (clobber (reg:HI 30))]
++ ""
++ "%~call __<usdiv:code><ALLSA:mode>3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++
++;; abs must be defined for fixed types for correct operation
++
++;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
++
++;; abs
++
++(define_insn "abs<ALLQQ:mode>2"
++ [(set (match_operand:ALLQQ 0 "register_operand" "=r")
++ (abs:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0")))]
++ ""
++ "sbrc %0,7
++ neg %0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "clobber")])
+diff --git a/gcc/config/avr/avr-modes.def b/gcc/config/avr/avr-modes.def
+new file mode 100644
+index 0000000..d4ff603
+--- /dev/null
++++ b/gcc/config/avr/avr-modes.def
+@@ -0,0 +1,34 @@
++/* Definitions of target machine for GCC for AVR.
++ Copyright (C) 2009 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* On 8 bit machines it requires fewer instructions for fixed point
++ routines if the decimal place is on a byte boundary which is not
++ the default for signed accum types. */
++
++ADJUST_IBIT (HA, 7);
++ADJUST_FBIT (HA, 8);
++
++ADJUST_IBIT (SA, 15);
++ADJUST_FBIT (SA, 16);
++
++ADJUST_IBIT (DA, 31);
++ADJUST_FBIT (DA, 32);
++
++ADJUST_IBIT (TA, 63);
++ADJUST_FBIT (TA, 64);
+diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h
+index 2df4a16..d61ac39 100644
+--- a/gcc/config/avr/avr-protos.h
++++ b/gcc/config/avr/avr-protos.h
+@@ -87,6 +87,8 @@ extern const char *lshrqi3_out (rtx insn, rtx operands[], int *len);
+ extern const char *lshrhi3_out (rtx insn, rtx operands[], int *len);
+ extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
+
++extern const char *fract_out (rtx insn, rtx operands[], int intsigned, int *l);
++
+ extern void expand_prologue (void);
+ extern void expand_epilogue (void);
+ extern int avr_epilogue_uses (int regno);
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 79607c3..4351e59 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -308,6 +308,16 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { NULL, ARCH_UNKNOWN, NULL }
+ };
+
++/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
++static bool
++avr_scalar_mode_supported_p (enum machine_mode mode)
++{
++ if (ALL_FIXED_POINT_MODE_P (mode))
++ return true;
++
++ return default_scalar_mode_supported_p (mode);
++}
++
+ int avr_case_values_threshold = 30000;
+
+ /* Initialize the GCC target structure. */
+@@ -361,6 +371,9 @@ int avr_case_values_threshold = 30000;
+ #undef TARGET_HARD_REGNO_SCRATCH_OK
+ #define TARGET_HARD_REGNO_SCRATCH_OK avr_hard_regno_scratch_ok
+
++#undef TARGET_SCALAR_MODE_SUPPORTED_P
++#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+
+ void
+@@ -1718,9 +1731,9 @@ output_movqi (rtx insn, rtx operands[], int *l)
+
+ *l = 1;
+
+- if (register_operand (dest, QImode))
++ if (register_operand (dest, VOIDmode))
+ {
+- if (register_operand (src, QImode)) /* mov r,r */
++ if (register_operand (src, VOIDmode)) /* mov r,r */
+ {
+ if (test_hard_reg_class (STACK_REG, dest))
+ return AS2 (out,%0,%1);
+@@ -1808,9 +1821,9 @@ output_movhi (rtx insn, rtx operands[], int *l)
+ if (!l)
+ l = &dummy;
+
+- if (register_operand (dest, HImode))
++ if (register_operand (dest, VOIDmode))
+ {
+- if (register_operand (src, HImode)) /* mov r,r */
++ if (register_operand (src, VOIDmode)) /* mov r,r */
+ {
+ if (test_hard_reg_class (STACK_REG, dest))
+ {
+@@ -2533,6 +2546,14 @@ output_movsisf(rtx insn, rtx operands[], int *l)
+ {
+ if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
+ {
++ if (AVR_HAVE_MOVW
++ && (UINTVAL (src) >> 16) == (UINTVAL (src) & 0xffff))
++ {
++ *l = 3;
++ return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
++ AS2 (ldi,%B0,hi8(%1)) CR_TAB
++ AS2 (movw,%C0,%A0));
++ }
+ *l = 4;
+ return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
+ AS2 (ldi,%B0,hi8(%1)) CR_TAB
+@@ -4327,6 +4348,196 @@ lshrsi3_out (rtx insn, rtx operands[], int *len)
+ return "";
+ }
+
++/* Outputs instructions needed for fixed point conversion. */
++
++const char *
++fract_out (rtx insn ATTRIBUTE_UNUSED, rtx operands[], int intsigned, int *len)
++{
++ int i, k = 0;
++ int sbit[2], ilen[2], flen[2], tlen[2];
++ int rdest, rsource, offset;
++ int start, end, dir;
++ int hadbst = 0, hadlsl = 0;
++ int clrword = -1, lastclr = 0, clr = 0;
++ char buf[20];
++
++ if (!len)
++ len = &k;
++
++ for (i = 0; i < 2; i++)
++ {
++ enum machine_mode mode = GET_MODE (operands[i]);
++ tlen[i] = GET_MODE_SIZE (mode);
++ if (SCALAR_INT_MODE_P (mode))
++ {
++ sbit[i] = intsigned;
++ ilen[i] = GET_MODE_BITSIZE(mode) / 8;
++ flen[i] = 0;
++ }
++ else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
++ {
++ sbit[i] = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
++ ilen[i] = (GET_MODE_IBIT (mode) + 1) / 8;
++ flen[i] = (GET_MODE_FBIT (mode) + 1) / 8;
++ }
++ else
++ fatal_insn ("unsupported fixed-point conversion", insn);
++ }
++
++ rdest = true_regnum (operands[0]);
++ rsource = true_regnum (operands[1]);
++ offset = flen[1] - flen[0];
++
++ /* Store the sign bit if the destination is a signed
++ fract and the source has a sign in the integer part. */
++ if (sbit[0] && !ilen[0] && sbit[1] && ilen[1])
++ {
++ /* To avoid using bst and bld if the source and
++ destination registers overlap we can use a single lsl
++ since we don't care about preserving the source register. */
++ if (rdest < rsource + tlen[1] && rdest + tlen[0] > rsource)
++ {
++ sprintf (buf, "lsl r%d", rsource + tlen[1] - 1);
++ hadlsl = 1;
++ }
++ else
++ {
++ sprintf (buf, "bst r%d, 7", rsource + tlen[1] - 1);
++ hadbst = 1;
++ }
++ output_asm_insn (buf, operands);
++ ++*len;
++ }
++
++ /* Pick the correct direction. */
++ if (rdest < rsource + offset)
++ {
++ dir = 1;
++ start = 0;
++ end = tlen[0];
++ }
++ else
++ {
++ dir = -1;
++ start = tlen[0] - 1;
++ end = -1;
++ }
++
++ /* Move registers into place, clearing registers that do not overlap. */
++ for (i = start; i != end; i += dir)
++ {
++ int destloc = rdest + i, sourceloc = rsource + i + offset;
++ if (sourceloc < rsource || sourceloc >= rsource + tlen[1])
++ {
++ if (AVR_HAVE_MOVW && i+dir != end
++ && (sourceloc+dir < rsource || sourceloc+dir >= rsource + tlen[1])
++ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
++ || (dir == -1 && (destloc%2) && (sourceloc%2)))
++ && clrword != -1)
++ {
++ sprintf (buf, "movw r%d, r%d", destloc&0xfe, clrword&0xfe);
++ i += dir;
++ }
++ else
++ {
++ /* Do not clear the register if it is going to get
++ sign extended with a mov later. */
++ if (sbit[0] && sbit[1] && i != tlen[0] - 1 && i >= flen[0])
++ continue;
++
++ sprintf (buf, "clr r%d", destloc);
++ if (lastclr)
++ clrword = destloc;
++ clr=1;
++ }
++ }
++ else if (destloc == sourceloc)
++ continue;
++ else
++ if (AVR_HAVE_MOVW && i+dir != end
++ && sourceloc+dir >= rsource && sourceloc+dir < rsource + tlen[1]
++ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
++ || (dir == -1 && (destloc%2) && (sourceloc%2))))
++ {
++ sprintf (buf, "movw r%d, r%d", destloc&0xfe, sourceloc&0xfe);
++ i += dir;
++ }
++ else
++ sprintf (buf, "mov r%d, r%d", destloc, sourceloc);
++
++ output_asm_insn (buf, operands);
++ ++*len;
++
++ lastclr = clr;
++ clr = 0;
++ }
++
++ /* Perform sign extension if needed. */
++ if (sbit[0] && sbit[1] && ilen[0] > ilen[1])
++ {
++ sprintf (buf, "sbrc r%d, 7", rdest+tlen[1]-1-offset);
++ output_asm_insn (buf, operands);
++ sprintf (buf, "com r%d", rdest+tlen[0]-1);
++ output_asm_insn (buf, operands);
++ *len += 2;
++ /* Sign extend additional bytes. */
++ start = rdest + tlen[0] - 2;
++ end = rdest + flen[0] + ilen[1] - 1;
++ for (i = start; i != end; i--)
++ {
++ if (AVR_HAVE_MOVW && i != start && i-1 != end)
++ sprintf (buf, "movw r%d, r%d", --i, rdest+tlen[0]-2);
++ else
++ sprintf (buf, "mov r%d, r%d", i, rdest+tlen[0]-1);
++ output_asm_insn (buf, operands);
++ ++*len;
++ }
++ }
++
++ /* Perform shifts, only needed if one operand
++ is a signed fract, and the other is not. */
++ if (sbit[0] && !ilen[0] && (!sbit[1] || ilen[1]))
++ {
++ start = rdest+flen[0]-1;
++ end = rdest + flen[0] - flen[1];
++ if (end < rdest)
++ end = rdest;
++ for (i = start; i >= end; i--)
++ {
++ if (i == start && !hadlsl)
++ sprintf (buf, "lsr r%d", i);
++ else
++ sprintf (buf, "ror r%d", i);
++ output_asm_insn (buf, operands);
++ ++*len;
++ }
++
++ if (hadbst)
++ {
++ sprintf (buf, "bld r%d, 7", rdest + tlen[0] - 1);
++ output_asm_insn (buf, operands);
++ ++*len;
++ }
++ }
++ else if (sbit[1] && !ilen[1] && (!sbit[0] || ilen[0]))
++ {
++ start = rdest + flen[0] - flen[1];
++ if (start < rdest)
++ start = rdest;
++ for (i = start; i<rdest+flen[0]; i++)
++ {
++ if (i == start)
++ sprintf (buf, "lsl r%d", i);
++ else
++ sprintf (buf, "rol r%d", i);
++ output_asm_insn (buf, operands);
++ ++*len;
++ }
++ }
++
++ return "";
++}
++
+ /* Modifies the length assigned to instruction INSN
+ LEN is the initially computed length of the insn. */
+
+diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
+index 4fd556e..5090e53 100644
+--- a/gcc/config/avr/avr.md
++++ b/gcc/config/avr/avr.md
+@@ -63,7 +63,16 @@
+
+ (include "predicates.md")
+ (include "constraints.md")
+-
++
++; fixed-point instructions.
++(include "avr-fixed.md")
++(define_mode_iterator ALLQ [(QI "") (QQ "") (UQQ "")])
++(define_mode_iterator ALLH [(HI "") (HQ "") (UHQ "") (HA "") (UHA "")])
++(define_mode_iterator ALLS [(SI "") (SA "") (USA "")])
++(define_mode_iterator ALLQS [(QI "") (QQ "") (UQQ "")
++ (HI "") (HQ "") (UHQ "") (HA "") (UHA "")
++ (SI "") (SA "") (USA "")])
++
+ ;; Condition code settings.
+ (define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber"
+ (const_string "none"))
+@@ -176,9 +185,9 @@
+ })
+
+
+-(define_insn "*pushqi"
+- [(set (mem:QI (post_dec (reg:HI REG_SP)))
+- (match_operand:QI 0 "reg_or_0_operand" "r,L"))]
++(define_insn "*push<ALLQ:mode>"
++ [(set (mem:ALLQ (post_dec (reg:HI REG_SP)))
++ (match_operand:ALLQ 0 "reg_or_0_operand" "r,L"))]
+ ""
+ "@
+ push %0
+@@ -186,18 +195,18 @@
+ [(set_attr "length" "1,1")])
+
+
+-(define_insn "*pushhi"
+- [(set (mem:HI (post_dec (reg:HI REG_SP)))
+- (match_operand:HI 0 "reg_or_0_operand" "r,L"))]
++(define_insn "*push<ALLH:mode>"
++ [(set (mem:ALLH (post_dec (reg:HI REG_SP)))
++ (match_operand:ALLH 0 "reg_or_0_operand" "r,L"))]
+ ""
+ "@
+ push %B0\;push %A0
+ push __zero_reg__\;push __zero_reg__"
+ [(set_attr "length" "2,2")])
+
+-(define_insn "*pushsi"
+- [(set (mem:SI (post_dec (reg:HI REG_SP)))
+- (match_operand:SI 0 "reg_or_0_operand" "r,L"))]
++(define_insn "*push<ALLS:mode>"
++ [(set (mem:ALLS (post_dec (reg:HI REG_SP)))
++ (match_operand:ALLS 0 "reg_or_0_operand" "r,L"))]
+ ""
+ "@
+ push %D0\;push %C0\;push %B0\;push %A0
+@@ -223,21 +232,21 @@
+ ;; are call-saved registers, and most of LD_REGS are call-used registers,
+ ;; so this may still be a win for registers live across function calls.
+
+-(define_expand "movqi"
+- [(set (match_operand:QI 0 "nonimmediate_operand" "")
+- (match_operand:QI 1 "general_operand" ""))]
++(define_expand "mov<ALLQ:mode>"
++ [(set (match_operand:ALLQ 0 "nonimmediate_operand" "")
++ (match_operand:ALLQ 1 "general_operand" ""))]
+ ""
+ "/* One of the ops has to be in a register. */
+- if (!register_operand(operand0, QImode)
+- && ! (register_operand(operand1, QImode) || const0_rtx == operand1))
+- operands[1] = copy_to_mode_reg(QImode, operand1);
++ if (!register_operand(operand0, <ALLQ:MODE>mode)
++ && ! (register_operand(operand1, <ALLQ:MODE>mode) || const0_rtx == operand1))
++ operands[1] = copy_to_mode_reg(<ALLQ:MODE>mode, operand1);
+ ")
+
+-(define_insn "*movqi"
+- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
+- (match_operand:QI 1 "general_operand" "r,i,rL,Qm,r,q,i"))]
+- "(register_operand (operands[0],QImode)
+- || register_operand (operands[1], QImode) || const0_rtx == operands[1])"
++(define_insn "*mov<ALLQ:mode>"
++ [(set (match_operand:ALLQ 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
++ (match_operand:ALLQ 1 "general_operand" "r,i,rL,Qm,r,q,i"))]
++ "(register_operand (operands[0],<ALLQ:MODE>mode)
++ || register_operand (operands[1], <ALLQ:MODE>mode) || const0_rtx == operands[1])"
+ "* return output_movqi (insn, operands, NULL);"
+ [(set_attr "length" "1,1,5,5,1,1,4")
+ (set_attr "cc" "none,none,clobber,clobber,none,none,clobber")])
+@@ -269,17 +278,17 @@
+ ;;============================================================================
+ ;; move word (16 bit)
+
+-(define_expand "movhi"
+- [(set (match_operand:HI 0 "nonimmediate_operand" "")
+- (match_operand:HI 1 "general_operand" ""))]
++(define_expand "mov<ALLH:mode>"
++ [(set (match_operand:ALLH 0 "nonimmediate_operand" "")
++ (match_operand:ALLH 1 "general_operand" ""))]
+ ""
+ "
+ {
+ /* One of the ops has to be in a register. */
+- if (!register_operand(operand0, HImode)
+- && !(register_operand(operand1, HImode) || const0_rtx == operands[1]))
++ if (!register_operand(operand0, <ALLH:MODE>mode)
++ && !(register_operand(operand1, <ALLH:MODE>mode) || const0_rtx == operands[1]))
+ {
+- operands[1] = copy_to_mode_reg(HImode, operand1);
++ operands[1] = copy_to_mode_reg(<ALLH:MODE>mode, operand1);
+ }
+ }")
+
+@@ -334,20 +343,20 @@
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")])
+
+-(define_insn "*movhi"
+- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
+- (match_operand:HI 1 "general_operand" "r,m,rL,i,i,r,q"))]
+- "(register_operand (operands[0],HImode)
+- || register_operand (operands[1],HImode) || const0_rtx == operands[1])"
++(define_insn "*mov<ALLH:mode>"
++ [(set (match_operand:ALLH 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
++ (match_operand:ALLH 1 "general_operand" "r,m,rL,i,i,r,q"))]
++ "(register_operand (operands[0],<ALLH:MODE>mode)
++ || register_operand (operands[1],<ALLH:MODE>mode) || const0_rtx == operands[1])"
+ "* return output_movhi (insn, operands, NULL);"
+ [(set_attr "length" "2,6,7,2,6,5,2")
+ (set_attr "cc" "none,clobber,clobber,none,clobber,none,none")])
+
+ (define_peephole2 ; movw
+- [(set (match_operand:QI 0 "even_register_operand" "")
+- (match_operand:QI 1 "even_register_operand" ""))
+- (set (match_operand:QI 2 "odd_register_operand" "")
+- (match_operand:QI 3 "odd_register_operand" ""))]
++ [(set (match_operand:ALLQ 0 "even_register_operand" "")
++ (match_operand:ALLQ 1 "even_register_operand" ""))
++ (set (match_operand:ALLQ 2 "odd_register_operand" "")
++ (match_operand:ALLQ 3 "odd_register_operand" ""))]
+ "(AVR_HAVE_MOVW
+ && REGNO (operands[0]) == REGNO (operands[2]) - 1
+ && REGNO (operands[1]) == REGNO (operands[3]) - 1)"
+@@ -358,10 +367,10 @@
+ })
+
+ (define_peephole2 ; movw_r
+- [(set (match_operand:QI 0 "odd_register_operand" "")
+- (match_operand:QI 1 "odd_register_operand" ""))
+- (set (match_operand:QI 2 "even_register_operand" "")
+- (match_operand:QI 3 "even_register_operand" ""))]
++ [(set (match_operand:ALLQ 0 "odd_register_operand" "")
++ (match_operand:ALLQ 1 "odd_register_operand" ""))
++ (set (match_operand:ALLQ 2 "even_register_operand" "")
++ (match_operand:ALLQ 3 "even_register_operand" ""))]
+ "(AVR_HAVE_MOVW
+ && REGNO (operands[2]) == REGNO (operands[0]) - 1
+ && REGNO (operands[3]) == REGNO (operands[1]) - 1)"
+@@ -374,26 +383,24 @@
+ ;;==========================================================================
+ ;; move double word (32 bit)
+
+-(define_expand "movsi"
+- [(set (match_operand:SI 0 "nonimmediate_operand" "")
+- (match_operand:SI 1 "general_operand" ""))]
++(define_expand "mov<ALLS:mode>"
++ [(set (match_operand:ALLS 0 "nonimmediate_operand" "")
++ (match_operand:ALLS 1 "general_operand" ""))]
+ ""
+ "
+ {
+ /* One of the ops has to be in a register. */
+- if (!register_operand (operand0, SImode)
+- && !(register_operand (operand1, SImode) || const0_rtx == operand1))
++ if (!register_operand (operand0, <ALLS:MODE>mode)
++ && !(register_operand (operand1, <ALLS:MODE>mode) || const0_rtx == operand1))
+ {
+- operands[1] = copy_to_mode_reg (SImode, operand1);
++ operands[1] = copy_to_mode_reg (<ALLS:MODE>mode, operand1);
+ }
+ }")
+
+-
+-
+ (define_peephole2 ; movsi_lreg_const
+ [(match_scratch:QI 2 "d")
+- (set (match_operand:SI 0 "l_register_operand" "")
+- (match_operand:SI 1 "immediate_operand" ""))
++ (set (match_operand:ALLS 0 "l_register_operand" "")
++ (match_operand:ALLS 1 "immediate_operand" ""))
+ (match_dup 2)]
+ "(operands[1] != const0_rtx
+ && operands[1] != constm1_rtx)"
+@@ -403,8 +410,8 @@
+
+ ;; '*' because it is not used in rtl generation.
+ (define_insn "*reload_insi"
+- [(set (match_operand:SI 0 "register_operand" "=r")
+- (match_operand:SI 1 "immediate_operand" "i"))
++ [(set (match_operand:ALLS 0 "register_operand" "=r")
++ (match_operand:ALLS 1 "immediate_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ "* return output_reload_insisf (insn, operands, NULL);"
+@@ -412,11 +419,11 @@
+ (set_attr "cc" "none")])
+
+
+-(define_insn "*movsi"
+- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
+- (match_operand:SI 1 "general_operand" "r,L,Qm,rL,i,i"))]
+- "(register_operand (operands[0],SImode)
+- || register_operand (operands[1],SImode) || const0_rtx == operands[1])"
++(define_insn "*mov<ALLS:mode>"
++ [(set (match_operand:ALLS 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
++ (match_operand:ALLS 1 "general_operand" "r,L,Qm,rL,i,i"))]
++ "(register_operand (operands[0],<ALLS:MODE>mode)
++ || register_operand (operands[1],<ALLS:MODE>mode) || const0_rtx == operands[1])"
+ "* return output_movsisf (insn, operands, NULL);"
+ [(set_attr "length" "4,4,8,9,4,10")
+ (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
+@@ -955,30 +962,61 @@
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+-(define_insn "mulqihi3"
++;; Define code iterators
++(define_code_iterator any_extend [sign_extend zero_extend])
++(define_code_attr s [(sign_extend "s") (zero_extend "")])
++(define_code_attr u [(sign_extend "") (zero_extend "u")])
++(define_code_attr su [(sign_extend "s") (zero_extend "u")])
++
++(define_insn "<any_extend:su>mulqi3_highpart"
++ [(set (match_operand:QI 0 "register_operand" "=r")
++ (truncate:QI
++ (lshiftrt:HI
++ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
++ (any_extend:HI (match_operand:QI 2 "register_operand" "d")))
++ (const_int 8))))]
++ "AVR_HAVE_MUL && !optimize_size"
++ "mul<any_extend:s> %1,%2
++ mov %0,r1
++ clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++(define_insn "<any_extend:u>mulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+- (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
+- (sign_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
++ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
++ (any_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
+ "AVR_HAVE_MUL"
+- "muls %1,%2
++ "mul<any_extend:s> %1,%2
+ movw %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+-(define_insn "umulqihi3"
++(define_insn "*sumulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+- (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+- (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
++ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
++ (zero_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
+ "AVR_HAVE_MUL"
+- "mul %1,%2
++ "mulsu %1,%2
++ movw %0,r0
++ clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++(define_insn "*usmulqihi3"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "a"))
++ (sign_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
++ "AVR_HAVE_MUL"
++ "mulsu %2,%1
+ movw %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+ (define_expand "mulhi3"
+- [(set (match_operand:HI 0 "register_operand" "")
++ [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "register_operand" "")))]
+ ""
+@@ -1025,6 +1063,50 @@
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
++(define_expand "<any_extend:u>mulhisi3"
++ [(set (reg:HI 18) (match_operand:SI 1 "register_operand" ""))
++ (set (reg:HI 20) (match_operand:SI 2 "register_operand" ""))
++ (set (reg:SI 22)
++ (mult:SI (any_extend:SI (reg:HI 18))
++ (any_extend:SI (reg:HI 20))))
++ (set (match_operand:SI 0 "register_operand" "") (reg:SI 22))]
++ "!optimize_size"
++ "")
++
++(define_insn "*<any_extend:u>mulhisi3_call"
++ [(set (reg:SI 22)
++ (mult:SI (any_extend:SI (reg:HI 18))
++ (any_extend:SI (reg:HI 20))))]
++ "!optimize_size"
++ "%~call __<any_extend:u>mulhisi3"
++ [(set_attr "type" "xcall")
++ (set_attr "cc" "clobber")])
++
++(define_expand "<any_extend:su>mulhi3_highpart"
++ [(set (reg:HI 18) (match_operand:HI 1 "register_operand" ""))
++ (set (reg:HI 20) (match_operand:HI 2 "register_operand" ""))
++ (set (reg:HI 24) (truncate:HI (lshiftrt:SI
++ (mult:SI (any_extend:SI (reg:HI 18))
++ (any_extend:SI (reg:HI 20)))
++ (const_int 16))))
++ (set (match_operand:SI 0 "register_operand" "") (reg:HI 24))]
++ "AVR_HAVE_MUL"
++ "")
++
++(define_insn_and_split "*<any_extend:su>mulhi3_highpart_call"
++ [(set (reg:HI 24) (truncate:HI (lshiftrt:SI
++ (mult:SI (any_extend:SI (reg:HI 18))
++ (any_extend:SI (reg:HI 20)))
++ (const_int 16))))]
++ "AVR_HAVE_MUL"
++ ""
++ ""
++ [(set (reg:SI 22)
++ (mult:SI (any_extend:SI (reg:HI 18))
++ (any_extend:SI (reg:HI 20))))
++ (clobber (reg:HI 22))]
++ "")
++
+ ;; Operand 2 (reg:SI 18) not clobbered on the enhanced core.
+ ;; All call-used registers clobbered otherwise - normal library call.
+ (define_expand "mulsi3"
+@@ -1612,9 +1694,9 @@
+ ;;<< << << << << << << << << << << << << << << << << << << << << << << << << <<
+ ;; arithmetic shift left
+
+-(define_expand "ashlqi3"
+- [(set (match_operand:QI 0 "register_operand" "")
+- (ashift:QI (match_operand:QI 1 "register_operand" "")
++(define_expand "ashl<ALLQ:mode>3"
++ [(set (match_operand:ALLQ 0 "register_operand" "")
++ (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
+ ""
+ "")
+@@ -1648,28 +1730,28 @@
+ (set (match_dup 0) (and:QI (match_dup 0) (const_int -64)))]
+ "")
+
+-(define_insn "*ashlqi3"
+- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
+- (ashift:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
+- (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
++(define_insn "*ashl<ALLQ:mode>3"
++ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
++ (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
++ (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
+ ""
+ "* return ashlqi3_out (insn, operands, NULL);"
+ [(set_attr "length" "5,0,1,2,4,6,9")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
+
+-(define_insn "ashlhi3"
+- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
+- (ashift:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+- (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
++(define_insn "ashl<ALLH:mode>3"
++ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
++ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
++ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashlhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "6,0,2,2,4,10,10")
+ (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
+
+-(define_insn "ashlsi3"
+- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+- (ashift:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
+- (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
++(define_insn "ashl<ALLS:mode>3"
++ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
++ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
++ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashlsi3_out (insn, operands, NULL);"
+ [(set_attr "length" "8,0,4,4,8,10,12")
+@@ -1714,18 +1796,18 @@
+
+ (define_peephole2
+ [(match_scratch:QI 3 "d")
+- (set (match_operand:HI 0 "register_operand" "")
+- (ashift:HI (match_operand:HI 1 "register_operand" "")
+- (match_operand:QI 2 "const_int_operand" "")))]
++ (set (match_operand:ALLH 0 "register_operand" "")
++ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "")
++ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+- [(parallel [(set (match_dup 0) (ashift:HI (match_dup 1) (match_dup 2)))
++ [(parallel [(set (match_dup 0) (ashift:ALLH (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+-(define_insn "*ashlhi3_const"
+- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+- (ashift:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+- (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
++(define_insn "*ashl<ALLH:mode>3_const"
++ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
++ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
++ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ "* return ashlhi3_out (insn, operands, NULL);"
+@@ -1734,18 +1816,18 @@
+
+ (define_peephole2
+ [(match_scratch:QI 3 "d")
+- (set (match_operand:SI 0 "register_operand" "")
+- (ashift:SI (match_operand:SI 1 "register_operand" "")
+- (match_operand:QI 2 "const_int_operand" "")))]
++ (set (match_operand:ALLS 0 "register_operand" "")
++ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "")
++ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+- [(parallel [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
++ [(parallel [(set (match_dup 0) (ashift:ALLS (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+-(define_insn "*ashlsi3_const"
+- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+- (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+- (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
++(define_insn "*ashl<ALLS:mode>3_const"
++ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
++ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
++ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ "* return ashlsi3_out (insn, operands, NULL);"
+@@ -1755,27 +1837,27 @@
+ ;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
+ ;; arithmetic shift right
+
+-(define_insn "ashrqi3"
+- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r,r")
+- (ashiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0")
+- (match_operand:QI 2 "general_operand" "r,L,P,K,n,Qm")))]
++(define_insn "ashr<ALLQ:mode>3"
++ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,r,r")
++ (ashiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0")
++ (match_operand:QI 2 "general_operand" "r,L,P,K,n,Qm")))]
+ ""
+ "* return ashrqi3_out (insn, operands, NULL);"
+ [(set_attr "length" "5,0,1,2,5,9")
+ (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber")])
+
+-(define_insn "ashrhi3"
+- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
+- (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
+- (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
++(define_insn "ashr<ALLH:mode>3"
++ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
++ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
++ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashrhi3_out (insn, operands, NULL);"
+ [(set_attr "length" "6,0,2,4,4,10,10")
+ (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
+
+-(define_insn "ashrsi3"
+- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+- (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
++(define_insn "ashr<ALLS:mode>3"
++ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
++ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return ashrsi3_out (insn, operands, NULL);"
+@@ -1786,18 +1868,18 @@
+
+ (define_peephole2
+ [(match_scratch:QI 3 "d")
+- (set (match_operand:HI 0 "register_operand" "")
+- (ashiftrt:HI (match_operand:HI 1 "register_operand" "")
++ (set (match_operand:ALLH 0 "register_operand" "")
++ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+- [(parallel [(set (match_dup 0) (ashiftrt:HI (match_dup 1) (match_dup 2)))
++ [(parallel [(set (match_dup 0) (ashiftrt:ALLH (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+ (define_insn "*ashrhi3_const"
+- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+- (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
+- (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
++ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
++ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
++ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ "* return ashrhi3_out (insn, operands, NULL);"
+@@ -1806,18 +1888,18 @@
+
+ (define_peephole2
+ [(match_scratch:QI 3 "d")
+- (set (match_operand:SI 0 "register_operand" "")
+- (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
+- (match_operand:QI 2 "const_int_operand" "")))]
++ (set (match_operand:ALLS 0 "register_operand" "")
++ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "")
++ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+- [(parallel [(set (match_dup 0) (ashiftrt:SI (match_dup 1) (match_dup 2)))
++ [(parallel [(set (match_dup 0) (ashiftrt:ALLS (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+ (define_insn "*ashrsi3_const"
+- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+- (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+- (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
++ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
++ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
++ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ "* return ashrsi3_out (insn, operands, NULL);"
+@@ -1827,54 +1909,54 @@
+ ;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
+ ;; logical shift right
+
+-(define_expand "lshrqi3"
+- [(set (match_operand:QI 0 "register_operand" "")
+- (lshiftrt:QI (match_operand:QI 1 "register_operand" "")
+- (match_operand:QI 2 "general_operand" "")))]
++(define_expand "lshr<ALLQ:mode>3"
++ [(set (match_operand:ALLQ 0 "register_operand" "")
++ (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "")
++ (match_operand:ALLQ 2 "general_operand" "")))]
+ ""
+ "")
+
+ (define_split ; lshrqi3_const4
+- [(set (match_operand:QI 0 "d_register_operand" "")
+- (lshiftrt:QI (match_dup 0)
++ [(set (match_operand:ALLQ 0 "d_register_operand" "")
++ (lshiftrt:ALLQ (match_dup 0)
+ (const_int 4)))]
+ ""
+- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+- (set (match_dup 0) (and:QI (match_dup 0) (const_int 15)))]
++ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
++ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 15)))]
+ "")
+
+ (define_split ; lshrqi3_const5
+- [(set (match_operand:QI 0 "d_register_operand" "")
+- (lshiftrt:QI (match_dup 0)
++ [(set (match_operand:ALLQ 0 "d_register_operand" "")
++ (lshiftrt:ALLQ (match_dup 0)
+ (const_int 5)))]
+ ""
+- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+- (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 1)))
+- (set (match_dup 0) (and:QI (match_dup 0) (const_int 7)))]
++ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
++ (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 1)))
++ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 7)))]
+ "")
+
+ (define_split ; lshrqi3_const6
+- [(set (match_operand:QI 0 "d_register_operand" "")
+- (lshiftrt:QI (match_dup 0)
++ [(set (match_operand:ALLQ 0 "d_register_operand" "")
++ (lshiftrt:ALLQ (match_dup 0)
+ (const_int 6)))]
+ ""
+- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
+- (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 2)))
+- (set (match_dup 0) (and:QI (match_dup 0) (const_int 3)))]
++ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
++ (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 2)))
++ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 3)))]
+ "")
+
+ (define_insn "*lshrqi3"
+- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
+- (lshiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
+- (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
++ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
++ (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
++ (match_operand:ALLQ 2 "general_operand" "r,L,P,K,n,n,Qm")))]
+ ""
+ "* return lshrqi3_out (insn, operands, NULL);"
+ [(set_attr "length" "5,0,1,2,4,6,9")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
+
+-(define_insn "lshrhi3"
+- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
+- (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
++(define_insn "lshr<ALLH:mode>3"
++ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
++ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ "* return lshrhi3_out (insn, operands, NULL);"
+@@ -1929,17 +2011,17 @@
+
+ (define_peephole2
+ [(match_scratch:QI 3 "d")
+- (set (match_operand:HI 0 "register_operand" "")
+- (lshiftrt:HI (match_operand:HI 1 "register_operand" "")
+- (match_operand:QI 2 "const_int_operand" "")))]
++ (set (match_operand:ALLH 0 "register_operand" "")
++ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
++ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+- [(parallel [(set (match_dup 0) (lshiftrt:HI (match_dup 1) (match_dup 2)))
++ [(parallel [(set (match_dup 0) (lshiftrt:ALLH (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])]
+ "")
+
+-(define_insn "*lshrhi3_const"
+- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
+- (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
++(define_insn "*lshr<ALLH:mode>3_const"
++ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
++ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+@@ -1957,10 +2039,10 @@
+ (clobber (match_dup 3))])]
+ "")
+
+-(define_insn "*lshrsi3_const"
+- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+- (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
+- (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
++(define_insn "*lshr<ALLS:mode>3_const"
++ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
++ (lshiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
++ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ "* return lshrsi3_out (insn, operands, NULL);"
+@@ -2202,53 +2284,53 @@
+ ;;<=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=>
+ ;; compare
+
+-(define_insn "tstqi"
++(define_insn "tst<ALLQ:mode>"
+ [(set (cc0)
+- (match_operand:QI 0 "register_operand" "r"))]
++ (match_operand:ALLQ 0 "register_operand" "r"))]
+ ""
+ "tst %0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+-(define_insn "*reversed_tstqi"
++(define_insn "*reversed_tst<ALLQ:mode>"
+ [(set (cc0)
+ (compare (const_int 0)
+- (match_operand:QI 0 "register_operand" "r")))]
++ (match_operand:ALLQ 0 "register_operand" "r")))]
+ ""
+ "cp __zero_reg__,%0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+-(define_insn "tsthi"
++(define_insn "tst<ALLH:mode>"
+ [(set (cc0)
+- (match_operand:HI 0 "register_operand" "!w,r"))]
++ (match_operand:ALLH 0 "register_operand" "!w,r"))]
+ ""
+ "* return out_tsthi (insn,NULL);"
+ [(set_attr "cc" "compare,compare")
+ (set_attr "length" "1,2")])
+
+-(define_insn "*reversed_tsthi"
++(define_insn "*reversed_tst<ALLH:mode>"
+ [(set (cc0)
+ (compare (const_int 0)
+- (match_operand:HI 0 "register_operand" "r")))]
++ (match_operand:ALLH 0 "register_operand" "r")))]
+ ""
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+-(define_insn "tstsi"
++(define_insn "tst<ALLS:mode>"
+ [(set (cc0)
+- (match_operand:SI 0 "register_operand" "r"))]
++ (match_operand:ALLS 0 "register_operand" "r"))]
+ ""
+ "* return out_tstsi (insn,NULL);"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "4")])
+
+-(define_insn "*reversed_tstsi"
++(define_insn "*reversed_tst<ALLS:mode>"
+ [(set (cc0)
+ (compare (const_int 0)
+- (match_operand:SI 0 "register_operand" "r")))]
++ (match_operand:ALLS 0 "register_operand" "r")))]
+ ""
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0
+@@ -2258,10 +2340,10 @@
+ (set_attr "length" "4")])
+
+
+-(define_insn "cmpqi"
++(define_insn "cmp<ALLQ:mode>"
+ [(set (cc0)
+- (compare (match_operand:QI 0 "register_operand" "r,d")
+- (match_operand:QI 1 "nonmemory_operand" "r,i")))]
++ (compare (match_operand:ALLQ 0 "register_operand" "r,d")
++ (match_operand:ALLQ 1 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ cp %0,%1
+@@ -2279,10 +2361,10 @@
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+-(define_insn "cmphi"
++(define_insn "cmp<ALLH:mode>"
+ [(set (cc0)
+- (compare (match_operand:HI 0 "register_operand" "r,d,d,r,r")
+- (match_operand:HI 1 "nonmemory_operand" "r,M,i,M,i")))
++ (compare (match_operand:ALLH 0 "register_operand" "r,d,d,r,r")
++ (match_operand:ALLH 1 "nonmemory_operand" "r,M,i,M,i")))
+ (clobber (match_scratch:QI 2 "=X,X,&d,&d,&d"))]
+ ""
+ "*{
+@@ -2324,10 +2406,10 @@
+ (set_attr "length" "2,2,3,3,4")])
+
+
+-(define_insn "cmpsi"
++(define_insn "cmp<ALLS:mode>"
+ [(set (cc0)
+- (compare (match_operand:SI 0 "register_operand" "r,d,d,r,r")
+- (match_operand:SI 1 "nonmemory_operand" "r,M,i,M,i")))
++ (compare (match_operand:ALLS 0 "register_operand" "r,d,d,r,r")
++ (match_operand:ALLS 1 "nonmemory_operand" "r,M,i,M,i")))
+ (clobber (match_scratch:QI 2 "=X,X,&d,&d,&d"))]
+ ""
+ "*{
+@@ -2388,7 +2470,7 @@
+ ; Optimize negated tests into reverse compare if overflow is undefined.
+ (define_insn_and_split "negated_tst<mode>"
+ [(set (cc0)
+- (neg:QISI (match_operand:QISI 0 "register_operand")))]
++ (neg:ALLQS (match_operand:ALLQS 0 "register_operand")))]
+
+ "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
+ "#"
+diff --git a/gcc/config/avr/libgcc-fixed.S b/gcc/config/avr/libgcc-fixed.S
+new file mode 100644
+index 0000000..a694ee6
+--- /dev/null
++++ b/gcc/config/avr/libgcc-fixed.S
+@@ -0,0 +1,1123 @@
++/* -*- Mode: Asm -*- */
++/* Copyright (C) 2009
++ Free Software Foundation, Inc.
++ Contributed by Sean D'Epagnier
++
++This file is free software; you can redistribute it and/or modify it
++under the terms of the GNU General Public License as published by the
++Free Software Foundation; either version 3, or (at your option) any
++later version.
++
++In addition to the permissions in the GNU General Public License, the
++Free Software Foundation gives you unlimited permission to link the
++compiled version of this file into combinations with other programs,
++and to distribute those combinations without any restriction coming
++from the use of this file. (The General Public License restrictions
++do apply in other respects; for example, they cover modification of
++the file, and distribution when not linked into a combine
++executable.)
++
++This file is distributed in the hope that it will be useful, but
++WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; see the file COPYING. If not, write to
++the Free Software Foundation, 51 Franklin Street, Fifth Floor,
++Boston, MA 02110-1301, USA. */
++
++/* Fixed point library routines for avr. */
++
++#define __zero_reg__ r1
++#define __tmp_reg__ r0
++#define __SREG__ 0x3f
++#define __SP_H__ 0x3e
++#define __SP_L__ 0x3d
++#define __RAMPZ__ 0x3B
++
++/* Conversions to float. */
++#if defined (L_fractqqsf)
++ .global __fractqqsf
++ .func __fractqqsf
++__fractqqsf:
++ clr r25
++ sbrc r24, 7 ; if negative
++ ser r25 ; sign extend
++ mov r23, r24 ; move in place
++ mov r24, r25 ; sign extend lower byte
++ lsl r23
++ clr r22
++ rjmp __fractsasf ; call larger conversion
++.endfunc
++#endif /* defined (L_fractqqsf) */
++
++#if defined (L_fractuqqsf)
++ .global __fractuqqsf
++ .func __fractuqqsf
++__fractuqqsf:
++ clr r22
++ mov r23, r24
++ clr r24
++ clr r25
++ rjmp __fractsasf ; call larger conversion
++.endfunc
++#endif /* defined (L_fractuqqsf) */
++
++#if defined (L_fracthqsf)
++ .global __fracthqsf
++ .func __fracthqsf
++__fracthqsf:
++ mov_l r22, r24 ; put fractional part in place
++ mov_h r23, r25
++ clr r25
++ sbrc r23, 7 ; if negative
++ ser r25 ; sign extend
++ mov r24, r25 ; sign extend lower byte
++ lsl r22
++ rol r23
++ rjmp __fractsasf ; call larger conversion
++.endfunc
++#endif /* defined (L_fracthqsf) */
++
++#if defined (L_fractuhqsf)
++ .global __fractuhqsf
++ .func __fractuhqsf
++__fractuhqsf:
++ mov_l r22, r24 ; put fractional part in place
++ mov_h r23, r25
++ clr r24
++ clr r25
++ rjmp __fractsasf ; call larger conversion
++.endfunc
++#endif /* defined (L_fractuhqsf) */
++
++#if defined (L_fracthasf)
++ .global __fracthasf
++ .func __fracthasf
++__fracthasf:
++ clr r22
++ mov r23, r24 ; move into place
++ mov r24, r25
++ clr r25
++ sbrc r24, 7 ; if negative
++ ser r25 ; sign extend
++ rjmp __fractsasf ; call larger conversion
++#endif /* defined (L_fracthasf) */
++
++#if defined (L_fractuhasf)
++ .global __fractuhasf
++ .func __fractuhasf
++__fractuhasf:
++ clr r22
++ mov r23, r24 ; move into place
++ rjmp __fractsasf ; call larger conversion
++.endfunc
++#endif /* defined (L_fractuhasf) */
++
++#if defined (L_fractsasf)
++ .global __fractsasf
++ .func __fractsasf
++__fractsasf:
++ rcall __floatsisf
++ tst r25
++ breq __fractsasf_exit ; skip if zero
++ subi r25, 0x08 ; adjust exponent
++__fractsasf_exit:
++ ret
++.endfunc
++#endif /* defined (L_fractsasf) */
++
++#if defined (L_fractusasf)
++ .global __fractusasf
++ .func __fractusasf
++__fractusasf:
++ rcall __floatunsisf
++ tst r25
++ breq __fractusasf_exit ; skip if zero
++ subi r25, 0x08 ; adjust exponent
++__fractusasf_exit:
++ ret
++.endfunc
++#endif /* defined (L_fractusasf) */
++
++#if defined (L_fractsfqq) /* Conversions from float. */
++ .global __fractsfqq
++ .func __fractsfqq
++__fractsfqq:
++ subi r25, -11 ; adjust exponent
++ subi r24, 128
++ rjmp __fixsfsi
++.endfunc
++#endif /* defined (L_fractqq) */
++
++#if defined (L_fractsfuqq)
++ .global __fractsfuqq
++ .func __fractsfuqq
++__fractsfuqq:
++ subi r25, -12 ; adjust exponent
++ rjmp __fixsfsi
++.endfunc
++#endif /* defined (L_fractuqq) */
++
++#if defined (L_fractsfhq)
++ .global __fractsfhq
++ .func __fractsfhq
++__fractsfhq:
++ subi r25, -15 ; adjust exponent
++ subi r24, 128
++ rjmp __fixsfsi
++.endfunc
++#endif /* defined (L_fractsfhq) */
++
++#if defined (L_fractsfuhq)
++ .global __fractsfuhq
++ .func __fractsfuhq
++__fractsfuhq:
++ subi r25, -16 ; adjust exponent
++ rjmp __fixsfsi
++.endfunc
++#endif /* defined (L_fractsfuhq) */
++
++#if defined (L_fractsfha)
++ .global __fractsfha
++ .func __fractsfha
++__fractsfha:
++.endfunc
++ .global __fractsfuha
++ .func __fractsfuha
++__fractsfuha:
++ subi r25, -12 ; adjust exponent
++ rjmp __fixsfsi
++.endfunc
++#endif /* defined (L_fractsfha) */
++
++#if defined (L_fractsfsa)
++ .global __fractsfsa
++ .func __fractsfsa
++__fractsfsa:
++.endfunc
++ .global __fractsfusa
++ .func __fractsfusa
++__fractsfusa:
++ subi r25, -8 ; adjust exponent
++ rjmp __fixsfsi
++.endfunc
++#endif /* defined (L_fractsfsa) */
++
++/* For multiplication the functions here are called directly from
++ avr-fixed.md patterns, instead of using the standard libcall mechanisms.
++ This can make better code because GCC knows exactly which
++ of the call-used registers (not all of them) are clobbered. */
++
++/* mulqq and muluqq open coded on the enhanced core */
++#if !defined (__AVR_HAVE_MUL__)
++/*******************************************************
++ Fractional Multiplication 8 x 8
++*******************************************************/
++#define r_arg2 r22 /* multiplicand */
++#define r_arg1 r24 /* multiplier */
++#define r_res __tmp_reg__ /* result */
++
++#if defined (L_mulqq3)
++ .global __mulqq3
++ .func __mulqq3
++__mulqq3:
++ mov r_res, r_arg1
++ eor r_res, r_arg2
++ bst r_res, 7
++ lsl r_arg1
++ lsl r_arg2
++ brcc __mulqq3_skipneg
++ neg r_arg2
++__mulqq3_skipneg:
++ rcall __muluqq3
++ lsr r_arg1
++ brtc __mulqq3_exit
++ neg r_arg1
++__mulqq3_exit:
++ ret
++
++.endfunc
++#endif /* defined (L_mulqq3) */
++
++#if defined (L_muluqq3)
++ .global __muluqq3
++ .func __muluqq3
++__muluqq3:
++ clr r_res ; clear result
++__muluqq3_loop:
++ lsr r_arg2 ; shift multiplicand
++ sbrc r_arg1,7
++ add r_res,r_arg2
++ breq __muluqq3_exit ; while multiplicand != 0
++ lsl r_arg1
++ brne __muluqq3_loop ; exit if multiplier = 0
++__muluqq3_exit:
++ mov r_arg1,r_res ; result to return register
++ ret
++#undef r_arg2
++#undef r_arg1
++#undef r_res
++
++.endfunc
++#endif /* defined (L_muluqq3) */
++#endif /* !defined (__AVR_HAVE_MUL__) */
++
++/*******************************************************
++ Fractional Multiplication 16 x 16
++*******************************************************/
++
++#if defined (__AVR_HAVE_MUL__)
++#define r_arg1L r22 /* multiplier Low */
++#define r_arg1H r23 /* multiplier High */
++#define r_arg2L r20 /* multiplicand Low */
++#define r_arg2H r21 /* multiplicand High */
++#define r_resL r18 /* result Low */
++#define r_resH r19 /* result High */
++
++#if defined (L_mulhq3)
++ .global __mulhq3
++ .func __mulhq3
++__mulhq3:
++ fmuls r_arg1H, r_arg2H
++ movw r_resL, r0
++ fmulsu r_arg2H, r_arg1L
++ clr r_arg1L
++ sbc r_resH, r_arg1L
++ add r_resL, r1
++ adc r_resH, r_arg1L
++ fmulsu r_arg1H, r_arg2L
++ sbc r_resH, r_arg1L
++ add r_resL, r1
++ adc r_resH, r_arg1L
++ clr __zero_reg__
++ ret
++.endfunc
++#endif /* defined (L_mulhq3) */
++
++#if defined (L_muluhq3)
++ .global __muluhq3
++ .func __muluhq3
++__muluhq3:
++ mul r_arg1H, r_arg2H
++ movw r_resL, r0
++ mul r_arg1H, r_arg2L
++ add r_resL, r1
++ clr __zero_reg__
++ adc r_resH, __zero_reg__
++ mul r_arg1L, r_arg2H
++ add r_resL, r1
++ clr __zero_reg__
++ adc r_resH, __zero_reg__
++ ret
++.endfunc
++#endif /* defined (L_muluhq3) */
++
++#else
++#define r_arg1L r24 /* multiplier Low */
++#define r_arg1H r25 /* multiplier High */
++#define r_arg2L r22 /* multiplicand Low */
++#define r_arg2H r23 /* multiplicand High */
++#define r_resL __tmp_reg__ /* result Low */
++#define r_resH __zero_reg__ /* result High */
++
++#if defined (L_mulhq3)
++ .global __mulhq3
++ .func __mulhq3
++__mulhq3:
++ mov r_resL, r_arg1H
++ eor r_resL, r_arg2H
++ bst r_resL, 7
++ lsl r_arg1L
++ rol r_arg1H
++ lsl r_arg2L
++ rol r_arg2H
++ brcc mulhq3_skipneg
++ com r_arg2H
++ neg r_arg2L
++ sbci r_arg2H, -1
++mulhq3_skipneg:
++ rcall __muluhq3
++ lsr r_arg1H
++ ror r_arg1L
++ brtc mulhq3_exit
++ com r_arg1H
++ neg r_arg1L
++ sbci r_arg1H, -1
++mulhq3_exit:
++ ret
++.endfunc
++#endif /* defined (L_mulhq3) */
++
++#if defined (L_muluhq3)
++ .global __muluhq3
++ .func __muluhq3
++__muluhq3:
++ clr r_resL ; clear result
++__muluhq3_loop:
++ lsr r_arg2H ; shift multiplicand
++ ror r_arg2L
++ sbrs r_arg1H,7
++ rjmp __muluhq3_skip
++ add r_resL,r_arg2L ; result + multiplicand
++ adc r_resH,r_arg2H
++__muluhq3_skip:
++ lsl r_arg1L ; shift multiplier
++ rol r_arg1H
++ brne __muluhq3_loop
++ cpi r_arg1L, 0
++ brne __muluhq3_loop ; exit multiplier = 0
++ mov_l r_arg1L,r_resL
++ mov_h r_arg1H,r_resH ; result to return register
++ clr __zero_reg__ ; zero the zero reg
++ ret
++.endfunc
++#endif /* defined (L_muluhq3) */
++
++#endif /* defined (__AVR_HAVE_MUL__) */
++
++#undef r_arg1L
++#undef r_arg1H
++#undef r_arg2L
++#undef r_arg2H
++#undef r_resL
++#undef r_resH
++
++/*******************************************************
++ Fixed Multiplication 8.8 x 8.8
++*******************************************************/
++
++#if defined (__AVR_HAVE_MUL__)
++#define r_arg1L r22 /* multiplier Low */
++#define r_arg1H r23 /* multiplier High */
++#define r_arg2L r20 /* multiplicand Low */
++#define r_arg2H r21 /* multiplicand High */
++#define r_resL r18 /* result Low */
++#define r_resH r19 /* result High */
++
++#if defined (L_mulha3)
++ .global __mulha3
++ .func __mulha3
++__mulha3:
++ mul r_arg1L, r_arg2L
++ mov r_resL, r1
++ muls r_arg1H, r_arg2H
++ mov r_resH, r0
++ mulsu r_arg1H, r_arg2L
++ add r_resL, r0
++ adc r_resH, r1
++ mulsu r_arg2H, r_arg1L
++ add r_resL, r0
++ adc r_resH, r1
++ clr __zero_reg__
++ ret
++.endfunc
++#endif /* defined (L_mulha3) */
++
++#if defined (L_muluha3)
++ .global __muluha3
++ .func __muluha3
++__muluha3:
++ mul r_arg1L, r_arg2L
++ mov r_resL, r1
++ mul r_arg1H, r_arg2H
++ mov r_resH, r0
++ mul r_arg1H, r_arg2L
++ add r_resL, r0
++ adc r_resH, r1
++ mul r_arg1L, r_arg2H
++ add r_resL, r0
++ adc r_resH, r1
++ clr __zero_reg__
++ ret
++.endfunc
++#endif /* defined (L_muluha3) */
++
++#else
++
++#define r_arg1L r24 /* multiplier Low */
++#define r_arg1H r25 /* multiplier High */
++#define r_arg2L r22 /* multiplicand Low */
++#define r_arg2H r23 /* multiplicand High */
++#define r_resL r18 /* result Low */
++#define r_resH r19 /* result High */
++#define r_scratchL r0 /* scratch Low */
++#define r_scratchH r1
++
++#if defined (L_mulha3)
++ .global __mulha3
++ .func __mulha3
++__mulha3:
++ mov r_resL, r_arg1H
++ eor r_resL, r_arg2H
++ bst r_resL, 7
++ sbrs r_arg1H, 7
++ rjmp __mulha3_arg1pos
++ com r_arg1H
++ neg r_arg1L
++ sbci r_arg1H,-1
++__mulha3_arg1pos:
++ sbrs r_arg2H, 7
++ rjmp __mulha3_arg2pos
++ com r_arg2H
++ neg r_arg2L
++ sbci r_arg2H,-1
++__mulha3_arg2pos:
++ rcall __muluha3
++ brtc __mulha3_exit
++ com r_resH
++ neg r_resL
++ sbci r_resH,-1
++__mulha3_exit:
++ ret
++.endfunc
++#endif /* defined (L_mulha3) */
++
++#if defined (L_muluha3)
++ .global __muluha3
++ .func __muluha3
++__muluha3:
++ clr r_resL ; clear result
++ clr r_resH
++ mov_l r0, r_arg1L ; save multiplicand
++ mov_h r1, r_arg1H
++__muluha3_loop1:
++ sbrs r_arg2H,0
++ rjmp __muluha3_skip1
++ add r_resL,r_arg1L ; result + multiplicand
++ adc r_resH,r_arg1H
++__muluha3_skip1:
++ lsl r_arg1L ; shift multiplicand
++ rol r_arg1H
++ sbiw r_arg1L,0
++ breq __muluha3_loop1_done ; exit multiplicand = 0
++ lsr r_arg2H
++ brne __muluha3_loop1 ; exit multiplier = 0
++__muluha3_loop1_done:
++ mov_l r_arg1L, r_scratchL ; restore multiplicand
++ mov_h r_arg1H, r_scratchH
++__muluha3_loop2:
++ lsr r_arg1H ; shift multiplicand
++ ror r_arg1L
++ sbiw r_arg1L,0
++ breq __muluha3_exit ; exit if multiplicand = 0
++ sbrs r_arg2L,7
++ rjmp __muluha3_skip2
++ add r_resL,r_arg1L ; result + multiplicand
++ adc r_resH,r_arg1H
++__muluha3_skip2:
++ lsl r_arg2L
++ brne __muluha3_loop2 ; exit if multiplier = 0
++__muluha3_exit:
++ clr __zero_reg__ ; got clobbered
++ ret
++.endfunc
++#endif /* defined (L_muluha3) */
++
++#endif /* defined (__AVR_HAVE_MUL__) */
++
++#undef r_arg1L
++#undef r_arg1H
++#undef r_arg2L
++#undef r_arg2H
++#undef r_resL
++#undef r_resH
++
++/*******************************************************
++ Fixed Multiplication 16.16 x 16.16
++*******************************************************/
++
++#if defined (__AVR_HAVE_MUL__)
++/* uses nonstandard registers because mulus only works from 16-23 */
++#define r_clr r15
++
++#define r_arg1L r16 /* multiplier Low */
++#define r_arg1H r17
++#define r_arg1HL r18
++#define r_arg1HH r19 /* multiplier High */
++
++#define r_arg2L r20 /* multiplicand Low */
++#define r_arg2H r21
++#define r_arg2HL r22
++#define r_arg2HH r23 /* multiplicand High */
++
++#define r_resL r24 /* result Low */
++#define r_resH r25
++#define r_resHL r26
++#define r_resHH r27 /* result High */
++
++#if defined (L_mulsa3)
++ .global __mulsa3
++ .func __mulsa3
++__mulsa3:
++ clr r_clr
++ clr r_resH
++ clr r_resHL
++ clr r_resHH
++ mul r_arg1H, r_arg2L
++ mov r_resL, r1
++ mul r_arg1L, r_arg2H
++ add r_resL, r1
++ adc r_resH, r_clr
++ mul r_arg1L, r_arg2HL
++ add r_resL, r0
++ adc r_resH, r1
++ adc r_resHL, r_clr
++ mul r_arg1H, r_arg2H
++ add r_resL, r0
++ adc r_resH, r1
++ adc r_resHL, r_clr
++ mul r_arg1HL, r_arg2L
++ add r_resL, r0
++ adc r_resH, r1
++ adc r_resHL, r_clr
++ mulsu r_arg2HH, r_arg1L
++ sbc r_resHH, r_clr
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mul r_arg1H, r_arg2HL
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mul r_arg1HL, r_arg2H
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mulsu r_arg1HH, r_arg2L
++ sbc r_resHH, r_clr
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mulsu r_arg2HH, r_arg1H
++ add r_resHL, r0
++ adc r_resHH, r1
++ mul r_arg1HL, r_arg2HL
++ add r_resHL, r0
++ adc r_resHH, r1
++ mulsu r_arg1HH, r_arg2H
++ add r_resHL, r0
++ adc r_resHH, r1
++ mulsu r_arg2HH, r_arg1HL
++ add r_resHH, r0
++ mulsu r_arg1HH, r_arg2HL
++ add r_resHH, r0
++ clr __zero_reg__
++ ret
++.endfunc
++#endif
++
++#if defined (L_mulusa3)
++ .global __mulusa3
++ .func __mulusa3
++__mulusa3:
++ clr r_clr
++ clr r_resH
++ clr r_resHL
++ clr r_resHH
++ mul r_arg1H, r_arg2L
++ mov r_resL, r1
++ mul r_arg1L, r_arg2H
++ add r_resL, r1
++ adc r_resH, r_clr
++ mul r_arg1L, r_arg2HL
++ add r_resL, r0
++ adc r_resH, r1
++ adc r_resHL, r_clr
++ mul r_arg1H, r_arg2H
++ add r_resL, r0
++ adc r_resH, r1
++ adc r_resHL, r_clr
++ mul r_arg1HL, r_arg2L
++ add r_resL, r0
++ adc r_resH, r1
++ adc r_resHL, r_clr
++ mul r_arg1L, r_arg2HH
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mul r_arg1H, r_arg2HL
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mul r_arg1HL, r_arg2H
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mul r_arg1HH, r_arg2L
++ add r_resH, r0
++ adc r_resHL, r1
++ adc r_resHH, r_clr
++ mul r_arg1H, r_arg2HH
++ add r_resHL, r0
++ adc r_resHH, r1
++ mul r_arg1HL, r_arg2HL
++ add r_resHL, r0
++ adc r_resHH, r1
++ mul r_arg1HH, r_arg2H
++ add r_resHL, r0
++ adc r_resHH, r1
++ mul r_arg1HL, r_arg2HH
++ add r_resHH, r0
++ mul r_arg1HH, r_arg2HL
++ add r_resHH, r0
++ clr __zero_reg__
++ ret
++.endfunc
++#endif
++
++#else
++
++#define r_arg1L r18 /* multiplier Low */
++#define r_arg1H r19
++#define r_arg1HL r20
++#define r_arg1HH r21 /* multiplier High */
++
++/* these registers needed for sbiw */
++#define r_arg2L r24 /* multiplicand Low */
++#define r_arg2H r25
++#define r_arg2HL r26
++#define r_arg2HH r27 /* multiplicand High */
++
++#define r_resL r14 /* result Low */
++#define r_resH r15
++#define r_resHL r16
++#define r_resHH r17 /* result High */
++
++#define r_scratchL r0 /* scratch Low */
++#define r_scratchH r1
++#define r_scratchHL r22
++#define r_scratchHH r23 /* scratch High */
++
++#if defined (L_mulsa3)
++ .global __mulsa3
++ .func __mulsa3
++__mulsa3:
++ mov r_resL, r_arg1HH
++ eor r_resL, r_arg2HH
++ bst r_resL, 7
++ sbrs r_arg1HH, 7
++ rjmp __mulsa3_arg1pos
++ com r_arg1HH
++ com r_arg1HL
++ com r_arg1H
++ neg r_arg1L
++ sbci r_arg1H,-1
++ sbci r_arg1HL,-1
++ sbci r_arg1HH,-1
++__mulsa3_arg1pos:
++ sbrs r_arg2HH, 7
++ rjmp __mulsa3_arg2pos
++ com r_arg2HH
++ com r_arg2HL
++ com r_arg2H
++ neg r_arg2L
++ sbci r_arg2H,-1
++ sbci r_arg2HL,-1
++ sbci r_arg2HH,-1
++__mulsa3_arg2pos:
++ rcall __mulusa3
++ brtc __mulsa3_exit
++ com r_resHH
++ com r_resHL
++ com r_resH
++ com r_resL
++ adc r_resL,__zero_reg__
++ adc r_resH,__zero_reg__
++ adc r_resHL,__zero_reg__
++ adc r_resHH,__zero_reg__
++__mulsa3_exit:
++ ret
++.endfunc
++#endif /* defined (L_mulsa3) */
++
++#if defined (L_mulusa3)
++ .global __mulusa3
++ .func __mulusa3
++__mulusa3:
++ clr r_resL ; clear result
++ clr r_resH
++ mov_l r_resHL, r_resL
++ mov_h r_resHH, r_resH
++ mov_l r_scratchL, r_arg1L ; save multiplicand
++ mov_h r_scratchH, r_arg1H
++ mov_l r_scratchHL, r_arg1HL
++ mov_h r_scratchHH, r_arg1HH
++__mulusa3_loop1:
++ sbrs r_arg2HL,0
++ rjmp __mulusa3_skip1
++ add r_resL,r_arg1L ; result + multiplicand
++ adc r_resH,r_arg1H
++ adc r_resHL,r_arg1HL
++ adc r_resHH,r_arg1HH
++__mulusa3_skip1:
++ lsl r_arg1L ; shift multiplicand
++ rol r_arg1H
++ rol r_arg1HL
++ rol r_arg1HH
++ lsr r_arg2HH
++ ror r_arg2HL
++ sbiw r_arg2HL,0
++ brne __mulusa3_loop1 ; exit multiplier = 0
++__mulusa3_loop1_done:
++ mov_l r_arg1L, r_scratchL ; restore multiplicand
++ mov_h r_arg1H, r_scratchH
++ mov_l r_arg1HL, r_scratchHL
++ mov_h r_arg1HH, r_scratchHH
++__mulusa3_loop2:
++ lsr r_arg1HH ; shift multiplicand
++ ror r_arg1HL
++ ror r_arg1H
++ ror r_arg1L
++ sbrs r_arg2H,7
++ rjmp __mulusa3_skip2
++ add r_resL,r_arg1L ; result + multiplicand
++ adc r_resH,r_arg1H
++ adc r_resHL,r_arg1HL
++ adc r_resHH,r_arg1HH
++__mulusa3_skip2:
++ lsl r_arg2L
++ rol r_arg2H
++ sbiw r_arg2L,0
++ brne __mulusa3_loop2 ; exit if multiplier = 0
++__mulusa3_exit:
++ clr __zero_reg__ ; got clobbered
++ ret
++.endfunc
++#endif /* defined (L_mulusa3) */
++
++#undef r_scratchL
++#undef r_scratchH
++#undef r_scratchHL
++#undef r_scratchHH
++
++#endif
++
++#undef r_arg1L
++#undef r_arg1H
++#undef r_arg1HL
++#undef r_arg1HH
++
++#undef r_arg2L
++#undef r_arg2H
++#undef r_arg2HL
++#undef r_arg2HH
++
++#undef r_resL
++#undef r_resH
++#undef r_resHL
++#undef r_resHH
++
++/*******************************************************
++ Fractional Division 8 / 8
++*******************************************************/
++#define r_divd r25 /* dividend */
++#define r_quo r24 /* quotient */
++#define r_div r22 /* divisor */
++#define r_cnt r23 /* loop count */
++
++#if defined (L_divqq3)
++ .global __divqq3
++ .func __divqq3
++__divqq3:
++ mov r0, r_divd
++ eor r0, r_div
++ sbrc r_div, 7
++ neg r_div
++ sbrc r_divd, 7
++ neg r_divd
++ cp r_divd, r_div
++ breq __divqq3_minus1 ; if equal return -1
++ rcall __udivuqq3
++ lsr r_quo
++ sbrc r0, 7 ; negate result if needed
++ neg r_quo
++ ret
++__divqq3_minus1:
++ ldi r_quo, 0x80
++ ret
++.endfunc
++#endif /* defined (L_divqq3) */
++
++#if defined (L_udivuqq3)
++ .global __udivuqq3
++ .func __udivuqq3
++__udivuqq3:
++ clr r_quo ; clear quotient
++ ldi r_cnt,8 ; init loop counter
++__udivuqq3_loop:
++ lsl r_divd ; shift dividend
++ brcs __udivuqq3_ep ; dividend overflow
++ cp r_divd,r_div ; compare dividend & divisor
++ brcc __udivuqq3_ep ; dividend >= divisor
++ rol r_quo ; shift quotient (with CARRY)
++ rjmp __udivuqq3_cont
++__udivuqq3_ep:
++ sub r_divd,r_div ; restore dividend
++ lsl r_quo ; shift quotient (without CARRY)
++__udivuqq3_cont:
++ dec r_cnt ; decrement loop counter
++ brne __udivuqq3_loop
++ com r_quo ; complement result
++ ; because C flag was complemented in loop
++ ret
++.endfunc
++#endif /* defined (L_udivuqq3) */
++
++#undef r_divd
++#undef r_quo
++#undef r_div
++#undef r_cnt
++
++
++/*******************************************************
++ Fractional Division 16 / 16
++*******************************************************/
++#define r_divdL r26 /* dividend Low */
++#define r_divdH r27 /* dividend Hig */
++#define r_quoL r24 /* quotient Low */
++#define r_quoH r25 /* quotient High */
++#define r_divL r22 /* divisor */
++#define r_divH r23 /* divisor */
++#define r_cnt 21
++
++#if defined (L_divhq3)
++ .global __divhq3
++ .func __divhq3
++__divhq3:
++ mov r0, r_divdH
++ eor r0, r_divH
++ sbrs r_divH, 7
++ rjmp __divhq3_divpos
++ com r_divH
++ neg r_divL
++ sbci r_divH,-1
++__divhq3_divpos:
++ sbrs r_divdH, 7
++ rjmp __divhq3_divdpos
++ com r_divdH
++ neg r_divdL
++ sbci r_divdH,-1
++__divhq3_divdpos:
++ cp r_divdL, r_divL
++ cpc r_divdH, r_divH
++ breq __divhq3_minus1 ; if equal return -1
++ rcall __udivuhq3
++ lsr r_quoH
++ ror r_quoL
++ sbrs r0, 7 ; negate result if needed
++ ret
++ com r_quoH
++ neg r_quoL
++ sbci r_quoH,-1
++ ret
++__divhq3_minus1:
++ ldi r_quoH, 0x80
++ clr r_quoL
++ ret
++.endfunc
++#endif /* defined (L_divhq3) */
++
++#if defined (L_udivuhq3)
++ .global __udivuhq3
++ .func __udivuhq3
++__udivuhq3:
++ sub r_quoH,r_quoH ; clear quotient and carry
++ .global __udivuha3_entry
++__udivuha3_entry:
++ clr r_quoL ; clear quotient
++ ldi r_cnt,16 ; init loop counter
++__udivuhq3_loop:
++ rol r_divdL ; shift dividend (with CARRY)
++ rol r_divdH
++ brcs __udivuhq3_ep ; dividend overflow
++ cp r_divdL,r_divL ; compare dividend & divisor
++ cpc r_divdH,r_divH
++ brcc __udivuhq3_ep ; dividend >= divisor
++ rol r_quoL ; shift quotient (with CARRY)
++ rjmp __udivuhq3_cont
++__udivuhq3_ep:
++ sub r_divdL,r_divL ; restore dividend
++ sbc r_divdH,r_divH
++ lsl r_quoL ; shift quotient (without CARRY)
++__udivuhq3_cont:
++ rol r_quoH ; shift quotient
++ dec r_cnt ; decrement loop counter
++ brne __udivuhq3_loop
++ com r_quoL ; complement result
++ com r_quoH ; because C flag was complemented in loop
++ ret
++.endfunc
++#endif /* defined (L_udivuhq3) */
++
++/*******************************************************
++ Fixed Division 8.8 / 8.8
++*******************************************************/
++#if defined (L_divha3)
++ .global __divha3
++ .func __divha3
++__divha3:
++ mov r0, r_divdH
++ eor r0, r_divH
++ sbrs r_divH, 7
++ rjmp __divha3_divpos
++ com r_divH
++ neg r_divL
++ sbci r_divH,-1
++__divha3_divpos:
++ sbrs r_divdH, 7
++ rjmp __divha3_divdpos
++ com r_divdH
++ neg r_divdL
++ sbci r_divdH,-1
++__divha3_divdpos:
++ rcall __udivuha3
++ sbrs r0, 7 ; negate result if needed
++ ret
++ com r_quoH
++ neg r_quoL
++ sbci r_quoH,-1
++ ret
++.endfunc
++#endif /* defined (L_divha3) */
++
++#if defined (L_udivuha3)
++ .global __udivuha3
++ .func __udivuha3
++__udivuha3:
++ mov r_quoH, r_divdL
++ mov r_divdL, r_divdH
++ clr r_divdH
++ lsl r_quoH ; shift quotient into carry
++ rjmp __udivuha3_entry ; same as fractional after rearrange
++.endfunc
++#endif /* defined (L_udivuha3) */
++
++#undef r_divdL
++#undef r_divdH
++#undef r_quoL
++#undef r_quoH
++#undef r_divL
++#undef r_divH
++#undef r_cnt
++
++/*******************************************************
++ Fixed Division 16.16 / 16.16
++*******************************************************/
++#define r_arg1L r24 /* arg1 gets passed already in place */
++#define r_arg1H r25
++#define r_arg1HL r26
++#define r_arg1HH r27
++#define r_divdL r26 /* dividend Low */
++#define r_divdH r27
++#define r_divdHL r30
++#define r_divdHH r31 /* dividend High */
++#define r_quoL r22 /* quotient Low */
++#define r_quoH r23
++#define r_quoHL r24
++#define r_quoHH r25 /* quotient High */
++#define r_divL r18 /* divisor Low */
++#define r_divH r19
++#define r_divHL r20
++#define r_divHH r21 /* divisor High */
++#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
++
++#if defined (L_divsa3)
++ .global __divsa3
++ .func __divsa3
++__divsa3:
++ mov r0, r27
++ eor r0, r_divHH
++ sbrs r_divHH, 7
++ rjmp __divsa3_divpos
++ com r_divHH
++ com r_divHL
++ com r_divH
++ neg r_divL
++ sbci r_divH,-1
++ sbci r_divHL,-1
++ sbci r_divHH,-1
++__divsa3_divpos:
++ sbrs r_arg1HH, 7
++ rjmp __divsa3_arg1pos
++ com r_arg1HH
++ com r_arg1HL
++ com r_arg1H
++ neg r_arg1L
++ sbci r_arg1H,-1
++ sbci r_arg1HL,-1
++ sbci r_arg1HH,-1
++__divsa3_arg1pos:
++ rcall __udivusa3
++ sbrs r0, 7 ; negate result if needed
++ ret
++ com r_quoHH
++ com r_quoHL
++ com r_quoH
++ neg r_quoL
++ sbci r_quoH,-1
++ sbci r_quoHL,-1
++ sbci r_quoHH,-1
++ ret
++.endfunc
++#endif /* defined (L_divsa3) */
++
++#if defined (L_udivusa3)
++ .global __udivusa3
++ .func __udivusa3
++__udivusa3:
++ ldi r_divdHL, 32 ; init loop counter
++ mov r_cnt, r_divdHL
++ clr r_divdHL
++ clr r_divdHH
++ mov_l r_quoL, r_divdHL
++ mov_h r_quoH, r_divdHH
++ lsl r_quoHL ; shift quotient into carry
++ rol r_quoHH
++__udivusa3_loop:
++ rol r_divdL ; shift dividend (with CARRY)
++ rol r_divdH
++ rol r_divdHL
++ rol r_divdHH
++ brcs __udivusa3_ep ; dividend overflow
++ cp r_divdL,r_divL ; compare dividend & divisor
++ cpc r_divdH,r_divH
++ cpc r_divdHL,r_divHL
++ cpc r_divdHH,r_divHH
++ brcc __udivusa3_ep ; dividend >= divisor
++ rol r_quoL ; shift quotient (with CARRY)
++ rjmp __udivusa3_cont
++__udivusa3_ep:
++ sub r_divdL,r_divL ; restore dividend
++ sbc r_divdH,r_divH
++ sbc r_divdHL,r_divHL
++ sbc r_divdHH,r_divHH
++ lsl r_quoL ; shift quotient (without CARRY)
++__udivusa3_cont:
++ rol r_quoH ; shift quotient
++ rol r_quoHL
++ rol r_quoHH
++ dec r_cnt ; decrement loop counter
++ brne __udivusa3_loop
++ com r_quoL ; complement result
++ com r_quoH ; because C flag was complemented in loop
++ com r_quoHL
++ com r_quoHH
++ ret
++.endfunc
++#endif /* defined (L_udivusa3) */
++
++#undef r_divdL
++#undef r_divdH
++#undef r_divdHL
++#undef r_divdHH
++#undef r_quoL
++#undef r_quoH
++#undef r_quoHL
++#undef r_quoHH
++#undef r_divL
++#undef r_divH
++#undef r_divHL
++#undef r_divHH
++#undef r_cnt
+diff --git a/gcc/config/avr/libgcc.S b/gcc/config/avr/libgcc.S
+index b6262b3..76fdd9f 100644
+--- a/gcc/config/avr/libgcc.S
++++ b/gcc/config/avr/libgcc.S
+@@ -162,6 +162,23 @@ __mulhi3_exit:
+ .global __mulhisi3
+ .func __mulhisi3
+ __mulhisi3:
++#if defined (__AVR_HAVE_MUL__)
++ muls r21, r19
++ movw r24, r0
++ mul r20, r18
++ movw r22, r0
++ mulsu r21, r18
++ add r23, r0
++ adc r24, r1
++ clr r1
++ adc r25, r1
++ mulsu r19, r20
++ add r23, r0
++ adc r24, r1
++ clr r1
++ adc r25, r1
++ ret
++#else
+ mov_l r18, r24
+ mov_h r19, r25
+ clr r24
+@@ -173,6 +190,7 @@ __mulhisi3:
+ dec r20
+ mov r21, r20
+ rjmp __mulsi3
++#endif /* defined (__AVR_HAVE_MUL__) */
+ .endfunc
+ #endif /* defined (L_mulhisi3) */
+
+@@ -180,13 +198,31 @@ __mulhisi3:
+ .global __umulhisi3
+ .func __umulhisi3
+ __umulhisi3:
+- mov_l r18, r24
+- mov_h r19, r25
++#if defined (__AVR_HAVE_MUL__)
++ mul r21, r19
++ movw r24, r0
++ mul r20, r18
++ movw r22, r0
++ mul r21, r18
++ add r23, r0
++ adc r24, r1
++ clr r1
++ adc r25, r1
++ mul r19, r20
++ add r23, r0
++ adc r24, r1
++ clr r1
++ adc r25, r1
++ ret
++#else
++ mov_l r22, r20
++ mov_h r23, r21
+ clr r24
+ clr r25
+ clr r20
+ clr r21
+ rjmp __mulsi3
++#endif
+ .endfunc
+ #endif /* defined (L_umulhisi3) */
+
+@@ -199,7 +235,6 @@ __umulhisi3:
+ #define r_arg1HL r24
+ #define r_arg1HH r25 /* multiplier High */
+
+-
+ #define r_arg2L r18 /* multiplicand Low */
+ #define r_arg2H r19
+ #define r_arg2HL r20
+@@ -555,6 +590,23 @@ __divmodsi4_neg1:
+ .endfunc
+ #endif /* defined (L_divmodsi4) */
+
++#undef r_remHH
++#undef r_remHL
++#undef r_remH
++#undef r_remL
++
++#undef r_arg1HH
++#undef r_arg1HL
++#undef r_arg1H
++#undef r_arg1L
++
++#undef r_arg2HH
++#undef r_arg2HL
++#undef r_arg2H
++#undef r_arg2L
++
++#undef r_cnt
++
+ /**********************************
+ * This is a prologue subroutine
+ **********************************/
+@@ -897,3 +949,4 @@ __tablejump_elpm__:
+ .endfunc
+ #endif /* defined (L_tablejump_elpm) */
+
++#include "libgcc-fixed.S"
+diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
+index 7513b3d..ef6b7ae 100644
+--- a/gcc/config/avr/t-avr
++++ b/gcc/config/avr/t-avr
+@@ -2,6 +2,8 @@ LIB1ASMSRC = avr/libgcc.S
+ LIB1ASMFUNCS = \
+ _mulqi3 \
+ _mulhi3 \
++ _mulhisi3 \
++ _umulhisi3 \
+ _mulsi3 \
+ _udivmodqi4 \
+ _divmodqi4 \
+@@ -20,6 +22,39 @@ LIB1ASMFUNCS = \
+ _ctors \
+ _dtors
+
++# Fixed point routines
++LIB1ASMFUNCS += \
++ _fractqqsf \
++ _fractuqqsf \
++ _fracthqsf \
++ _fractuhqsf \
++ _fracthasf \
++ _fractuhasf \
++ _fractsasf \
++ _fractusasf \
++ _fractsfqq \
++ _fractsfuqq \
++ _fractsfhq \
++ _fractsfuhq \
++ _fractsfha \
++ _fractsfsa \
++ _mulqq3 \
++ _muluqq3 \
++ _mulhq3 \
++ _muluhq3 \
++ _mulha3 \
++ _muluha3 \
++ _mulsa3 \
++ _mulusa3 \
++ _divqq3 \
++ _udivuqq3 \
++ _divhq3 \
++ _udivuhq3 \
++ _divha3 \
++ _udivuha3 \
++ _divsa3 \
++ _udivusa3
++
+ # We do not have the DF type.
+ # Most of the C functions in libgcc2 use almost all registers,
+ # so use -mcall-prologues for smaller code size.
+diff --git a/gcc/cse.c b/gcc/cse.c
+index c16181e..8f3634a 100644
+--- a/gcc/cse.c
++++ b/gcc/cse.c
+@@ -3511,9 +3511,10 @@ fold_rtx (rtx x, rtx insn)
+ && exact_log2 (- INTVAL (const_arg1)) >= 0)))
+ break;
+
+- /* ??? Vector mode shifts by scalar
++ /* ??? Vector and Fixed Point shifts by scalar
+ shift operand are not supported yet. */
+- if (is_shift && VECTOR_MODE_P (mode))
++ if (is_shift && (VECTOR_MODE_P (mode)
++ || ALL_FIXED_POINT_MODE_P (mode)))
+ break;
+
+ if (is_shift
+diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
+index 7001ccb..602002b 100644
+--- a/gcc/dwarf2out.c
++++ b/gcc/dwarf2out.c
+@@ -9368,6 +9368,12 @@ base_type_die (tree type)
+
+ add_AT_unsigned (base_type_result, DW_AT_byte_size,
+ int_size_in_bytes (type));
++
++ /* version 3 dwarf specifies that for fixed-point types DW_AT_binary_scale
++ describes the location of the decimal place */
++ if (TREE_CODE (type) == FIXED_POINT_TYPE)
++ add_AT_int (base_type_result, DW_AT_binary_scale, -TYPE_FBIT (type));
++
+ add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
+
+ return base_type_result;
+@@ -11268,6 +11274,13 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
+ }
+ break;
+
++ case CONST_FIXED:
++ {
++ add_AT_long_long (die, DW_AT_const_value,
++ CONST_FIXED_VALUE_HIGH (rtl), CONST_FIXED_VALUE_LOW (rtl));
++ }
++ break;
++
+ case CONST_VECTOR:
+ {
+ enum machine_mode mode = GET_MODE (rtl);
+diff --git a/gcc/fold-const.c b/gcc/fold-const.c
+index 1926659..14f67ae 100644
+--- a/gcc/fold-const.c
++++ b/gcc/fold-const.c
+@@ -11859,6 +11859,11 @@ fold_binary (enum tree_code code, tree type, tree op0, tree op1)
+ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
+ return NULL_TREE;
+
++ /* Since fixed point types cannot perform bitwise and, or, etc..
++ don't try to convert to an expression with them. */
++ if (TREE_CODE(type) == FIXED_POINT_TYPE)
++ return NULL_TREE;
++
+ /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
+ if (TREE_CODE (op0) == code && host_integerp (arg1, false)
+ && TREE_INT_CST_LOW (arg1) < TYPE_PRECISION (type)
+diff --git a/gcc/varasm.c b/gcc/varasm.c
+index 9385b47..5f2bcb2 100644
+--- a/gcc/varasm.c
++++ b/gcc/varasm.c
+@@ -2639,7 +2639,7 @@ assemble_integer (rtx x, unsigned int size, unsigned int align, int force)
+ else
+ mclass = MODE_INT;
+
+- omode = mode_for_size (subsize * BITS_PER_UNIT, mclass, 0);
++ omode = mode_for_size (subsize * BITS_PER_UNIT, MODE_INT, 0);
+ imode = mode_for_size (size * BITS_PER_UNIT, mclass, 0);
+
+ for (i = 0; i < size; i += subsize)
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0002-Fix-incomplete-check-in-RTL-for-pm-annotation.patch b/patches/gcc-4.4.3/atmel/0002-Fix-incomplete-check-in-RTL-for-pm-annotation.patch
new file mode 100644
index 0000000..e8995b6
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0002-Fix-incomplete-check-in-RTL-for-pm-annotation.patch
@@ -0,0 +1,89 @@
+From 78d4497ba8df93b225e3303d96334186fa7945ae Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Tue, 19 Apr 2011 22:39:33 +0200
+Subject: [PATCH 02/10] Fix incomplete check in RTL for pm() annotation
+
+Backend (avr part) creates special "pm()" annotation by looking at
+RTL. This is done in avr.c by function avr_assemble_integer(). This
+patch allows function address expressions of the form address+k to
+be correctly recognized as program memory addresses and thus force
+use of pm() assembler syntax.
+
+GCC bug #35013 report:
+http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35013
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/40-gcc-4.4.3-bug-35013.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr-protos.h | 1 +
+ gcc/config/avr/avr.c | 26 ++++++++++++++++++++++----
+ 2 files changed, 23 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h
+index d61ac39..c300c89 100644
+--- a/gcc/config/avr/avr-protos.h
++++ b/gcc/config/avr/avr-protos.h
+@@ -114,6 +114,7 @@ extern int reg_unused_after (rtx insn, rtx reg);
+ extern int _reg_unused_after (rtx insn, rtx reg);
+ extern int avr_jump_mode (rtx x, rtx insn);
+ extern int byte_immediate_operand (rtx op, enum machine_mode mode);
++extern int text_segment_operand (rtx op, enum machine_mode mode);
+ extern int test_hard_reg_class (enum reg_class rclass, rtx x);
+ extern int jump_over_one_insn_p (rtx insn, rtx dest);
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 4351e59..4a2588d 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -1276,8 +1276,7 @@ print_operand_address (FILE *file, rtx addr)
+
+ default:
+ if (CONSTANT_ADDRESS_P (addr)
+- && ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (addr))
+- || GET_CODE (addr) == LABEL_REF))
++ && text_segment_operand (addr, VOIDmode))
+ {
+ fprintf (file, "gs(");
+ output_addr_const (file,addr);
+@@ -1593,6 +1592,26 @@ byte_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+ && INTVAL (op) <= 0xff && INTVAL (op) >= 0);
+ }
+
++/* Return true if OP is a program memory reference.*/
++int
++text_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ switch (GET_CODE (op))
++ {
++ case LABEL_REF :
++ return true;
++ case SYMBOL_REF :
++ return SYMBOL_REF_FUNCTION_P (op);
++ case PLUS :
++ /* Assume canonical format of symbol + constant.
++ Fall through. */
++ case CONST :
++ return text_segment_operand (XEXP (op, 0), VOIDmode);
++ default :
++ return false;
++ }
++}
++
+ /* Output all insn addresses and their sizes into the assembly language
+ output file. This is helpful for debugging whether the length attributes
+ in the md file are correct.
+@@ -4808,8 +4827,7 @@ static bool
+ avr_assemble_integer (rtx x, unsigned int size, int aligned_p)
+ {
+ if (size == POINTER_SIZE / BITS_PER_UNIT && aligned_p
+- && ((GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (x))
+- || GET_CODE (x) == LABEL_REF))
++ && text_segment_operand (x, VOIDmode) )
+ {
+ fputs ("\t.word\tgs(", asm_out_file);
+ output_addr_const (asm_out_file, x);
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0003-Fix-handling-of-empty-.data-or-.bss-section.patch b/patches/gcc-4.4.3/atmel/0003-Fix-handling-of-empty-.data-or-.bss-section.patch
new file mode 100644
index 0000000..f4a2555
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0003-Fix-handling-of-empty-.data-or-.bss-section.patch
@@ -0,0 +1,216 @@
+From 946c12c25ba51e81a019b52d9d74d170499a9586 Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Tue, 19 Apr 2011 22:52:41 +0200
+Subject: [PATCH 03/10] Fix handling of empty .data or .bss section
+
+Backend (avr part) lacks correct handling of empty .data or .bss
+section and shouldn't emit __do_copy_data or __do_clear_bss in
+such cases. This patch allows output these only if there is anything
+in the .data or .bss sections. Some code size could be saved by not
+linking in the initialization code from libgcc if one or both
+sections are empty.
+
+GCC bug #18145 report:
+http://gcc.gnu.org/bugzilla/show_bug.cgi?id=18145
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/41-gcc-4.4.3-bug-18145-v4.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr-protos.h | 3 +
+ gcc/config/avr/avr.c | 91 ++++++++++++++++++++++++++++++++++++++++---
+ gcc/config/avr/avr.h | 14 +-----
+ 3 files changed, 91 insertions(+), 17 deletions(-)
+
+diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h
+index c300c89..c832e39 100644
+--- a/gcc/config/avr/avr-protos.h
++++ b/gcc/config/avr/avr-protos.h
+@@ -39,6 +39,8 @@ extern int avr_simple_epilogue (void);
+ extern void gas_output_limited_string (FILE *file, const char *str);
+ extern void gas_output_ascii (FILE *file, const char *str, size_t length);
+ extern int avr_hard_regno_rename_ok (unsigned int, unsigned int);
++extern void avr_asm_output_common (FILE *stream, const char *name, unsigned HOST_WIDE_INT size, unsigned HOST_WIDE_INT rounded);
++extern void avr_asm_output_local (FILE *stream, const char *name, unsigned HOST_WIDE_INT size, unsigned HOST_WIDE_INT rounded);
+
+ #ifdef TREE_CODE
+ extern void asm_output_external (FILE *file, tree decl, char *name);
+@@ -126,6 +128,7 @@ extern RTX_CODE avr_normalize_condition (RTX_CODE condition);
+ extern int compare_eq_p (rtx insn);
+ extern void out_shift_with_cnt (const char *templ, rtx insn,
+ rtx operands[], int *len, int t_len);
++extern rtx avr_return_addr_rtx (int count, rtx tem);
+ #endif /* RTX_CODE */
+
+ #ifdef HAVE_MACHINE_MODES
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 4a2588d..a8aff30 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -76,6 +76,12 @@ static rtx avr_function_value (const_tree, const_tree, bool);
+ static void avr_insert_attributes (tree, tree *);
+ static void avr_asm_init_sections (void);
+ static unsigned int avr_section_type_flags (tree, const char *, int);
++static void avr_asm_named_section (const char *name, unsigned int flags, tree decl);
++/* Track if code will use .bss and/or .data */
++static int avr_need_clear_bss_p = 0;
++static int avr_need_copy_data_p = 0;
++static void avr_output_data_section_asm_op (const void*);
++static void avr_output_bss_section_asm_op (const void*);
+
+ static void avr_reorg (void);
+ static void avr_asm_out_ctor (rtx, int);
+@@ -5135,6 +5141,54 @@ avr_output_progmem_section_asm_op (const void *arg ATTRIBUTE_UNUSED)
+ fprintf (asm_out_file, "\t.p2align 1\n");
+ }
+
++/* ASM_OUTPUT_COMMON */
++/* Track need of __do_clear_bss */
++
++void
++avr_asm_output_common (FILE *stream, const char *name,
++ unsigned HOST_WIDE_INT size,
++ unsigned HOST_WIDE_INT rounded ATTRIBUTE_UNUSED)
++{
++ avr_need_clear_bss_p = 1;
++ fputs ("\t.comm ", stream);
++ assemble_name (stream, name);
++ fprintf (stream, ",%lu,1\n", (unsigned long) size);
++}
++
++/* ASM_OUTPUT_LOCAL */
++/* Track need of __do_clear_bss */
++
++void
++avr_asm_output_local (FILE *stream, const char *name,
++ unsigned HOST_WIDE_INT size,
++ unsigned HOST_WIDE_INT rounded ATTRIBUTE_UNUSED)
++{
++ avr_need_clear_bss_p = 1;
++ fputs ("\t.lcomm ", stream);
++ assemble_name (stream, name);
++ fprintf (stream, ",%d\n", (int) size);
++}
++
++/* Unnamed section callback to track need of __do_copy_data */
++
++static void
++avr_output_data_section_asm_op (const void *data)
++{
++ avr_need_copy_data_p = 1;
++ /* Dispatch to default */
++ output_section_asm_op (data);
++}
++
++/* Unnamed section callback to track need of __do_clear_bss */
++
++static void
++avr_output_bss_section_asm_op (const void *data)
++{
++ avr_need_clear_bss_p = 1;
++ /* Dispatch to default */
++ output_section_asm_op (data);
++}
++
+ /* Implement TARGET_ASM_INIT_SECTIONS. */
+
+ static void
+@@ -5144,6 +5198,27 @@ avr_asm_init_sections (void)
+ avr_output_progmem_section_asm_op,
+ NULL);
+ readonly_data_section = data_section;
++
++ data_section->unnamed.callback = avr_output_data_section_asm_op;
++ bss_section->unnamed.callback = avr_output_bss_section_asm_op;
++}
++
++/* TARGET_ASM_NAMED_SECTION */
++/* Track need of __do_clear_bss, __do_copy_data for named sections */
++
++static void
++avr_asm_named_section (const char *name, unsigned int flags, tree decl)
++{
++ if (!avr_need_copy_data_p)
++ avr_need_copy_data_p =
++ (0 == strncmp (name, ".data", 5)
++ || 0 == strncmp (name, ".rodata", 7)
++ || 0 == strncmp (name, ".gnu.linkonce.", 14));
++
++ if (!avr_need_clear_bss_p)
++ avr_need_clear_bss_p = (0 == strncmp (name, ".bss", 4));
++
++ default_elf_asm_named_section (name, flags, decl);
+ }
+
+ static unsigned int
+@@ -5182,12 +5257,6 @@ avr_file_start (void)
+
+ fputs ("__tmp_reg__ = 0\n"
+ "__zero_reg__ = 1\n", asm_out_file);
+-
+- /* FIXME: output these only if there is anything in the .data / .bss
+- sections - some code size could be saved by not linking in the
+- initialization code from libgcc if one or both sections are empty. */
+- fputs ("\t.global __do_copy_data\n", asm_out_file);
+- fputs ("\t.global __do_clear_bss\n", asm_out_file);
+ }
+
+ /* Outputs to the stdio stream FILE some
+@@ -5196,6 +5265,16 @@ avr_file_start (void)
+ static void
+ avr_file_end (void)
+ {
++ /* Output these only if there is anything in the
++ .data* / .rodata* / .gnu.linkonce.* resp. .bss*
++ input section(s) - some code size can be saved by not
++ linking in the initialization code from libgcc if resp.
++ sections are empty. */
++ if (avr_need_copy_data_p)
++ fputs (".global __do_copy_data\n", asm_out_file);
++
++ if (avr_need_clear_bss_p)
++ fputs (".global __do_clear_bss\n", asm_out_file);
+ }
+
+ /* Choose the order in which to allocate hard registers for
+diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
+index 54c607a..c0efa57 100644
+--- a/gcc/config/avr/avr.h
++++ b/gcc/config/avr/avr.h
+@@ -544,7 +544,7 @@ do { \
+ #define ASM_APP_OFF "/* #NOAPP */\n"
+
+ /* Switch into a generic section. */
+-#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
++#define TARGET_ASM_NAMED_SECTION avr_asm_named_section
+ #define TARGET_ASM_INIT_SECTIONS avr_asm_init_sections
+
+ #define ASM_OUTPUT_ASCII(FILE, P, SIZE) gas_output_ascii (FILE,P,SIZE)
+@@ -552,21 +552,13 @@ do { \
+ #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == '\n' || ((C) == '$'))
+
+ #define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+-do { \
+- fputs ("\t.comm ", (STREAM)); \
+- assemble_name ((STREAM), (NAME)); \
+- fprintf ((STREAM), ",%lu,1\n", (unsigned long)(SIZE)); \
+-} while (0)
++ avr_asm_output_common (STREAM, NAME, SIZE, ROUNDED)
+
+ #define ASM_OUTPUT_BSS(FILE, DECL, NAME, SIZE, ROUNDED) \
+ asm_output_bss ((FILE), (DECL), (NAME), (SIZE), (ROUNDED))
+
+ #define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+-do { \
+- fputs ("\t.lcomm ", (STREAM)); \
+- assemble_name ((STREAM), (NAME)); \
+- fprintf ((STREAM), ",%d\n", (int)(SIZE)); \
+-} while (0)
++ avr_asm_output_local (STREAM, NAME, SIZE, ROUNDED)
+
+ #undef TYPE_ASM_OP
+ #undef SIZE_ASM_OP
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0004-Add-support-for-XMEGA-devices.patch b/patches/gcc-4.4.3/atmel/0004-Add-support-for-XMEGA-devices.patch
new file mode 100644
index 0000000..e76b8b7
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0004-Add-support-for-XMEGA-devices.patch
@@ -0,0 +1,897 @@
+From 3c28f783fe1d9b707c3a07f5f3bc29db0e7e0ddf Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Tue, 19 Apr 2011 23:30:06 +0200
+Subject: [PATCH 04/10] Add support for XMEGA devices
+
+Add support for the AVR XMEGA family of devices, device names that
+are used in this family. This is the patch that will NOT be the
+final XMEGA patch for the upcoming release. There are other
+outstanding issues (raised on the mailing lists recently) with the
+xmega support that I hope to address in the next day or two.
+
+Eric Weddington
+Quote by Eric Weddington (2010-06-08):
+http://www.mail-archive.com/avr-libc-dev@nongnu.org/msg04097.html
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/51-gcc-4.4.3-xmega-v14.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 339 +++++++++++++++++++++++++++++++++++++-----
+ gcc/config/avr/avr.h | 62 ++++++++-
+ gcc/config/avr/avr.md | 9 +-
+ gcc/config/avr/libgcc.S | 15 ++
+ gcc/config/avr/predicates.md | 12 +-
+ gcc/config/avr/t-avr | 23 +++-
+ 6 files changed, 408 insertions(+), 52 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index a8aff30..60918ac 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -52,6 +52,7 @@
+ static int avr_naked_function_p (tree);
+ static int interrupt_function_p (tree);
+ static int signal_function_p (tree);
++static int nmi_function_p (tree);
+ static int avr_OS_task_function_p (tree);
+ static int avr_OS_main_function_p (tree);
+ static int avr_regs_to_save (HARD_REG_SET *);
+@@ -118,17 +119,24 @@ const struct base_arch_s *avr_current_arch;
+ section *progmem_section;
+
+ static const struct base_arch_s avr_arch_types[] = {
+- { 1, 0, 0, 0, 0, 0, 0, 0, NULL }, /* unknown device specified */
+- { 1, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=1" },
+- { 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=2" },
+- { 0, 0, 0, 1, 0, 0, 0, 0, "__AVR_ARCH__=25" },
+- { 0, 0, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=3" },
+- { 0, 0, 1, 0, 1, 0, 0, 0, "__AVR_ARCH__=31" },
+- { 0, 0, 1, 1, 0, 0, 0, 0, "__AVR_ARCH__=35" },
+- { 0, 1, 0, 1, 0, 0, 0, 0, "__AVR_ARCH__=4" },
+- { 0, 1, 1, 1, 0, 0, 0, 0, "__AVR_ARCH__=5" },
+- { 0, 1, 1, 1, 1, 1, 0, 0, "__AVR_ARCH__=51" },
+- { 0, 1, 1, 1, 1, 1, 1, 0, "__AVR_ARCH__=6" }
++ { 1, 0, 0, 0, 0, 0, 0, 0, 0, NULL }, /* Unknown device specified. */
++ { 1, 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=1" },
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=2" },
++ { 0, 0, 0, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=25" },
++ { 0, 0, 1, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=3" },
++ { 0, 0, 1, 0, 1, 0, 0, 0, 0, "__AVR_ARCH__=31" },
++ { 0, 0, 1, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=35" },
++ { 0, 1, 0, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=4" },
++ { 0, 1, 1, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=5" },
++ { 0, 1, 1, 1, 1, 1, 0, 0, 0, "__AVR_ARCH__=51" },
++ { 0, 1, 1, 1, 1, 1, 1, 0, 0, "__AVR_ARCH__=6" },
++ { 0, 1, 0, 1, 0, 0, 0, 1, 0, "__AVR_ARCH__=101" },
++ { 0, 1, 1, 1, 0, 0, 0, 1, 0, "__AVR_ARCH__=102" },
++ { 0, 1, 1, 1, 0, 0, 0, 1, 1, "__AVR_ARCH__=103" },
++ { 0, 1, 1, 1, 1, 1, 0, 1, 0, "__AVR_ARCH__=104" },
++ { 0, 1, 1, 1, 1, 1, 0, 1, 1, "__AVR_ARCH__=105" },
++ { 0, 1, 1, 1, 1, 1, 1, 1, 0, "__AVR_ARCH__=106" },
++ { 0, 1, 1, 1, 1, 1, 1, 1, 1, "__AVR_ARCH__=107" }
+ };
+
+ /* These names are used as the index into the avr_arch_types[] table
+@@ -146,7 +154,14 @@ enum avr_arch
+ ARCH_AVR4,
+ ARCH_AVR5,
+ ARCH_AVR51,
+- ARCH_AVR6
++ ARCH_AVR6,
++ ARCH_AVRXMEGA1,
++ ARCH_AVRXMEGA2,
++ ARCH_AVRXMEGA3,
++ ARCH_AVRXMEGA4,
++ ARCH_AVRXMEGA5,
++ ARCH_AVRXMEGA6,
++ ARCH_AVRXMEGA7
+ };
+
+ struct mcu_type_s {
+@@ -304,6 +319,35 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { "avr6", ARCH_AVR6, NULL },
+ { "atmega2560", ARCH_AVR6, "__AVR_ATmega2560__" },
+ { "atmega2561", ARCH_AVR6, "__AVR_ATmega2561__" },
++ /* Enhanced, == 256K. */
++ /* Xmega, <= 8K FLASH. */
++ /* Xmega, > 8K, <= 64K FLASH, <= 64K RAM. */
++ { "avrxmega2", ARCH_AVRXMEGA2, NULL },
++ { "atxmega16a4", ARCH_AVRXMEGA2, "__AVR_ATxmega16A4__" },
++ { "atxmega16d4", ARCH_AVRXMEGA2, "__AVR_ATxmega16D4__" },
++ { "atxmega32a4", ARCH_AVRXMEGA2, "__AVR_ATxmega32A4__" },
++ { "atxmega32d4", ARCH_AVRXMEGA2, "__AVR_ATxmega32D4__" },
++ /* Xmega, > 8K, <= 64K FLASH, > 64K RAM. */
++ /* { "avrxmega3", ARCH_AVRXMEGA3, NULL }, */
++ /* Xmega, > 64K, <= 128K FLASH, <= 64K RAM. */
++ { "avrxmega4", ARCH_AVRXMEGA4, NULL },
++ { "atxmega64a3", ARCH_AVRXMEGA4, "__AVR_ATxmega64A3__" },
++ { "atxmega64d3", ARCH_AVRXMEGA4, "__AVR_ATxmega64D3__" },
++ /* Xmega, > 64K, <= 128K FLASH, > 64K RAM. */
++ { "avrxmega5", ARCH_AVRXMEGA5, NULL },
++ { "atxmega64a1", ARCH_AVRXMEGA5, "__AVR_ATxmega64A1__" },
++ /* Xmega, > 128K, <= 256K FLASH, <= 64K RAM. */
++ { "avrxmega6", ARCH_AVRXMEGA6, NULL },
++ { "atxmega128a3", ARCH_AVRXMEGA6, "__AVR_ATxmega128A3__" },
++ { "atxmega128d3", ARCH_AVRXMEGA6, "__AVR_ATxmega128D3__" },
++ { "atxmega192a3", ARCH_AVRXMEGA6, "__AVR_ATxmega192A3__" },
++ { "atxmega192d3", ARCH_AVRXMEGA6, "__AVR_ATxmega192D3__" },
++ { "atxmega256a3", ARCH_AVRXMEGA6, "__AVR_ATxmega256A3__" },
++ { "atxmega256a3b",ARCH_AVRXMEGA6, "__AVR_ATxmega256A3B__" },
++ { "atxmega256d3", ARCH_AVRXMEGA6, "__AVR_ATxmega256D3__" },
++ /* Xmega, > 128K, <= 256K FLASH, > 64K RAM. */
++ { "avrxmega7", ARCH_AVRXMEGA7, NULL },
++ { "atxmega128a1", ARCH_AVRXMEGA7, "__AVR_ATxmega128A1__" },
+ /* Assembler only. */
+ { "avr1", ARCH_AVR1, NULL },
+ { "at90s1200", ARCH_AVR1, "__AVR_AT90S1200__" },
+@@ -505,6 +549,21 @@ signal_function_p (tree func)
+ return a != NULL_TREE;
+ }
+
++/* Return nonzero if FUNC is a nmi function as specified
++ by the "nmi" attribute. */
++
++static int
++nmi_function_p (tree func)
++{
++ tree a;
++
++ if (TREE_CODE (func) != FUNCTION_DECL)
++ return 0;
++
++ a = lookup_attribute ("nmi", DECL_ATTRIBUTES (func));
++ return a != NULL_TREE;
++}
++
+ /* Return nonzero if FUNC is a OS_task function. */
+
+ static int
+@@ -703,6 +762,7 @@ expand_prologue (void)
+ cfun->machine->is_naked = avr_naked_function_p (current_function_decl);
+ cfun->machine->is_interrupt = interrupt_function_p (current_function_decl);
+ cfun->machine->is_signal = signal_function_p (current_function_decl);
++ cfun->machine->is_nmi = nmi_function_p (current_function_decl);
+ cfun->machine->is_OS_task = avr_OS_task_function_p (current_function_decl);
+ cfun->machine->is_OS_main = avr_OS_main_function_p (current_function_decl);
+
+@@ -740,17 +800,48 @@ expand_prologue (void)
+
+ /* Push SREG. */
+ insn = emit_move_insn (tmp_reg_rtx,
+- gen_rtx_MEM (QImode, GEN_INT (SREG_ADDR)));
++ gen_rtx_MEM (QImode, GEN_INT (AVR_SREG_ADDR)));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ insn = emit_move_insn (pushbyte, tmp_reg_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ /* Push RAMPD, RAMPX, RAMPY. */
++ if (AVR_HAVE_RAMPX_Y_D)
++ {
++ /* Push RAMPD. */
++ insn = emit_move_insn (tmp_reg_rtx,
++ gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPD_ADDR)));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ insn = emit_move_insn (pushbyte, tmp_reg_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ /* Push RAMPX. */
++ if (TEST_HARD_REG_BIT (set, REG_X) && TEST_HARD_REG_BIT (set, REG_X + 1))
++ {
++ insn = emit_move_insn (tmp_reg_rtx,
++ gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPX_ADDR)));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ insn = emit_move_insn (pushbyte, tmp_reg_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++
++ /* Push RAMPY. */
++ if (TEST_HARD_REG_BIT (set, REG_Y) && TEST_HARD_REG_BIT (set, REG_Y + 1))
++ {
++ insn = emit_move_insn (tmp_reg_rtx,
++ gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPY_ADDR)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_move_insn (pushbyte, tmp_reg_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++ }
+
+ /* Push RAMPZ. */
+ if(AVR_HAVE_RAMPZ
+ && (TEST_HARD_REG_BIT (set, REG_Z) && TEST_HARD_REG_BIT (set, REG_Z + 1)))
+ {
+ insn = emit_move_insn (tmp_reg_rtx,
+- gen_rtx_MEM (QImode, GEN_INT (RAMPZ_ADDR)));
++ gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPZ_ADDR)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_move_insn (pushbyte, tmp_reg_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+@@ -762,6 +853,41 @@ expand_prologue (void)
+
+ /* Prevent any attempt to delete the setting of ZERO_REG! */
+ emit_use (zero_reg_rtx);
++
++
++ /*
++ Clear RAMP? registers if used for data access in the interrupt/signal
++ context. Do this after the zero register has been explictly cleared.
++ */
++ if (AVR_HAVE_RAMPX_Y_D)
++ {
++ /* Set RAMPD to 0. */
++ insn = emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPD_ADDR)), const0_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ if (TEST_HARD_REG_BIT (set, REG_X) && TEST_HARD_REG_BIT (set, REG_X + 1))
++ {
++ /* Set RAMPX to 0. */
++ insn = emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPX_ADDR)), const0_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++
++ if (TEST_HARD_REG_BIT (set, REG_Y) && TEST_HARD_REG_BIT (set, REG_Y + 1))
++ {
++ /* Set RAMPY to 0. */
++ insn = emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPY_ADDR)), const0_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++
++ if(AVR_HAVE_RAMPZ
++ && (TEST_HARD_REG_BIT (set, REG_Z) && TEST_HARD_REG_BIT (set, REG_Z + 1)))
++ {
++ /* Set RAMPZ to 0. */
++ insn = emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPZ_ADDR)), const0_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++ }
++
+ }
+ if (minimize && (frame_pointer_needed
+ || (AVR_2_BYTE_PC && live_seq > 6)
+@@ -850,16 +976,16 @@ expand_prologue (void)
+ insn = emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+- else if (TARGET_NO_INTERRUPTS
+- || cfun->machine->is_signal
+- || cfun->machine->is_OS_main)
++ else if ((!AVR_XMEGA && TARGET_NO_INTERRUPTS)
++ || (!AVR_XMEGA && cfun->machine->is_signal)
++ || (!AVR_XMEGA && cfun->machine->is_OS_main))
+ {
+ insn =
+ emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
+ frame_pointer_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+- else if (cfun->machine->is_interrupt)
++ else if (!AVR_XMEGA && cfun->machine->is_interrupt)
+ {
+ insn = emit_insn (gen_movhi_sp_r_irq_on (stack_pointer_rtx,
+ frame_pointer_rtx));
+@@ -1024,13 +1150,13 @@ expand_epilogue (void)
+ {
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+- else if (TARGET_NO_INTERRUPTS
+- || cfun->machine->is_signal)
++ else if ((!AVR_XMEGA && TARGET_NO_INTERRUPTS)
++ || (!AVR_XMEGA && cfun->machine->is_signal))
+ {
+ emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
+ frame_pointer_rtx));
+ }
+- else if (cfun->machine->is_interrupt)
++ else if (!AVR_XMEGA && cfun->machine->is_interrupt)
+ {
+ emit_insn (gen_movhi_sp_r_irq_on (stack_pointer_rtx,
+ frame_pointer_rtx));
+@@ -1083,14 +1209,39 @@ expand_epilogue (void)
+ && (TEST_HARD_REG_BIT (set, REG_Z) && TEST_HARD_REG_BIT (set, REG_Z + 1)))
+ {
+ emit_insn (gen_popqi (tmp_reg_rtx));
+- emit_move_insn (gen_rtx_MEM(QImode, GEN_INT(RAMPZ_ADDR)),
++ emit_move_insn (gen_rtx_MEM(QImode, GEN_INT(AVR_RAMPZ_ADDR)),
++ tmp_reg_rtx);
++ }
++
++ /* Restore RAMPY, RAMPX, RAMPD using tmp reg as scratch. */
++ if (AVR_HAVE_RAMPX_Y_D)
++ {
++ /* Pop RAMPY. */
++ if (TEST_HARD_REG_BIT (set, REG_Y) && TEST_HARD_REG_BIT (set, REG_Y + 1))
++ {
++ emit_insn (gen_popqi (tmp_reg_rtx));
++ emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPY_ADDR)),
++ tmp_reg_rtx);
++ }
++
++ /* Pop RAMPX. */
++ if (TEST_HARD_REG_BIT (set, REG_X) && TEST_HARD_REG_BIT (set, REG_X + 1))
++ {
++ emit_insn (gen_popqi (tmp_reg_rtx));
++ emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPX_ADDR)),
++ tmp_reg_rtx);
++ }
++
++ /* Pop RAMPD. */
++ emit_insn (gen_popqi (tmp_reg_rtx));
++ emit_move_insn (gen_rtx_MEM (QImode, GEN_INT (AVR_RAMPD_ADDR)),
+ tmp_reg_rtx);
+ }
+
+ /* Restore SREG using tmp reg as scratch. */
+ emit_insn (gen_popqi (tmp_reg_rtx));
+
+- emit_move_insn (gen_rtx_MEM(QImode, GEN_INT(SREG_ADDR)),
++ emit_move_insn (gen_rtx_MEM(QImode, GEN_INT(AVR_SREG_ADDR)),
+ tmp_reg_rtx);
+
+ /* Restore tmp REG. */
+@@ -1856,9 +2007,17 @@ output_movhi (rtx insn, rtx operands[], int *l)
+ return *l = 1, AS2 (out,__SP_L__,%A1);
+ /* Use simple load of stack pointer if no interrupts are
+ used. */
+- else if (TARGET_NO_INTERRUPTS)
++ else if (!AVR_XMEGA && TARGET_NO_INTERRUPTS)
+ return *l = 2, (AS2 (out,__SP_H__,%B1) CR_TAB
+ AS2 (out,__SP_L__,%A1));
++ if(AVR_XMEGA)
++ {
++ *l = 2;
++ return (AS2 (out,__SP_L__,%A1) CR_TAB
++ AS2 (out,__SP_H__,%B1));
++ }
++ else
++ {
+ *l = 5;
+ return (AS2 (in,__tmp_reg__,__SREG__) CR_TAB
+ "cli" CR_TAB
+@@ -1866,6 +2025,7 @@ output_movhi (rtx insn, rtx operands[], int *l)
+ AS2 (out,__SREG__,__tmp_reg__) CR_TAB
+ AS2 (out,__SP_L__,%A1));
+ }
++ }
+ else if (test_hard_reg_class (STACK_REG, src))
+ {
+ *l = 2;
+@@ -1999,7 +2159,7 @@ out_movqi_r_mr (rtx insn, rtx op[], int *l)
+
+ if (CONSTANT_ADDRESS_P (x))
+ {
+- if (CONST_INT_P (x) && INTVAL (x) == SREG_ADDR)
++ if (CONST_INT_P (x) && INTVAL (x) == AVR_SREG_ADDR)
+ {
+ *l = 1;
+ return AS2 (in,%0,__SREG__);
+@@ -2007,7 +2167,8 @@ out_movqi_r_mr (rtx insn, rtx op[], int *l)
+ if (optimize > 0 && io_address_operand (x, QImode))
+ {
+ *l = 1;
+- return AS2 (in,%0,%1-0x20);
++ op[2] = GEN_INT(AVR_IO_OFFSET);
++ return AS2 (in,%0,%1-%2);
+ }
+ *l = 2;
+ return AS2 (lds,%0,%1);
+@@ -2195,8 +2356,9 @@ out_movhi_r_mr (rtx insn, rtx op[], int *l)
+ if (optimize > 0 && io_address_operand (base, HImode))
+ {
+ *l = 2;
+- return (AS2 (in,%A0,%A1-0x20) CR_TAB
+- AS2 (in,%B0,%B1-0x20));
++ op[2] = GEN_INT(AVR_IO_OFFSET);
++ return (AS2 (in,%A0,%A1-%2) CR_TAB
++ AS2 (in,%B0,%B1-%2));
+ }
+ *l = 4;
+ return (AS2 (lds,%A0,%A1) CR_TAB
+@@ -2695,7 +2857,7 @@ out_movqi_mr_r (rtx insn, rtx op[], int *l)
+
+ if (CONSTANT_ADDRESS_P (x))
+ {
+- if (CONST_INT_P (x) && INTVAL (x) == SREG_ADDR)
++ if (CONST_INT_P (x) && INTVAL (x) == AVR_SREG_ADDR)
+ {
+ *l = 1;
+ return AS2 (out,__SREG__,%1);
+@@ -2703,7 +2865,8 @@ out_movqi_mr_r (rtx insn, rtx op[], int *l)
+ if (optimize > 0 && io_address_operand (x, QImode))
+ {
+ *l = 1;
+- return AS2 (out,%0-0x20,%1);
++ op[2] = GEN_INT(AVR_IO_OFFSET);
++ return AS2 (out,%0-%2,%1);
+ }
+ *l = 2;
+ return AS2 (sts,%0,%1);
+@@ -2782,9 +2945,18 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ if (optimize > 0 && io_address_operand (base, HImode))
+ {
+ *l = 2;
+- return (AS2 (out,%B0-0x20,%B1) CR_TAB
+- AS2 (out,%A0-0x20,%A1));
++ op[2] = GEN_INT(AVR_IO_OFFSET);
++ if (AVR_XMEGA)
++ return (AS2 (out,%A0-%2,%A1) CR_TAB
++ AS2 (out,%B0-%2,%B1));
++ else
++ return (AS2 (out,%B0-%2,%B1) CR_TAB
++ AS2 (out,%A0-%2,%A1));
+ }
++ if (AVR_XMEGA)
++ return *l = 4, (AS2 (sts,%A0,%A1) CR_TAB
++ AS2 (sts,%B0,%B1));
++ else
+ return *l = 4, (AS2 (sts,%B0,%B1) CR_TAB
+ AS2 (sts,%A0,%A1));
+ }
+@@ -2801,11 +2973,20 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,__tmp_reg__));
+ else
++ {
++ if (!AVR_XMEGA)
+ return *l=5, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+ AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,__tmp_reg__) CR_TAB
+ AS2 (sbiw,r26,1) CR_TAB
+ AS2 (st,X,r26));
++ else
++ return *l=5, (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (sbiw,r26,1));
++ }
+ }
+ else
+ {
+@@ -2813,14 +2994,27 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ return *l=2, (AS2 (st,X+,%A1) CR_TAB
+ AS2 (st,X,%B1));
+ else
++ {
++ if (!AVR_XMEGA)
+ return *l=3, (AS2 (adiw,r26,1) CR_TAB
+ AS2 (st,X,%B1) CR_TAB
+ AS2 (st,-X,%A1));
++ else
++ return *l=3, (AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (sbiw,r26,1));
++ }
+ }
+ }
+ else
++ {
++ if (!AVR_XMEGA)
+ return *l=2, (AS2 (std,%0+1,%B1) CR_TAB
+ AS2 (st,%0,%A1));
++ else
++ return *l=2, (AS2 (st,%0,%A1) CR_TAB
++ AS2 (std,%0+1,%B1));
++ }
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+@@ -2831,6 +3025,8 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
++ if (!AVR_XMEGA)
++ {
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return *l = 4, (AS2 (adiw,r28,%o0-62) CR_TAB
+ AS2 (std,Y+63,%B1) CR_TAB
+@@ -2844,11 +3040,29 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ AS2 (subi,r28,lo8(%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(%o0)));
+ }
++ else
++ {
++ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
++ return *l = 4, (AS2 (adiw,r28,%o0-62) CR_TAB
++ AS2 (std,Y+62,%A1) CR_TAB
++ AS2 (std,Y+63,%B1) CR_TAB
++ AS2 (sbiw,r28,%o0-62));
++
++ return *l = 6, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (st,Y,%A1) CR_TAB
++ AS2 (std,Y+1,%B1) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)));
++ }
++ }
+ if (reg_base == REG_X)
+ {
+ /* (X + d) = R */
+ if (reg_src == REG_X)
+ {
++ if (!AVR_XMEGA)
++ {
+ *l = 7;
+ return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+ AS2 (mov,__zero_reg__,r27) CR_TAB
+@@ -2858,21 +3072,57 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ AS1 (clr,__zero_reg__) CR_TAB
+ AS2 (sbiw,r26,%o0));
+ }
++ else
++ {
++ *l = 7;
++ return (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (sbiw,r26,%o0+1));
++ }
++ }
++ if (!AVR_XMEGA)
++ {
+ *l = 4;
+ return (AS2 (adiw,r26,%o0+1) CR_TAB
+ AS2 (st,X,%B1) CR_TAB
+ AS2 (st,-X,%A1) CR_TAB
+ AS2 (sbiw,r26,%o0));
+ }
++ else
++ {
++ *l = 4;
++ return (AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (sbiw,r26,%o0+1));
++ }
++ }
++
++ if (!AVR_XMEGA)
+ return *l=2, (AS2 (std,%B0,%B1) CR_TAB
+ AS2 (std,%A0,%A1));
++ else
++ return *l=2, (AS2 (std,%A0,%A1) CR_TAB
++ AS2 (std,%B0,%B1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
++ {
++ if (mem_volatile_p && AVR_XMEGA)
++ return *l = 4, (AS2 (sbiw,%r0,2) CR_TAB
++ AS2 (st,%p0+,%A1) CR_TAB
++ AS2 (st,%p0,%B1) CR_TAB
++ AS2 (sbiw,%r0,1));
++ else
+ return *l=2, (AS2 (st,%0,%B1) CR_TAB
+ AS2 (st,%0,%A1));
++ }
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ {
+- if (mem_volatile_p)
++ if (mem_volatile_p && !AVR_XMEGA)
+ {
+ if (REGNO (XEXP (base, 0)) == REG_X)
+ {
+@@ -4872,6 +5122,16 @@ avr_asm_declare_function_name (FILE *file, const char *name, tree decl)
+ }
+ }
+
++ else if (cfun->machine->is_nmi)
++ {
++ if (strncmp (name, "__vector", strlen ("__vector")) != 0)
++ {
++ warning_at (DECL_SOURCE_LOCATION (decl), 0,
++ "%qs appears to be a misspelled nmi handler",
++ name);
++ }
++ }
++
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_OUTPUT_LABEL (file, name);
+ }
+@@ -4996,6 +5256,7 @@ const struct attribute_spec avr_attribute_table[] =
+ { "progmem", 0, 0, false, false, false, avr_handle_progmem_attribute },
+ { "signal", 0, 0, true, false, false, avr_handle_fndecl_attribute },
+ { "interrupt", 0, 0, true, false, false, avr_handle_fndecl_attribute },
++ { "nmi", 0, 0, true, false, false, avr_handle_fndecl_attribute },
+ { "naked", 0, 0, false, true, true, avr_handle_fntype_attribute },
+ { "OS_task", 0, 0, false, true, true, avr_handle_fntype_attribute },
+ { "OS_main", 0, 0, false, true, true, avr_handle_fntype_attribute },
+@@ -5253,7 +5514,8 @@ avr_file_start (void)
+ /* fprintf (asm_out_file, "\t.arch %s\n", avr_mcu_name);*/
+ fputs ("__SREG__ = 0x3f\n"
+ "__SP_H__ = 0x3e\n"
+- "__SP_L__ = 0x3d\n", asm_out_file);
++ "__SP_L__ = 0x3d\n"
++ "__CCP__ = 0x34\n", asm_out_file);
+
+ fputs ("__tmp_reg__ = 0\n"
+ "__zero_reg__ = 1\n", asm_out_file);
+@@ -6354,16 +6616,17 @@ avr_out_sbxx_branch (rtx insn, rtx operands[])
+
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+- if (INTVAL (operands[1]) < 0x40)
++ operands[4] = GEN_INT(AVR_IO_OFFSET); /* operands[3] is for the jump */
++ if (low_io_address_operand (operands[1], VOIDmode))
+ {
+ if (comp == EQ)
+- output_asm_insn (AS2 (sbis,%1-0x20,%2), operands);
++ output_asm_insn (AS2 (sbis,%1-%4,%2), operands);
+ else
+- output_asm_insn (AS2 (sbic,%1-0x20,%2), operands);
++ output_asm_insn (AS2 (sbic,%1-%4,%2), operands);
+ }
+ else
+ {
+- output_asm_insn (AS2 (in,__tmp_reg__,%1-0x20), operands);
++ output_asm_insn (AS2 (in,__tmp_reg__,%1-%4), operands);
+ if (comp == EQ)
+ output_asm_insn (AS2 (sbrs,__tmp_reg__,%2), operands);
+ else
+diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
+index c0efa57..e580d5e 100644
+--- a/gcc/config/avr/avr.h
++++ b/gcc/config/avr/avr.h
+@@ -44,8 +44,11 @@ struct base_arch_s {
+ /* Core have 'EICALL' and 'EIJMP' instructions. */
+ int have_eijmp_eicall;
+
+- /* Reserved. */
+- int reserved;
++ /* Core is in Xmega family. */
++ int xmega;
++
++ /* Core have RAMPX, RAMPY and RAMPD registers. */
++ int have_rampx_y_d;
+
+ const char *const macro;
+ };
+@@ -94,6 +97,17 @@ extern const struct base_arch_s *avr_current_arch;
+ } \
+ if (TARGET_NO_INTERRUPTS) \
+ builtin_define ("__NO_INTERRUPTS__"); \
++ if (avr_current_arch->xmega) \
++ { \
++ builtin_define ("__AVR_XMEGA__"); \
++ builtin_define ("__AVR_HAVE_SPMX__"); \
++ } \
++ if (avr_current_arch->have_rampx_y_d) \
++ { \
++ builtin_define ("__AVR_HAVE_RAMPX__");\
++ builtin_define ("__AVR_HAVE_RAMPY__");\
++ builtin_define ("__AVR_HAVE_RAMPD__");\
++ } \
+ } \
+ while (0)
+
+@@ -109,10 +123,19 @@ extern GTY(()) section *progmem_section;
+ #define AVR_HAVE_LPMX (avr_current_arch->have_movw_lpmx)
+ #define AVR_HAVE_RAMPZ (avr_current_arch->have_elpm)
+ #define AVR_HAVE_EIJMP_EICALL (avr_current_arch->have_eijmp_eicall)
++#define AVR_XMEGA (avr_current_arch->xmega)
++#define AVR_HAVE_RAMPX_Y_D (avr_current_arch->have_rampx_y_d)
+
+ #define AVR_2_BYTE_PC (!AVR_HAVE_EIJMP_EICALL)
+ #define AVR_3_BYTE_PC (AVR_HAVE_EIJMP_EICALL)
+
++#define AVR_IO_OFFSET (AVR_XMEGA ? 0 : 0x20)
++#define AVR_RAMPD_ADDR (AVR_XMEGA ? 0x38 : 0)
++#define AVR_RAMPX_ADDR (AVR_XMEGA ? 0x39 : 0)
++#define AVR_RAMPY_ADDR (AVR_XMEGA ? 0x3A : 0)
++#define AVR_RAMPZ_ADDR (AVR_XMEGA ? 0x3B : 0x5B)
++#define AVR_SREG_ADDR (AVR_XMEGA ? 0x3F: 0x5F)
++
+ #define TARGET_VERSION fprintf (stderr, " (GNU assembler syntax)");
+
+ #define OVERRIDE_OPTIONS avr_override_options ()
+@@ -853,6 +876,20 @@ mmcu=*:-mmcu=%*}"
+ mmcu=m3000*|\
+ mmcu=m3001*: -m avr5}\
+ %{mmcu=atmega256*:-m avr6}\
++%{mmcu=atxmega16a4|\
++ mmcu=atxmega16d4|\
++ mmcu=atxmega32d4|\
++ mmcu=atxmega32a4:-m avrxmega2} \
++%{mmcu=atxmega64a3|\
++ mmcu=atxmega64d3:-m avrxmega4} \
++%{mmcu=atxmega64a1:-m avrxmega5} \
++%{mmcu=atxmega128a3|\
++ mmcu=atxmega128d3|\
++ mmcu=atxmega192a3|\
++ mmcu=atxmega192d3|\
++ mmcu=atxmega256a3*|\
++ mmcu=atxmega256d3:-m avrxmega6} \
++%{mmcu=atxmega128a1:-m avrxmega7} \
+ %{mmcu=atmega324*|\
+ mmcu=atmega325*|\
+ mmcu=atmega328p|\
+@@ -1042,7 +1079,22 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=m3000s:crtm3000s.o%s} \
+ %{mmcu=m3001b:crtm3001b.o%s} \
+ %{mmcu=atmega2560|mmcu=avr6:crtm2560.o%s} \
+-%{mmcu=atmega2561:crtm2561.o%s}"
++%{mmcu=atmega2561:crtm2561.o%s} \
++%{mmcu=avrxmega2|mmcu=atxmega32d4:crtx32d4.o%s} \
++%{mmcu=atxmega16a4:crtx16a4.o%s} \
++%{mmcu=atxmega16d4:crtx16d4.o%s} \
++%{mmcu=atxmega3|mmcu=atxmega32a4:crtx32a4.o%s} \
++%{mmcu=atxmega4|mmcu=atxmega64a3:crtx64a3.o%s} \
++%{mmcu=atxmega64d3:crtx64d3.o%s} \
++%{mmcu=atxmega5|mmcu=atxmega64a1:crtx64a1.o%s} \
++%{mmcu=atxmega6|mmcu=atxmega128a3:crtx128a3.o%s} \
++%{mmcu=atxmega128d3:crtx128d3.o%s}\
++%{mmcu=atxmega192a3:crtx192a3.o%s}\
++%{mmcu=atxmega192d3:crtx192d3.o%s}\
++%{mmcu=atxmega256a3:crtx256a3.o%s} \
++%{mmcu=atxmega256a3b:crtx256a3b.o%s} \
++%{mmcu=atxmega256d3:crtx256d3.o%s} \
++%{mmcu=atxmega7|mmcu=atxmega128a1:crtx128a1.o%s}"
+
+ #define EXTRA_SPECS {"crt_binutils", CRT_BINUTILS_SPECS},
+
+@@ -1102,6 +1154,10 @@ struct machine_function GTY(())
+ as specified by the "signal" attribute. */
+ int is_signal;
+
++ /* 'true' - if current function is a signal function
++ as specified by the "nmi" attribute. */
++ int is_nmi;
++
+ /* 'true' - if current function is a 'task' function
+ as specified by the "OS_task" attribute. */
+ int is_OS_task;
+diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
+index 5090e53..f91e98c 100644
+--- a/gcc/config/avr/avr.md
++++ b/gcc/config/avr/avr.md
+@@ -47,9 +47,6 @@
+ (TMP_REGNO 0) ; temporary register r0
+ (ZERO_REGNO 1) ; zero register r1
+
+- (SREG_ADDR 0x5F)
+- (RAMPZ_ADDR 0x5B)
+-
+ (UNSPEC_STRLEN 0)
+ (UNSPEC_INDEX_JMP 1)
+ (UNSPEC_SEI 2)
+@@ -3017,7 +3014,8 @@
+ "(optimize > 0)"
+ {
+ operands[2] = GEN_INT (exact_log2 (~INTVAL (operands[1]) & 0xff));
+- return AS2 (cbi,%0-0x20,%2);
++ operands[3] = GEN_INT(AVR_IO_OFFSET);
++ return AS2 (cbi,%0-%3,%2);
+ }
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+@@ -3029,7 +3027,8 @@
+ "(optimize > 0)"
+ {
+ operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1]) & 0xff));
+- return AS2 (sbi,%0-0x20,%2);
++ operands[3] = GEN_INT(AVR_IO_OFFSET);
++ return AS2 (sbi,%0-%3,%2);
+ }
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+diff --git a/gcc/config/avr/libgcc.S b/gcc/config/avr/libgcc.S
+index 76fdd9f..5a711a8 100644
+--- a/gcc/config/avr/libgcc.S
++++ b/gcc/config/avr/libgcc.S
+@@ -637,11 +637,19 @@ __prologue_saves__:
+ in r29,__SP_H__
+ sub r28,r26
+ sbc r29,r27
++
++/* Restore stack pointer. */
++#if defined (__AVR_XMEGA__)
++ out __SP_L__,r28
++ out __SP_H__,r29
++#else
+ in __tmp_reg__,__SREG__
+ cli
+ out __SP_H__,r29
+ out __SREG__,__tmp_reg__
+ out __SP_L__,r28
++#endif
++
+ #if defined (__AVR_HAVE_EIJMP_EICALL__)
+ eijmp
+ #else
+@@ -679,11 +687,18 @@ __epilogue_restores__:
+ ldd r27,Y+1
+ add r28,r30
+ adc r29,__zero_reg__
++
++/* Restore stack pointer. */
++#if defined(__AVR_XMEGA__)
++ out __SP_L__,r28
++ out __SP_H__,r29
++#else
+ in __tmp_reg__,__SREG__
+ cli
+ out __SP_H__,r29
+ out __SREG__,__tmp_reg__
+ out __SP_L__,r28
++#endif
+ mov_l r28, r26
+ mov_h r29, r27
+ ret
+diff --git a/gcc/config/avr/predicates.md b/gcc/config/avr/predicates.md
+index 020fb5f..aca33d7 100755
+--- a/gcc/config/avr/predicates.md
++++ b/gcc/config/avr/predicates.md
+@@ -45,17 +45,23 @@
+ ;; Return true if OP is a valid address for lower half of I/O space.
+ (define_predicate "low_io_address_operand"
+ (and (match_code "const_int")
+- (match_test "IN_RANGE((INTVAL (op)), 0x20, 0x3F)")))
++ (if_then_else (match_test "AVR_XMEGA")
++ (match_test "IN_RANGE((INTVAL (op)), 0x00, 0x1F)")
++ (match_test "IN_RANGE((INTVAL (op)), 0x20, 0x3F)"))))
+
+ ;; Return true if OP is a valid address for high half of I/O space.
+ (define_predicate "high_io_address_operand"
+ (and (match_code "const_int")
+- (match_test "IN_RANGE((INTVAL (op)), 0x40, 0x5F)")))
++ (if_then_else (match_test "AVR_XMEGA")
++ (match_test "IN_RANGE((INTVAL (op)), 0x20, 0x3F)")
++ (match_test "IN_RANGE((INTVAL (op)), 0x40, 0x5F)"))))
+
+ ;; Return true if OP is a valid address of I/O space.
+ (define_predicate "io_address_operand"
+ (and (match_code "const_int")
+- (match_test "IN_RANGE((INTVAL (op)), 0x20, (0x60 - GET_MODE_SIZE(mode)))")))
++ (if_then_else (match_test "AVR_XMEGA")
++ (match_test "IN_RANGE((INTVAL (op)), 0x0, (0x40 - GET_MODE_SIZE(mode)))")
++ (match_test "IN_RANGE((INTVAL (op)), 0x20, (0x60 - GET_MODE_SIZE(mode)))"))))
+
+ ;; Return 1 if OP is the zero constant for MODE.
+ (define_predicate "const0_operand"
+diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
+index ef6b7ae..d375daf 100644
+--- a/gcc/config/avr/t-avr
++++ b/gcc/config/avr/t-avr
+@@ -73,8 +73,8 @@ fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/avr/t-avr
+
+ FPBIT = fp-bit.c
+
+-MULTILIB_OPTIONS = mmcu=avr2/mmcu=avr25/mmcu=avr3/mmcu=avr31/mmcu=avr35/mmcu=avr4/mmcu=avr5/mmcu=avr51/mmcu=avr6
+-MULTILIB_DIRNAMES = avr2 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6
++MULTILIB_OPTIONS = mmcu=avr2/mmcu=avr25/mmcu=avr3/mmcu=avr31/mmcu=avr35/mmcu=avr4/mmcu=avr5/mmcu=avr51/mmcu=avr6/mmcu=avrxmega2/mmcu=avrxmega4/mmcu=avrxmega5/mmcu=avrxmega6/mmcu=avrxmega7
++MULTILIB_DIRNAMES = avr2 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7
+
+ # The many avr2 matches are not listed here - this is the default.
+ MULTILIB_MATCHES = \
+@@ -186,7 +186,24 @@ MULTILIB_MATCHES = \
+ mmcu?avr51=mmcu?m3000s \
+ mmcu?avr51=mmcu?m3001b \
+ mmcu?avr6=mmcu?atmega2560 \
+- mmcu?avr6=mmcu?atmega2561
++ mmcu?avr6=mmcu?atmega2561 \
++ mmcu?avr6=mmcu?atmega2560 \
++ mmcu?avr6=mmcu?atmega2561 \
++ mmcu?avrxmega2=mmcu?atxmega16a4 \
++ mmcu?avrxmega2=mmcu?atxmega16d4 \
++ mmcu?avrxmega2=mmcu?atxmega32d4 \
++ mmcu?avrxmega2=mmcu?atxmega32a4 \
++ mmcu?avrxmega4=mmcu?atxmega64a3 \
++ mmcu?avrxmega4=mmcu?atxmega64d3 \
++ mmcu?avrxmega5=mmcu?atxmega64a1 \
++ mmcu?avrxmega6=mmcu?atxmega128a3 \
++ mmcu?avrxmega6=mmcu?atxmega128d3 \
++ mmcu?avrxmega6=mmcu?atxmega192a3 \
++ mmcu?avrxmega6=mmcu?atxmega192d3 \
++ mmcu?avrxmega6=mmcu?atxmega256a3 \
++ mmcu?avrxmega6=mmcu?atxmega256a3b \
++ mmcu?avrxmega6=mmcu?atxmega256d3 \
++ mmcu?avrxmega7=mmcu?atxmega128a1
+
+ MULTILIB_EXCEPTIONS =
+
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0005-Add-remove-support-for-devices.patch b/patches/gcc-4.4.3/atmel/0005-Add-remove-support-for-devices.patch
new file mode 100644
index 0000000..fa360e3
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0005-Add-remove-support-for-devices.patch
@@ -0,0 +1,662 @@
+From d4986c50a2e21a5fb7f32c1168d0ab8a02f34de7 Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Wed, 20 Apr 2011 20:44:54 +0200
+Subject: [PATCH 05/10] Add/remove support for devices
+
+Add support for a bunch of new AVR devices, and removes support
+for some listed devices as well: atmega8m1, atmega8c1, atmega16c1,
+atmega4hvd, atmega8hvd, atmega16hva, atmega16hvb, attiny327,
+m3000f, m3000s, m3001b.
+
+Assimilable quote for binutils by Eric Weddington (2010-04-08):
+http://old.nabble.com/-PATCH%2C-AVR--Add-remove-support-for-devices-td28172078.html
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/52-gcc-4.4.3-new-devices.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 63 ++++++++++++++---
+ gcc/config/avr/avr.h | 180 ++++++++++++++++++++++++++++++++++++++++++-------
+ gcc/config/avr/t-avr | 56 +++++++++++++---
+ 3 files changed, 252 insertions(+), 47 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 60918ac..9c410a5 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -200,15 +200,23 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { "attiny13", ARCH_AVR25, "__AVR_ATtiny13__" },
+ { "attiny13a", ARCH_AVR25, "__AVR_ATtiny13A__" },
+ { "attiny2313", ARCH_AVR25, "__AVR_ATtiny2313__" },
++ { "attiny2313a", ARCH_AVR25, "__AVR_ATtiny2313A__" },
+ { "attiny24", ARCH_AVR25, "__AVR_ATtiny24__" },
++ { "attiny24a", ARCH_AVR25, "__AVR_ATtiny24A__" },
++ { "attiny4313", ARCH_AVR25, "__AVR_ATtiny4313__"},
+ { "attiny44", ARCH_AVR25, "__AVR_ATtiny44__" },
++ { "attiny44a", ARCH_AVR25, "__AVR_ATtiny44A__" },
+ { "attiny84", ARCH_AVR25, "__AVR_ATtiny84__" },
++ { "attiny84a", ARCH_AVR25, "__AVR_ATtiny84A__" },
+ { "attiny25", ARCH_AVR25, "__AVR_ATtiny25__" },
+ { "attiny45", ARCH_AVR25, "__AVR_ATtiny45__" },
+ { "attiny85", ARCH_AVR25, "__AVR_ATtiny85__" },
+ { "attiny261", ARCH_AVR25, "__AVR_ATtiny261__" },
++ { "attiny261a", ARCH_AVR25, "__AVR_ATtiny261A__" },
+ { "attiny461", ARCH_AVR25, "__AVR_ATtiny461__" },
++ { "attiny461a", ARCH_AVR25, "__AVR_ATtiny461A__" },
+ { "attiny861", ARCH_AVR25, "__AVR_ATtiny861__" },
++ { "attiny861a", ARCH_AVR25, "__AVR_ATtiny861A__" },
+ { "attiny43u", ARCH_AVR25, "__AVR_ATtiny43U__" },
+ { "attiny87", ARCH_AVR25, "__AVR_ATtiny87__" },
+ { "attiny48", ARCH_AVR25, "__AVR_ATtiny48__" },
+@@ -226,22 +234,23 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { "avr35", ARCH_AVR35, NULL },
+ { "at90usb82", ARCH_AVR35, "__AVR_AT90USB82__" },
+ { "at90usb162", ARCH_AVR35, "__AVR_AT90USB162__" },
++ { "atmega8u2", ARCH_AVR35, "__AVR_ATmega8U2__" },
++ { "atmega16u2", ARCH_AVR35, "__AVR_ATmega16U2__" },
++ { "atmega32u2", ARCH_AVR35, "__AVR_ATmega32U2__" },
+ { "attiny167", ARCH_AVR35, "__AVR_ATtiny167__" },
+- { "attiny327", ARCH_AVR35, "__AVR_ATtiny327__" },
+ /* Enhanced, <= 8K. */
+ { "avr4", ARCH_AVR4, NULL },
+ { "atmega8", ARCH_AVR4, "__AVR_ATmega8__" },
+ { "atmega48", ARCH_AVR4, "__AVR_ATmega48__" },
++ { "atmega48a", ARCH_AVR4, "__AVR_ATmega48A__" },
+ { "atmega48p", ARCH_AVR4, "__AVR_ATmega48P__" },
+ { "atmega88", ARCH_AVR4, "__AVR_ATmega88__" },
++ { "atmega88a", ARCH_AVR4, "__AVR_ATmega88A__" },
+ { "atmega88p", ARCH_AVR4, "__AVR_ATmega88P__" },
++ { "atmega88pa", ARCH_AVR4, "__AVR_ATmega88PA__" },
+ { "atmega8515", ARCH_AVR4, "__AVR_ATmega8515__" },
+ { "atmega8535", ARCH_AVR4, "__AVR_ATmega8535__" },
+ { "atmega8hva", ARCH_AVR4, "__AVR_ATmega8HVA__" },
+- { "atmega4hvd", ARCH_AVR4, "__AVR_ATmega4HVD__" },
+- { "atmega8hvd", ARCH_AVR4, "__AVR_ATmega8HVD__" },
+- { "atmega8c1", ARCH_AVR4, "__AVR_ATmega8C1__" },
+- { "atmega8m1", ARCH_AVR4, "__AVR_ATmega8M1__" },
+ { "at90pwm1", ARCH_AVR4, "__AVR_AT90PWM1__" },
+ { "at90pwm2", ARCH_AVR4, "__AVR_AT90PWM2__" },
+ { "at90pwm2b", ARCH_AVR4, "__AVR_AT90PWM2B__" },
+@@ -251,39 +260,73 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ /* Enhanced, > 8K, <= 64K. */
+ { "avr5", ARCH_AVR5, NULL },
+ { "atmega16", ARCH_AVR5, "__AVR_ATmega16__" },
++ { "atmega16a", ARCH_AVR5, "__AVR_ATmega16A__" },
+ { "atmega161", ARCH_AVR5, "__AVR_ATmega161__" },
+ { "atmega162", ARCH_AVR5, "__AVR_ATmega162__" },
+ { "atmega163", ARCH_AVR5, "__AVR_ATmega163__" },
++ { "atmega164a", ARCH_AVR5, "__AVR_ATmega164A__" },
+ { "atmega164p", ARCH_AVR5, "__AVR_ATmega164P__" },
+ { "atmega165", ARCH_AVR5, "__AVR_ATmega165__" },
++ { "atmega165a", ARCH_AVR5, "__AVR_ATmega165A__" },
+ { "atmega165p", ARCH_AVR5, "__AVR_ATmega165P__" },
+ { "atmega168", ARCH_AVR5, "__AVR_ATmega168__" },
++ { "atmega168a", ARCH_AVR5, "__AVR_ATmega168A__" },
+ { "atmega168p", ARCH_AVR5, "__AVR_ATmega168P__" },
+ { "atmega169", ARCH_AVR5, "__AVR_ATmega169__" },
++ { "atmega169a", ARCH_AVR5, "__AVR_ATmega169A__" },
+ { "atmega169p", ARCH_AVR5, "__AVR_ATmega169P__" },
++ { "atmega169pa", ARCH_AVR5, "__AVR_ATmega169PA__" },
++ { "atmega16hva", ARCH_AVR5, "__AVR_ATmega16HVA__" },
++ { "atmega16hva2", ARCH_AVR5, "__AVR_ATmega16HVA2__" },
++ { "atmega16hvb", ARCH_AVR5, "__AVR_ATmega16HVB__" },
++ { "atmega16m1", ARCH_AVR5, "__AVR_ATmega16M1__" },
++ { "atmega16u4", ARCH_AVR5, "__AVR_ATmega16U4__" },
+ { "atmega32", ARCH_AVR5, "__AVR_ATmega32__" },
+ { "atmega323", ARCH_AVR5, "__AVR_ATmega323__" },
++ { "atmega324a", ARCH_AVR5, "__AVR_ATmega324A__" },
+ { "atmega324p", ARCH_AVR5, "__AVR_ATmega324P__" },
++ { "atmega324pa", ARCH_AVR5, "__AVR_ATmega324PA__" },
+ { "atmega325", ARCH_AVR5, "__AVR_ATmega325__" },
++ { "atmega325a", ARCH_AVR5, "__AVR_ATmega325A__" },
+ { "atmega325p", ARCH_AVR5, "__AVR_ATmega325P__" },
+ { "atmega3250", ARCH_AVR5, "__AVR_ATmega3250__" },
++ { "atmega3250a", ARCH_AVR5, "__AVR_ATmega3250A__" },
+ { "atmega3250p", ARCH_AVR5, "__AVR_ATmega3250P__" },
++ { "atmega328", ARCH_AVR5, "__AVR_ATmega328__" },
+ { "atmega328p", ARCH_AVR5, "__AVR_ATmega328P__" },
+ { "atmega329", ARCH_AVR5, "__AVR_ATmega329__" },
++ { "atmega329a", ARCH_AVR5, "__AVR_ATmega329A__" },
+ { "atmega329p", ARCH_AVR5, "__AVR_ATmega329P__" },
++ { "atmega329pa", ARCH_AVR5, "__AVR_ATmega329PA__" },
+ { "atmega3290", ARCH_AVR5, "__AVR_ATmega3290__" },
++ { "atmega3290a", ARCH_AVR5, "__AVR_ATmega3290A__" },
+ { "atmega3290p", ARCH_AVR5, "__AVR_ATmega3290P__" },
++ { "atmega32c1", ARCH_AVR5, "__AVR_ATmega32C1__" },
++ { "atmega32m1", ARCH_AVR5, "__AVR_ATmega32M1__" },
++ { "atmega32u4", ARCH_AVR5, "__AVR_ATmega32U4__" },
++ { "atmega32u6", ARCH_AVR5, "__AVR_ATmega32U6__" },
+ { "atmega406", ARCH_AVR5, "__AVR_ATmega406__" },
+ { "atmega64", ARCH_AVR5, "__AVR_ATmega64__" },
+ { "atmega640", ARCH_AVR5, "__AVR_ATmega640__" },
+ { "atmega644", ARCH_AVR5, "__AVR_ATmega644__" },
++ { "atmega644a", ARCH_AVR5, "__AVR_ATmega644A__" },
+ { "atmega644p", ARCH_AVR5, "__AVR_ATmega644P__" },
++ { "atmega644pa", ARCH_AVR5, "__AVR_ATmega644PA__" },
++ { "atmega645a", ARCH_AVR5, "__AVR_ATmega645A__" },
++ { "atmega645p", ARCH_AVR5, "__AVR_ATmega645P__" },
+ { "atmega645", ARCH_AVR5, "__AVR_ATmega645__" },
+ { "atmega6450", ARCH_AVR5, "__AVR_ATmega6450__" },
++ { "atmega6450a", ARCH_AVR5, "__AVR_ATmega6450A__" },
++ { "atmega6450p", ARCH_AVR5, "__AVR_ATmega6450P__" },
+ { "atmega649", ARCH_AVR5, "__AVR_ATmega649__" },
++ { "atmega649a", ARCH_AVR5, "__AVR_ATmega649A__" },
++ { "atmega649p", ARCH_AVR5, "__AVR_ATmega649P__" },
+ { "atmega6490", ARCH_AVR5, "__AVR_ATmega6490__" },
+- { "atmega16hva", ARCH_AVR5, "__AVR_ATmega16HVA__" },
+- { "atmega16hvb", ARCH_AVR5, "__AVR_ATmega16HVB__" },
++ { "atmega6490a", ARCH_AVR5, "__AVR_ATmega6490A__" },
++ { "atmega6490p", ARCH_AVR5, "__AVR_ATmega6490P__" },
++ { "atmega64c1", ARCH_AVR5, "__AVR_ATmega64C1__" },
++ { "atmega64m1", ARCH_AVR5, "__AVR_ATmega64M1__" },
++ { "atmega64hve", ARCH_AVR5, "__AVR_ATmega64HVE__" },
+ { "atmega32hvb", ARCH_AVR5, "__AVR_ATmega32HVB__" },
+ { "at90can32", ARCH_AVR5, "__AVR_AT90CAN32__" },
+ { "at90can64", ARCH_AVR5, "__AVR_AT90CAN64__" },
+@@ -302,19 +345,17 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { "at90usb646", ARCH_AVR5, "__AVR_AT90USB646__" },
+ { "at90usb647", ARCH_AVR5, "__AVR_AT90USB647__" },
+ { "at94k", ARCH_AVR5, "__AVR_AT94K__" },
++ { "m3000", ARCH_AVR5, "__AVR_M3000__" },
+ /* Enhanced, == 128K. */
+ { "avr51", ARCH_AVR51, NULL },
+ { "atmega128", ARCH_AVR51, "__AVR_ATmega128__" },
+ { "atmega1280", ARCH_AVR51, "__AVR_ATmega1280__" },
+ { "atmega1281", ARCH_AVR51, "__AVR_ATmega1281__" },
+ { "atmega1284p", ARCH_AVR51, "__AVR_ATmega1284P__" },
+- { "atmega128rfa1", ARCH_AVR51, "__AVR_ATmega128RFA1__" },
++ { "atmega128rfa1",ARCH_AVR51, "__AVR_ATmega128RFA1__" },
+ { "at90can128", ARCH_AVR51, "__AVR_AT90CAN128__" },
+ { "at90usb1286", ARCH_AVR51, "__AVR_AT90USB1286__" },
+ { "at90usb1287", ARCH_AVR51, "__AVR_AT90USB1287__" },
+- { "m3000f", ARCH_AVR51, "__AVR_M3000F__" },
+- { "m3000s", ARCH_AVR51, "__AVR_M3000S__" },
+- { "m3001b", ARCH_AVR51, "__AVR_M3001B__" },
+ /* 3-Byte PC. */
+ { "avr6", ARCH_AVR6, NULL },
+ { "atmega2560", ARCH_AVR6, "__AVR_ATmega2560__" },
+diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
+index e580d5e..3cbe842 100644
+--- a/gcc/config/avr/avr.h
++++ b/gcc/config/avr/avr.h
+@@ -841,40 +841,137 @@ mmcu=*:-mmcu=%*}"
+ mmcu=ata6289|\
+ mmcu=attiny13*|\
+ mmcu=attiny2313|\
++ mmcu=attiny2313a|\
+ mmcu=attiny24|\
++ mmcu=attiny24a|\
+ mmcu=attiny25|\
+ mmcu=attiny261|\
+- mmcu=attiny4*|\
+- mmcu=attiny8*: -m avr2}\
++ mmcu=attiny261a|\
++ mmcu=attiny4313|\
++ mmcu=attiny43u|\
++ mmcu=attiny44|\
++ mmcu=attiny44a|\
++ mmcu=attiny45|\
++ mmcu=attiny461|\
++ mmcu=attiny461a|\
++ mmcu=attiny48|\
++ mmcu=attiny84|\
++ mmcu=attiny84a|\
++ mmcu=attiny85|\
++ mmcu=attiny861|\
++ mmcu=attiny861a|\
++ mmcu=attiny87|\
++ mmcu=attiny88: -m avr2}\
+ %{mmcu=atmega103|\
+- mmcu=at43*|\
+- mmcu=at76*|\
++ mmcu=at43usb320|\
++ mmcu=at43usb355|\
++ mmcu=at76c711|\
+ mmcu=at90usb82|\
+ mmcu=at90usb162|\
++ mmcu=atmega8u2|\
++ mmcu=atmega16u2|\
++ mmcu=atmega32u2|\
++ mmcu=attiny167|\
+ mmcu=attiny16*|\
+ mmcu=attiny32*: -m avr3}\
+-%{mmcu=atmega8*|\
+- mmcu=atmega4*|\
++%{mmcu=atmega8|\
++ mmcu=atmega88|\
++ mmcu=atmega88a|\
++ mmcu=atmega88p|\
++ mmcu=atmega88pa|\
++ mmcu=atmega8515|\
++ mmcu=atmega8535|\
++ mmcu=atmega8hva|\
++ mmcu=atmega48|\
++ mmcu=atmega48a|\
++ mmcu=atmega48p|\
+ mmcu=at90pwm1|\
+ mmcu=at90pwm2|\
+ mmcu=at90pwm2b|\
+ mmcu=at90pwm3|\
+ mmcu=at90pwm3b|\
+ mmcu=at90pwm81: -m avr4}\
+-%{mmcu=atmega16*|\
+- mmcu=atmega32*|\
++%{mmcu=atmega16|\
++ mmcu=atmega16a|\
++ mmcu=atmega161|\
++ mmcu=atmega162|\
++ mmcu=atmega163|\
++ mmcu=atmega164a|\
++ mmcu=atmega164p|\
++ mmcu=atmega165|\
++ mmcu=atmega165a|\
++ mmcu=atmega165p|\
++ mmcu=atmega168|\
++ mmcu=atmega168a|\
++ mmcu=atmega168p|\
++ mmcu=atmega169|\
++ mmcu=atmega169a|\
++ mmcu=atmega169p|\
++ mmcu=atmega169pa|\
++ mmcu=atmega16hva|\
++ mmcu=atmega16hva2|\
++ mmcu=atmega16hvb|\
++ mmcu=atmega16m1|\
++ mmcu=atmega16u4|\
++ mmcu=atmega32|\
++ mmcu=atmega323|\
++ mmcu=atmega324a|\
++ mmcu=atmega324p|\
++ mmcu=atmega324pa|\
++ mmcu=atmega325|\
++ mmcu=atmega325a|\
++ mmcu=atmega325p|\
++ mmcu=atmega3250|\
++ mmcu=atmega3250a|\
++ mmcu=atmega3250p|\
++ mmcu=atmega328|\
++ mmcu=atmega328p|\
++ mmcu=atmega329|\
++ mmcu=atmega329a|\
++ mmcu=atmega329p|\
++ mmcu=atmega329pa|\
++ mmcu=atmega3290|\
++ mmcu=atmega3290a|\
++ mmcu=atmega3290p|\
++ mmcu=atmega32c1|\
++ mmcu=atmega32hvb|\
++ mmcu=atmega32m1|\
++ mmcu=atmega32u4|\
++ mmcu=atmega32u6|\
+ mmcu=atmega406|\
+- mmcu=atmega64*|\
++ mmcu=atmega64|\
++ mmcu=atmega640|\
++ mmcu=atmega644|\
++ mmcu=atmega644a|\
++ mmcu=atmega644p|\
++ mmcu=atmega644pa|\
++ mmcu=atmega645|\
++ mmcu=atmega645a|\
++ mmcu=atmega645p|\
++ mmcu=atmega6450|\
++ mmcu=atmega6450a|\
++ mmcu=atmega6450p|\
++ mmcu=atmega649|\
++ mmcu=atmega649a|\
++ mmcu=atmega649p|\
++ mmcu=atmega6490|\
++ mmcu=atmega6490a|\
++ mmcu=atmega6490p|\
++ mmcu=atmega64c1|\
++ mmcu=atmega64m1|\
++ mmcu=atmega64hve|\
+ mmcu=atmega128*|\
+ mmcu=at90can*|\
+ mmcu=at90pwm216|\
+ mmcu=at90pwm316|\
++ mmcu=atmega32c1|\
++ mmcu=atmega32m1|\
++ mmcu=atmega32u4|\
+ mmcu=at90scr100|\
+ mmcu=at90usb64*|\
+ mmcu=at90usb128*|\
+ mmcu=at94k|\
+- mmcu=m3000*|\
+- mmcu=m3001*: -m avr5}\
++ mmcu=m3000: -m avr5}\
+ %{mmcu=atmega256*:-m avr6}\
+ %{mmcu=atxmega16a4|\
+ mmcu=atxmega16d4|\
+@@ -892,6 +989,7 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=atxmega128a1:-m avrxmega7} \
+ %{mmcu=atmega324*|\
+ mmcu=atmega325*|\
++ mmcu=atmega328|\
+ mmcu=atmega328p|\
+ mmcu=atmega329*|\
+ mmcu=atmega406|\
+@@ -916,14 +1014,10 @@ mmcu=*:-mmcu=%*}"
+ mmcu=attiny88|\
+ mmcu=attiny87|\
+ mmcu=attiny167|\
+- mmcu=attiny327|\
+ mmcu=at90can*|\
+ mmcu=at90pwm*|\
+- mmcu=atmega8c1|\
+- mmcu=atmega16c1|\
+ mmcu=atmega32c1|\
+ mmcu=atmega64c1|\
+- mmcu=atmega8m1|\
+ mmcu=atmega16m1|\
+ mmcu=atmega32m1|\
+ mmcu=atmega64m1|\
+@@ -931,14 +1025,17 @@ mmcu=*:-mmcu=%*}"
+ mmcu=atmega32u*|\
+ mmcu=at90scr100|\
+ mmcu=ata6289|\
++ mmcu=atmega64hve|\
++ mmcu=atmega8u2|\
++ mmcu=atmega16u2|\
++ mmcu=atmega32u2|\
+ mmcu=at90usb*: -Tdata 0x800100}\
+ %{mmcu=atmega640|\
+ mmcu=atmega1280|\
+ mmcu=atmega1281|\
+ mmcu=atmega256*|\
+ mmcu=atmega128rfa1: -Tdata 0x800200}\
+-%{mmcu=m3000*|\
+- mmcu=m3001*: -Tdata 0x801000}"
++%{mmcu=m3000: -Tdata 0x801000}"
+
+ #define LIB_SPEC \
+ "%{!mmcu=at90s1*:%{!mmcu=attiny11:%{!mmcu=attiny12:%{!mmcu=attiny15:%{!mmcu=attiny28: -lc }}}}}"
+@@ -975,15 +1072,23 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=attiny13:crttn13.o%s} \
+ %{mmcu=attiny13a:crttn13a.o%s} \
+ %{mmcu=attiny2313|mmcu=avr25:crttn2313.o%s} \
++%{mmcu=attiny2313a:crttn2313a.o%s} \
+ %{mmcu=attiny24:crttn24.o%s} \
++%{mmcu=attiny24a:crttn24a.o%s} \
++%{mmcu=attiny4313:crttn4313.o%s} \
+ %{mmcu=attiny44:crttn44.o%s} \
++%{mmcu=attiny44a:crttn44a.o%s} \
+ %{mmcu=attiny84:crttn84.o%s} \
++%{mmcu=attiny84a:crttn84a.o%s} \
+ %{mmcu=attiny25:crttn25.o%s} \
+ %{mmcu=attiny45:crttn45.o%s} \
+ %{mmcu=attiny85:crttn85.o%s} \
+ %{mmcu=attiny261:crttn261.o%s} \
++%{mmcu=attiny261a:crttn261a.o%s} \
+ %{mmcu=attiny461:crttn461.o%s} \
++%{mmcu=attiny461a:crttn461a.o%s} \
+ %{mmcu=attiny861:crttn861.o%s} \
++%{mmcu=attiny861a:crttn861a.o%s} \
+ %{mmcu=attiny43u:crttn43u.o%s} \
+ %{mmcu=attiny87:crttn87.o%s} \
+ %{mmcu=attiny48:crttn48.o%s} \
+@@ -995,17 +1100,20 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=at43usb320:crt43320.o%s} \
+ %{mmcu=at90usb162|mmcu=avr35:crtusb162.o%s} \
+ %{mmcu=at90usb82:crtusb82.o%s} \
++%{mmcu=atmega8u2:crtm8u2.o%s} \
++%{mmcu=atmega16u2:crtm16u2.o%s} \
++%{mmcu=atmega32u2:crtm32u2.o%s} \
+ %{mmcu=attiny167:crttn167.o%s} \
+-%{mmcu=attiny327:crttn327.o%s} \
+ %{mmcu=atmega8|mmcu=avr4:crtm8.o%s} \
+ %{mmcu=atmega48:crtm48.o%s} \
++%{mmcu=atmega48a:crtm48a.o%s} \
+ %{mmcu=atmega48p:crtm48p.o%s} \
+ %{mmcu=atmega88:crtm88.o%s} \
++%{mmcu=atmega88a:crtm88a.o%s} \
+ %{mmcu=atmega88p:crtm88p.o%s} \
++%{mmcu=atmega88pa:crtm88pa.o%s} \
+ %{mmcu=atmega8515:crtm8515.o%s} \
+ %{mmcu=atmega8535:crtm8535.o%s} \
+-%{mmcu=atmega8c1:crtm8c1.o%s} \
+-%{mmcu=atmega8m1:crtm8m1.o%s} \
+ %{mmcu=at90pwm1:crt90pwm1.o%s} \
+ %{mmcu=at90pwm2:crt90pwm2.o%s} \
+ %{mmcu=at90pwm2b:crt90pwm2b.o%s} \
+@@ -1013,48 +1121,70 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=at90pwm3b:crt90pwm3b.o%s} \
+ %{mmcu=at90pwm81:crt90pwm81.o%s} \
+ %{mmcu=atmega16:crtm16.o%s} \
++%{mmcu=atmega16a:crtm16a.o%s} \
+ %{mmcu=atmega161|mmcu=avr5:crtm161.o%s} \
+ %{mmcu=atmega162:crtm162.o%s} \
+ %{mmcu=atmega163:crtm163.o%s} \
++%{mmcu=atmega164a:crtm164a.o%s} \
+ %{mmcu=atmega164p:crtm164p.o%s} \
+ %{mmcu=atmega165:crtm165.o%s} \
++%{mmcu=atmega165a:crtm165a.o%s} \
+ %{mmcu=atmega165p:crtm165p.o%s} \
+ %{mmcu=atmega168:crtm168.o%s} \
++%{mmcu=atmega168a:crtm168a.o%s} \
+ %{mmcu=atmega168p:crtm168p.o%s} \
+ %{mmcu=atmega169:crtm169.o%s} \
++%{mmcu=atmega169a:crtm169a.o%s} \
+ %{mmcu=atmega169p:crtm169p.o%s} \
++%{mmcu=atmega169pa:crtm169pa.o%s} \
+ %{mmcu=atmega32:crtm32.o%s} \
+ %{mmcu=atmega323:crtm323.o%s} \
++%{mmcu=atmega324a:crtm324a.o%s} \
+ %{mmcu=atmega324p:crtm324p.o%s} \
++%{mmcu=atmega324pa:crtm324pa.o%s} \
+ %{mmcu=atmega325:crtm325.o%s} \
++%{mmcu=atmega325a:crtm325a.o%s} \
+ %{mmcu=atmega325p:crtm325p.o%s} \
+ %{mmcu=atmega3250:crtm3250.o%s} \
++%{mmcu=atmega3250a:crtm3250a.o%s} \
+ %{mmcu=atmega3250p:crtm3250p.o%s} \
++%{mmcu=atmega328:crtm328.o%s} \
+ %{mmcu=atmega328p:crtm328p.o%s} \
+ %{mmcu=atmega329:crtm329.o%s} \
++%{mmcu=atmega329a:crtm329a.o%s} \
+ %{mmcu=atmega329p:crtm329p.o%s} \
++%{mmcu=atmega329pa:crtm329pa.o%s} \
+ %{mmcu=atmega3290:crtm3290.o%s} \
++%{mmcu=atmega3290a:crtm3290a.o%s} \
+ %{mmcu=atmega3290p:crtm3290p.o%s} \
+ %{mmcu=atmega406:crtm406.o%s} \
+ %{mmcu=atmega64:crtm64.o%s} \
+ %{mmcu=atmega640:crtm640.o%s} \
+ %{mmcu=atmega644:crtm644.o%s} \
++%{mmcu=atmega644a:crtm644a.o%s} \
+ %{mmcu=atmega644p:crtm644p.o%s} \
++%{mmcu=atmega644pa:crtm644pa.o%s} \
+ %{mmcu=atmega645:crtm645.o%s} \
++%{mmcu=atmega645a:crtm645a.o%s} \
++%{mmcu=atmega645p:crtm645p.o%s} \
+ %{mmcu=atmega6450:crtm6450.o%s} \
++%{mmcu=atmega6450a:crtm6450a.o%s} \
++%{mmcu=atmega6450p:crtm6450p.o%s} \
+ %{mmcu=atmega649:crtm649.o%s} \
++%{mmcu=atmega649a:crtm649a.o%s} \
+ %{mmcu=atmega6490:crtm6490.o%s} \
++%{mmcu=atmega6490a:crtm6490a.o%s} \
++%{mmcu=atmega6490p:crtm6490p.o%s} \
++%{mmcu=atmega64hve:crtm64hve.o%s} \
+ %{mmcu=atmega8hva:crtm8hva.o%s} \
+ %{mmcu=atmega16hva:crtm16hva.o%s} \
++%{mmcu=atmega16hva2:crtm16hva2.o%s} \
+ %{mmcu=atmega16hvb:crtm16hvb.o%s} \
+ %{mmcu=atmega32hvb:crtm32hvb.o%s} \
+-%{mmcu=atmega4hvd:crtm4hvd.o%s} \
+-%{mmcu=atmega8hvd:crtm8hvd.o%s} \
+ %{mmcu=at90can32:crtcan32.o%s} \
+ %{mmcu=at90can64:crtcan64.o%s} \
+ %{mmcu=at90pwm216:crt90pwm216.o%s} \
+ %{mmcu=at90pwm316:crt90pwm316.o%s} \
+-%{mmcu=atmega16c1:crtm16c1.o%s} \
+ %{mmcu=atmega32c1:crtm32c1.o%s} \
+ %{mmcu=atmega64c1:crtm64c1.o%s} \
+ %{mmcu=atmega16m1:crtm16m1.o%s} \
+@@ -1075,9 +1205,7 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=atmega128rfa1:crtm128rfa1.o%s} \
+ %{mmcu=at90usb1286:crtusb1286.o%s} \
+ %{mmcu=at90usb1287:crtusb1287.o%s} \
+-%{mmcu=m3000f:crtm3000f.o%s} \
+-%{mmcu=m3000s:crtm3000s.o%s} \
+-%{mmcu=m3001b:crtm3001b.o%s} \
++%{mmcu=m3000:crtm3000.o%s} \
+ %{mmcu=atmega2560|mmcu=avr6:crtm2560.o%s} \
+ %{mmcu=atmega2561:crtm2561.o%s} \
+ %{mmcu=avrxmega2|mmcu=atxmega32d4:crtx32d4.o%s} \
+diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
+index d375daf..af9e1de 100644
+--- a/gcc/config/avr/t-avr
++++ b/gcc/config/avr/t-avr
+@@ -82,16 +82,28 @@ MULTILIB_MATCHES = \
+ mmcu?avr25=mmcu?attiny13 \
+ mmcu?avr25=mmcu?attiny13a \
+ mmcu?avr25=mmcu?attiny2313 \
++ mmcu?avr25=mmcu?attiny2313a \
+ mmcu?avr25=mmcu?attiny24 \
++ mmcu?avr25=mmcu?attiny24a \
+ mmcu?avr25=mmcu?attiny44 \
++ mmcu?avr25=mmcu?attiny44a \
++ mmcu?avr25=mmcu?attiny45 \
+ mmcu?avr25=mmcu?attiny84 \
++ mmcu?avr25=mmcu?attiny84a \
+ mmcu?avr25=mmcu?attiny25 \
+ mmcu?avr25=mmcu?attiny45 \
+ mmcu?avr25=mmcu?attiny85 \
+ mmcu?avr25=mmcu?attiny261 \
++ mmcu?avr25=mmcu?attiny261a \
++ mmcu?avr25=mmcu?attiny4313 \
+ mmcu?avr25=mmcu?attiny461 \
++ mmcu?avr25=mmcu?attiny461a \
+ mmcu?avr25=mmcu?attiny861 \
+ mmcu?avr25=mmcu?attiny43u \
++ mmcu?avr25=mmcu?attiny84 \
++ mmcu?avr25=mmcu?attiny85 \
++ mmcu?avr25=mmcu?attiny861 \
++ mmcu?avr25=mmcu?attiny861a \
+ mmcu?avr25=mmcu?attiny87 \
+ mmcu?avr25=mmcu?attiny48 \
+ mmcu?avr25=mmcu?attiny88 \
+@@ -102,20 +114,21 @@ MULTILIB_MATCHES = \
+ mmcu?avr31=mmcu?at43usb320 \
+ mmcu?avr35=mmcu?at90usb82 \
+ mmcu?avr35=mmcu?at90usb162 \
++ mmcu?avr35=mmcu?atmega8u2 \
++ mmcu?avr35=mmcu?atmega16u2 \
++ mmcu?avr35=mmcu?atmega32u2 \
+ mmcu?avr35=mmcu?attiny167 \
+- mmcu?avr35=mmcu?attiny327 \
+ mmcu?avr4=mmcu?atmega48 \
++ mmcu?avr4=mmcu?atmega48a \
+ mmcu?avr4=mmcu?atmega48p \
+ mmcu?avr4=mmcu?atmega8 \
+ mmcu?avr4=mmcu?atmega8515 \
+ mmcu?avr4=mmcu?atmega8535 \
+ mmcu?avr4=mmcu?atmega88 \
++ mmcu?avr4=mmcu?atmega88a \
+ mmcu?avr4=mmcu?atmega88p \
++ mmcu?avr4=mmcu?atmega88pa \
+ mmcu?avr4=mmcu?atmega8hva \
+- mmcu?avr4=mmcu?atmega4hvd \
+- mmcu?avr4=mmcu?atmega8hvd \
+- mmcu?avr4=mmcu?atmega8c1 \
+- mmcu?avr4=mmcu?atmega8m1 \
+ mmcu?avr4=mmcu?at90pwm1 \
+ mmcu?avr4=mmcu?at90pwm2 \
+ mmcu?avr4=mmcu?at90pwm2b \
+@@ -123,45 +136,69 @@ MULTILIB_MATCHES = \
+ mmcu?avr4=mmcu?at90pwm3b \
+ mmcu?avr4=mmcu?at90pwm81 \
+ mmcu?avr5=mmcu?atmega16 \
++ mmcu?avr5=mmcu?atmega16a \
+ mmcu?avr5=mmcu?atmega161 \
+ mmcu?avr5=mmcu?atmega162 \
+ mmcu?avr5=mmcu?atmega163 \
++ mmcu?avr5=mmcu?atmega164a \
+ mmcu?avr5=mmcu?atmega164p \
+ mmcu?avr5=mmcu?atmega165 \
++ mmcu?avr5=mmcu?atmega165a \
+ mmcu?avr5=mmcu?atmega165p \
+ mmcu?avr5=mmcu?atmega168 \
++ mmcu?avr5=mmcu?atmega168a \
+ mmcu?avr5=mmcu?atmega168p \
+ mmcu?avr5=mmcu?atmega169 \
++ mmcu?avr5=mmcu?atmega169a \
+ mmcu?avr5=mmcu?atmega169p \
++ mmcu?avr5=mmcu?atmega169pa \
+ mmcu?avr5=mmcu?atmega32 \
+ mmcu?avr5=mmcu?atmega323 \
++ mmcu?avr5=mmcu?atmega324a \
+ mmcu?avr5=mmcu?atmega324p \
++ mmcu?avr5=mmcu?atmega324pa \
+ mmcu?avr5=mmcu?atmega325 \
++ mmcu?avr5=mmcu?atmega325a \
+ mmcu?avr5=mmcu?atmega325p \
+ mmcu?avr5=mmcu?atmega3250 \
++ mmcu?avr5=mmcu?atmega3250a \
+ mmcu?avr5=mmcu?atmega3250p \
++ mmcu?avr5=mmcu?atmega328 \
+ mmcu?avr5=mmcu?atmega328p \
+ mmcu?avr5=mmcu?atmega329 \
++ mmcu?avr5=mmcu?atmega329a \
+ mmcu?avr5=mmcu?atmega329p \
++ mmcu?avr5=mmcu?atmega329pa \
+ mmcu?avr5=mmcu?atmega3290 \
++ mmcu?avr5=mmcu?atmega3290a \
+ mmcu?avr5=mmcu?atmega3290p \
+ mmcu?avr5=mmcu?atmega406 \
+ mmcu?avr5=mmcu?atmega64 \
+ mmcu?avr5=mmcu?atmega640 \
+ mmcu?avr5=mmcu?atmega644 \
++ mmcu?avr5=mmcu?atmega644a \
+ mmcu?avr5=mmcu?atmega644p \
++ mmcu?avr5=mmcu?atmega644pa \
+ mmcu?avr5=mmcu?atmega645 \
++ mmcu?avr5=mmcu?atmega645a \
++ mmcu?avr5=mmcu?atmega645p \
+ mmcu?avr5=mmcu?atmega6450 \
++ mmcu?avr5=mmcu?atmega6450a \
++ mmcu?avr5=mmcu?atmega6450p \
+ mmcu?avr5=mmcu?atmega649 \
++ mmcu?avr5=mmcu?atmega649a \
++ mmcu?avr5=mmcu?atmega649p \
+ mmcu?avr5=mmcu?atmega6490 \
++ mmcu?avr5=mmcu?atmega6490a \
++ mmcu?avr5=mmcu?atmega6490p \
+ mmcu?avr5=mmcu?atmega16hva \
++ mmcu?avr5=mmcu?atmega16hva2 \
+ mmcu?avr5=mmcu?atmega16hvb \
+ mmcu?avr5=mmcu?atmega32hvb \
+ mmcu?avr5=mmcu?at90can32 \
+ mmcu?avr5=mmcu?at90can64 \
+ mmcu?avr5=mmcu?at90pwm216 \
+ mmcu?avr5=mmcu?at90pwm316 \
+- mmcu?avr5=mmcu?atmega16c1 \
+ mmcu?avr5=mmcu?atmega32c1 \
+ mmcu?avr5=mmcu?atmega64c1 \
+ mmcu?avr5=mmcu?atmega16m1 \
+@@ -170,6 +207,7 @@ MULTILIB_MATCHES = \
+ mmcu?avr5=mmcu?atmega16u4 \
+ mmcu?avr5=mmcu?atmega32u4 \
+ mmcu?avr5=mmcu?atmega32u6 \
++ mmcu?avr5=mmcu?atmega64hve \
+ mmcu?avr5=mmcu?at90scr100 \
+ mmcu?avr5=mmcu?at90usb646 \
+ mmcu?avr5=mmcu?at90usb647 \
+@@ -182,10 +220,8 @@ MULTILIB_MATCHES = \
+ mmcu?avr51=mmcu?at90can128 \
+ mmcu?avr51=mmcu?at90usb1286 \
+ mmcu?avr51=mmcu?at90usb1287 \
+- mmcu?avr51=mmcu?m3000f \
+- mmcu?avr51=mmcu?m3000s \
+- mmcu?avr51=mmcu?m3001b \
+- mmcu?avr6=mmcu?atmega2560 \
++ mmcu?avr51=mmcu?m3000 \
++ mmcu?avr6=mmcu?atmega2560 \
+ mmcu?avr6=mmcu?atmega2561 \
+ mmcu?avr6=mmcu?atmega2560 \
+ mmcu?avr6=mmcu?atmega2561 \
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0006-Add-support-for-more-XMEGA-devices.patch b/patches/gcc-4.4.3/atmel/0006-Add-support-for-more-XMEGA-devices.patch
new file mode 100644
index 0000000..b30f04b
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0006-Add-support-for-more-XMEGA-devices.patch
@@ -0,0 +1,145 @@
+From e2eb8b58916a5a6ba7d0ac2063d6b196f5fd350b Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Wed, 20 Apr 2011 22:05:41 +0200
+Subject: [PATCH 06/10] Add support for more XMEGA devices
+
+Add support for a bunch of new AVR XMEGA devices: atxmega64a1u,
+atxmega128a1u, atxmega128b1, atxmega16x1, atxmega32x1
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/53-gcc-4.4.3-xmega128a1u-64a1u.patch
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/63-gcc-4.4.3-atxmega16x1-32x1.patch
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/64-gcc-4.4.3-atxmega128b1.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 5 +++++
+ gcc/config/avr/avr.h | 20 +++++++++++++++-----
+ gcc/config/avr/t-avr | 7 ++++++-
+ 3 files changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 9c410a5..b818c8b 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -366,8 +366,10 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { "avrxmega2", ARCH_AVRXMEGA2, NULL },
+ { "atxmega16a4", ARCH_AVRXMEGA2, "__AVR_ATxmega16A4__" },
+ { "atxmega16d4", ARCH_AVRXMEGA2, "__AVR_ATxmega16D4__" },
++ { "atxmega16x1", ARCH_AVRXMEGA2, "__AVR_ATxmega16X1__" },
+ { "atxmega32a4", ARCH_AVRXMEGA2, "__AVR_ATxmega32A4__" },
+ { "atxmega32d4", ARCH_AVRXMEGA2, "__AVR_ATxmega32D4__" },
++ { "atxmega32x1", ARCH_AVRXMEGA2, "__AVR_ATxmega32X1__" },
+ /* Xmega, > 8K, <= 64K FLASH, > 64K RAM. */
+ /* { "avrxmega3", ARCH_AVRXMEGA3, NULL }, */
+ /* Xmega, > 64K, <= 128K FLASH, <= 64K RAM. */
+@@ -377,9 +379,11 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ /* Xmega, > 64K, <= 128K FLASH, > 64K RAM. */
+ { "avrxmega5", ARCH_AVRXMEGA5, NULL },
+ { "atxmega64a1", ARCH_AVRXMEGA5, "__AVR_ATxmega64A1__" },
++ { "atxmega64a1u", ARCH_AVRXMEGA5, "__AVR_ATxmega64A1U__" },
+ /* Xmega, > 128K, <= 256K FLASH, <= 64K RAM. */
+ { "avrxmega6", ARCH_AVRXMEGA6, NULL },
+ { "atxmega128a3", ARCH_AVRXMEGA6, "__AVR_ATxmega128A3__" },
++ { "atxmega128b1", ARCH_AVRXMEGA6, "__AVR_ATxmega128B1__" },
+ { "atxmega128d3", ARCH_AVRXMEGA6, "__AVR_ATxmega128D3__" },
+ { "atxmega192a3", ARCH_AVRXMEGA6, "__AVR_ATxmega192A3__" },
+ { "atxmega192d3", ARCH_AVRXMEGA6, "__AVR_ATxmega192D3__" },
+@@ -389,6 +393,7 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ /* Xmega, > 128K, <= 256K FLASH, > 64K RAM. */
+ { "avrxmega7", ARCH_AVRXMEGA7, NULL },
+ { "atxmega128a1", ARCH_AVRXMEGA7, "__AVR_ATxmega128A1__" },
++ { "atxmega128a1u",ARCH_AVRXMEGA7, "__AVR_ATxmega128A1U__" },
+ /* Assembler only. */
+ { "avr1", ARCH_AVR1, NULL },
+ { "at90s1200", ARCH_AVR1, "__AVR_AT90S1200__" },
+diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
+index 3cbe842..b0d5d60 100644
+--- a/gcc/config/avr/avr.h
++++ b/gcc/config/avr/avr.h
+@@ -975,18 +975,23 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=atmega256*:-m avr6}\
+ %{mmcu=atxmega16a4|\
+ mmcu=atxmega16d4|\
++ mmcu=atxmega16x1|\
+ mmcu=atxmega32d4|\
+- mmcu=atxmega32a4:-m avrxmega2} \
++ mmcu=atxmega32a4|\
++ mmcu=atxmega32x1:-m avrxmega2} \
+ %{mmcu=atxmega64a3|\
+ mmcu=atxmega64d3:-m avrxmega4} \
+-%{mmcu=atxmega64a1:-m avrxmega5} \
++%{mmcu=atxmega64a1|\
++ mmcu=atxmega64a1u:-m avrxmega5} \
+ %{mmcu=atxmega128a3|\
++ mmcu=atxmega128b1|\
+ mmcu=atxmega128d3|\
+ mmcu=atxmega192a3|\
+ mmcu=atxmega192d3|\
+ mmcu=atxmega256a3*|\
+ mmcu=atxmega256d3:-m avrxmega6} \
+-%{mmcu=atxmega128a1:-m avrxmega7} \
++%{mmcu=atxmega128a1|\
++ mmcu=atxmega128a1u:-m avrxmega7} \
+ %{mmcu=atmega324*|\
+ mmcu=atmega325*|\
+ mmcu=atmega328|\
+@@ -1211,18 +1216,23 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=avrxmega2|mmcu=atxmega32d4:crtx32d4.o%s} \
+ %{mmcu=atxmega16a4:crtx16a4.o%s} \
+ %{mmcu=atxmega16d4:crtx16d4.o%s} \
+-%{mmcu=atxmega3|mmcu=atxmega32a4:crtx32a4.o%s} \
++%{mmcu=atxmega16x1:crtx16x1.o%s} \
++%{mmcu=atxmega32a4:crtx32a4.o%s} \
++%{mmcu=atxmega32x1:crtx32x1.o%s} \
+ %{mmcu=atxmega4|mmcu=atxmega64a3:crtx64a3.o%s} \
+ %{mmcu=atxmega64d3:crtx64d3.o%s} \
+ %{mmcu=atxmega5|mmcu=atxmega64a1:crtx64a1.o%s} \
++%{mmcu=atxmega64a1u:crtx64a1u.o%s} \
+ %{mmcu=atxmega6|mmcu=atxmega128a3:crtx128a3.o%s} \
++%{mmcu=atxmega128b1:crtx128b1.o%s}\
+ %{mmcu=atxmega128d3:crtx128d3.o%s}\
+ %{mmcu=atxmega192a3:crtx192a3.o%s}\
+ %{mmcu=atxmega192d3:crtx192d3.o%s}\
+ %{mmcu=atxmega256a3:crtx256a3.o%s} \
+ %{mmcu=atxmega256a3b:crtx256a3b.o%s} \
+ %{mmcu=atxmega256d3:crtx256d3.o%s} \
+-%{mmcu=atxmega7|mmcu=atxmega128a1:crtx128a1.o%s}"
++%{mmcu=atxmega7|mmcu=atxmega128a1:crtx128a1.o%s} \
++%{mmcu=atxmega128a1u:crtx128a1u.o%s}"
+
+ #define EXTRA_SPECS {"crt_binutils", CRT_BINUTILS_SPECS},
+
+diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
+index af9e1de..b068d3c 100644
+--- a/gcc/config/avr/t-avr
++++ b/gcc/config/avr/t-avr
+@@ -227,19 +227,24 @@ MULTILIB_MATCHES = \
+ mmcu?avr6=mmcu?atmega2561 \
+ mmcu?avrxmega2=mmcu?atxmega16a4 \
+ mmcu?avrxmega2=mmcu?atxmega16d4 \
++ mmcu?avrxmega2=mmcu?atxmega16x1 \
+ mmcu?avrxmega2=mmcu?atxmega32d4 \
+ mmcu?avrxmega2=mmcu?atxmega32a4 \
++ mmcu?avrxmega2=mmcu?atxmega32x1 \
+ mmcu?avrxmega4=mmcu?atxmega64a3 \
+ mmcu?avrxmega4=mmcu?atxmega64d3 \
+ mmcu?avrxmega5=mmcu?atxmega64a1 \
++ mmcu?avrxmega5=mmcu?atxmega64a1u \
+ mmcu?avrxmega6=mmcu?atxmega128a3 \
++ mmcu?avrxmega6=mmcu?atxmega128b1 \
+ mmcu?avrxmega6=mmcu?atxmega128d3 \
+ mmcu?avrxmega6=mmcu?atxmega192a3 \
+ mmcu?avrxmega6=mmcu?atxmega192d3 \
+ mmcu?avrxmega6=mmcu?atxmega256a3 \
+ mmcu?avrxmega6=mmcu?atxmega256a3b \
+ mmcu?avrxmega6=mmcu?atxmega256d3 \
+- mmcu?avrxmega7=mmcu?atxmega128a1
++ mmcu?avrxmega7=mmcu?atxmega128a1 \
++ mmcu?avrxmega7=mmcu?atxmega128a1u
+
+ MULTILIB_EXCEPTIONS =
+
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0007-Add-support-for-devices-with-16-gp-registers.patch b/patches/gcc-4.4.3/atmel/0007-Add-support-for-devices-with-16-gp-registers.patch
new file mode 100644
index 0000000..42e5e49
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0007-Add-support-for-devices-with-16-gp-registers.patch
@@ -0,0 +1,2520 @@
+From 564ff03f509767b2b1e34ad906d634e0b5c1fdcf Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Wed, 20 Apr 2011 22:29:45 +0200
+Subject: [PATCH 07/10] Add support for devices with 16 gp registers
+
+Add support for a bunch of new AVR devices, especially tiny devices
+with 16 gp registers: attiny4, attiny5, attiny9, attiny10, attiny20,
+attiny40
+
+Compiler generates STS and LDS instructions for IO space less than
+0x40 for attiny10, attiny20 and attiny40 families.
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/54-gcc-4.4.3-avrtiny10.patch
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/55-gcc-4.4.3-avrtiny10-bug-12510.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 1370 +++++++++++++++++++++++++++++++----------
+ gcc/config/avr/avr.h | 55 ++-
+ gcc/config/avr/avr.md | 7 +-
+ gcc/config/avr/libgcc-fixed.S | 203 ++++---
+ gcc/config/avr/libgcc.S | 44 ++-
+ gcc/config/avr/predicates.md | 9 +-
+ gcc/config/avr/t-avr | 12 +-
+ 7 files changed, 1274 insertions(+), 426 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index b818c8b..422fad9 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -119,27 +119,28 @@ const struct base_arch_s *avr_current_arch;
+ section *progmem_section;
+
+ static const struct base_arch_s avr_arch_types[] = {
+- { 1, 0, 0, 0, 0, 0, 0, 0, 0, NULL }, /* Unknown device specified. */
+- { 1, 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=1" },
+- { 0, 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=2" },
+- { 0, 0, 0, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=25" },
+- { 0, 0, 1, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=3" },
+- { 0, 0, 1, 0, 1, 0, 0, 0, 0, "__AVR_ARCH__=31" },
+- { 0, 0, 1, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=35" },
+- { 0, 1, 0, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=4" },
+- { 0, 1, 1, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=5" },
+- { 0, 1, 1, 1, 1, 1, 0, 0, 0, "__AVR_ARCH__=51" },
+- { 0, 1, 1, 1, 1, 1, 1, 0, 0, "__AVR_ARCH__=6" },
+- { 0, 1, 0, 1, 0, 0, 0, 1, 0, "__AVR_ARCH__=101" },
+- { 0, 1, 1, 1, 0, 0, 0, 1, 0, "__AVR_ARCH__=102" },
+- { 0, 1, 1, 1, 0, 0, 0, 1, 1, "__AVR_ARCH__=103" },
+- { 0, 1, 1, 1, 1, 1, 0, 1, 0, "__AVR_ARCH__=104" },
+- { 0, 1, 1, 1, 1, 1, 0, 1, 1, "__AVR_ARCH__=105" },
+- { 0, 1, 1, 1, 1, 1, 1, 1, 0, "__AVR_ARCH__=106" },
+- { 0, 1, 1, 1, 1, 1, 1, 1, 1, "__AVR_ARCH__=107" }
++ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL }, /* Unknown device specified. */
++ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=1" },
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=2" },
++ { 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=25" },
++ { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=3" },
++ { 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, "__AVR_ARCH__=31" },
++ { 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=35" },
++ { 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=4" },
++ { 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, "__AVR_ARCH__=5" },
++ { 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, "__AVR_ARCH__=51" },
++ { 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, "__AVR_ARCH__=6" },
++ { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, "__AVR_ARCH__=101" },
++ { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, "__AVR_ARCH__=102" },
++ { 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, "__AVR_ARCH__=103" },
++ { 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, "__AVR_ARCH__=104" },
++ { 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, "__AVR_ARCH__=105" },
++ { 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, "__AVR_ARCH__=106" },
++ { 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, "__AVR_ARCH__=107" },
++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, "__AVR_ARCH__=201" }
+ };
+
+-/* These names are used as the index into the avr_arch_types[] table
++/* These names are used as the index into the avr_arch_types[] table
+ above. */
+
+ enum avr_arch
+@@ -161,7 +162,8 @@ enum avr_arch
+ ARCH_AVRXMEGA4,
+ ARCH_AVRXMEGA5,
+ ARCH_AVRXMEGA6,
+- ARCH_AVRXMEGA7
++ ARCH_AVRXMEGA7,
++ ARCH_AVRTINY10
+ };
+
+ struct mcu_type_s {
+@@ -394,6 +396,14 @@ static const struct mcu_type_s avr_mcu_types[] = {
+ { "avrxmega7", ARCH_AVRXMEGA7, NULL },
+ { "atxmega128a1", ARCH_AVRXMEGA7, "__AVR_ATxmega128A1__" },
+ { "atxmega128a1u",ARCH_AVRXMEGA7, "__AVR_ATxmega128A1U__" },
++ /* tiny10 family */
++ { "avrtiny10", ARCH_AVRTINY10, NULL },
++ { "attiny4", ARCH_AVRTINY10, "__AVR_ATtiny4__" },
++ { "attiny5", ARCH_AVRTINY10, "__AVR_ATtiny5__" },
++ { "attiny9", ARCH_AVRTINY10, "__AVR_ATtiny9__" },
++ { "attiny10", ARCH_AVRTINY10, "__AVR_ATtiny10__" },
++ { "attiny20", ARCH_AVRTINY10, "__AVR_ATtiny20__" },
++ { "attiny40", ARCH_AVRTINY10, "__AVR_ATtiny40__" },
+ /* Assembler only. */
+ { "avr1", ARCH_AVR1, NULL },
+ { "at90s1200", ARCH_AVR1, "__AVR_AT90S1200__" },
+@@ -510,10 +520,8 @@ avr_override_options (void)
+ inform (input_location, "use the -fno-jump-tables switch instead");
+ warned_no_tablejump_deprecated = true;
+ }
+-
+- tmp_reg_rtx = gen_rtx_REG (QImode, TMP_REGNO);
+- zero_reg_rtx = gen_rtx_REG (QImode, ZERO_REGNO);
+-
++ tmp_reg_rtx = gen_rtx_REG (QImode, AVR_TINY ? TMP_REGNO_AVRTINY10 : TMP_REGNO);
++ zero_reg_rtx = gen_rtx_REG (QImode, AVR_TINY ? ZERO_REGNO_AVRTINY10 : ZERO_REGNO);
+ init_machine_status = avr_init_machine_status;
+ }
+
+@@ -1863,7 +1871,7 @@ avr_simplify_comparison_p (enum machine_mode mode, RTX_CODE op, rtx x)
+ int
+ function_arg_regno_p(int r)
+ {
+- return (r >= 8 && r <= 25);
++ return (AVR_TINY ? r >= 20 && r <= 25 : r >= 8 && r <= 25);
+ }
+
+ /* Initializing the variable cum for the state at the beginning
+@@ -1873,7 +1881,11 @@ void
+ init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype, rtx libname,
+ tree fndecl ATTRIBUTE_UNUSED)
+ {
++ if (AVR_TINY)
++ cum->nregs = 6;
++ else
+ cum->nregs = 18;
++
+ cum->regno = FIRST_CUM_REG;
+ if (!libname && fntype)
+ {
+@@ -1897,9 +1909,8 @@ avr_num_arg_regs (enum machine_mode mode, tree type)
+ else
+ size = GET_MODE_SIZE (mode);
+
+- /* Align all function arguments to start in even-numbered registers.
++ /* if not AVR_TINY, Align all function arguments to start in even-numbered registers.
+ Odd-sized arguments leave holes above them. */
+-
+ return (size + 1) & ~1;
+ }
+
+@@ -2231,9 +2242,19 @@ out_movqi_r_mr (rtx insn, rtx op[], int *l)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+- return *l = 3, (AS2 (adiw,r28,%o1-63) CR_TAB
+- AS2 (ldd,%0,Y+63) CR_TAB
+- AS2 (sbiw,r28,%o1-63));
++ return *l = 3, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o1-63))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o1-63))) CR_TAB
++ AS2 (subi,r28,lo8(-63)) CR_TAB
++ AS2 (sbci,r29,hi8(-63)) CR_TAB
++ AS2 (ld,%0,Y) CR_TAB
++ AS2 (subi,r28,lo8(63)) CR_TAB
++ AS2 (sbci,r29,hi8(63)) CR_TAB
++ AS2 (subi,r28,lo8(%o1-63)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1-63)))
++ : (AS2 (adiw,r28,%o1-63) CR_TAB
++ AS2 (ldd,%0,Y+63) CR_TAB
++ AS2 (sbiw,r28,%o1-63));
++
+
+ return *l = 5, (AS2 (subi,r28,lo8(-%o1)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
+@@ -2247,15 +2268,38 @@ out_movqi_r_mr (rtx insn, rtx op[], int *l)
+ it but I have this situation with extremal optimizing options. */
+ if (reg_overlap_mentioned_p (dest, XEXP (x,0))
+ || reg_unused_after (insn, XEXP (x,0)))
+- return *l = 2, (AS2 (adiw,r26,%o1) CR_TAB
+- AS2 (ld,%0,X));
+-
+- return *l = 3, (AS2 (adiw,r26,%o1) CR_TAB
+- AS2 (ld,%0,X) CR_TAB
+- AS2 (sbiw,r26,%o1));
++ return *l = 2, AVR_TINY ? (AS2 (subi,r26,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%0,X))
++ : (AS2 (adiw,r26,%o1) CR_TAB
++ AS2 (ld,%0,X));
++
++ return *l = 3, AVR_TINY ? (AS2 (subi,r26,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%0,X) CR_TAB
++ AS2 (subi,r26,lo8(%o1)) CR_TAB
++ AS2 (sbci,r27,hi8(%o1)))
++ : (AS2 (adiw,r26,%o1) CR_TAB
++ AS2 (ld,%0,X) CR_TAB
++ AS2 (sbiw,r26,%o1));
+ }
++
+ *l = 1;
+- return AS2 (ldd,%0,%1);
++ op[2] = XEXP(x, 0);
++ if(REGNO(op[2]) == REG_Y)
++ return AVR_TINY ? ( AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%0,Y) CR_TAB
++ AS2 (subi,%A2,lo8(%o1)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1)))
++ : AS2 (ldd,%0,%1);
++ if(REGNO(op[2]) == REG_Z)
++ return AVR_TINY ? ( AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%0,Z) CR_TAB
++ AS2 (subi,%A2,lo8(%o1)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1)))
++ : AS2 (ldd,%0,%1);
+ }
+ *l = 1;
+ return AS2 (ld,%0,%1);
+@@ -2295,15 +2339,35 @@ out_movhi_r_mr (rtx insn, rtx op[], int *l)
+ AS2 (ld,%B0,X));
+ }
+ *l = 3;
+- return (AS2 (ld,%A0,X+) CR_TAB
+- AS2 (ld,%B0,X) CR_TAB
+- AS2 (sbiw,r26,1));
++ return AVR_TINY ? (AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)))
++ : (AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (sbiw,r26,1));
+ }
+ else /* (R) */
+ {
+ *l = 2;
+- return (AS2 (ld,%A0,%1) CR_TAB
+- AS2 (ldd,%B0,%1+1));
++ if(reg_base == REG_Y)
++ return AVR_TINY ? (AS2 (ld,%A0,%1) CR_TAB
++ AS2 (subi,r28,lo8((-1))) CR_TAB
++ AS2 (sbci,r29,hi8((-1))) CR_TAB
++ AS2 (ld,%B0,%1) CR_TAB
++ AS2 (subi,r28,lo8(1)) CR_TAB
++ AS2 (sbci,r29,hi8(1)))
++ : (AS2 (ld,%A0,%1) CR_TAB
++ AS2 (ldd,%B0,%1+1));
++ if(reg_base == REG_Z)
++ return AVR_TINY ? (AS2 (ld,%A0,%1) CR_TAB
++ AS2 (subi,r30,lo8((-1))) CR_TAB
++ AS2 (sbci,r31,hi8((-1))) CR_TAB
++ AS2 (ld,%B0,%1) CR_TAB
++ AS2 (subi,r30,lo8(1)) CR_TAB
++ AS2 (sbci,r31,hi8(1)))
++ : (AS2 (ld,%A0,%1) CR_TAB
++ AS2 (ldd,%B0,%1+1));
+ }
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+@@ -2317,17 +2381,35 @@ out_movhi_r_mr (rtx insn, rtx op[], int *l)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+- return *l = 4, (AS2 (adiw,r28,%o1-62) CR_TAB
+- AS2 (ldd,%A0,Y+62) CR_TAB
+- AS2 (ldd,%B0,Y+63) CR_TAB
+- AS2 (sbiw,r28,%o1-62));
+-
+- return *l = 6, (AS2 (subi,r28,lo8(-%o1)) CR_TAB
+- AS2 (sbci,r29,hi8(-%o1)) CR_TAB
+- AS2 (ld,%A0,Y) CR_TAB
+- AS2 (ldd,%B0,Y+1) CR_TAB
+- AS2 (subi,r28,lo8(%o1)) CR_TAB
+- AS2 (sbci,r29,hi8(%o1)));
++ return *l = 4, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o1-62))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o1-62))) CR_TAB
++ AS2 (subi,r28,lo8(-62)) CR_TAB
++ AS2 (sbci,r29,hi8(-62)) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y) CR_TAB
++ AS2 (subi,r28,lo8(63)) CR_TAB
++ AS2 (sbci,r29,hi8(63)) CR_TAB
++ AS2 (subi,r28,lo8(%o1-62)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1-62)))
++ : (AS2 (adiw,r28,%o1-62) CR_TAB
++ AS2 (ldd,%A0,Y+62) CR_TAB
++ AS2 (ldd,%B0,Y+63) CR_TAB
++ AS2 (sbiw,r28,%o1-62));
++
++ return *l = 6, AVR_TINY ? (AS2 (subi,r28,lo8(-%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y) CR_TAB
++ AS2 (subi,r28,lo8(1)) CR_TAB
++ AS2 (sbci,r29,hi8(1)) CR_TAB
++ AS2 (subi,r28,lo8(%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1)))
++ : (AS2 (subi,r28,lo8(-%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
++ AS2 (ld,%A0,Y) CR_TAB
++ AS2 (ldd,%B0,Y+1) CR_TAB
++ AS2 (subi,r28,lo8(%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1)));
+ }
+ if (reg_base == REG_X)
+ {
+@@ -2337,28 +2419,79 @@ out_movhi_r_mr (rtx insn, rtx op[], int *l)
+
+ *l = 4;
+ if (reg_base == reg_dest)
+- return (AS2 (adiw,r26,%o1) CR_TAB
+- AS2 (ld,__tmp_reg__,X+) CR_TAB
+- AS2 (ld,%B0,X) CR_TAB
+- AS2 (mov,%A0,__tmp_reg__));
+-
+- return (AS2 (adiw,r26,%o1) CR_TAB
+- AS2 (ld,%A0,X+) CR_TAB
+- AS2 (ld,%B0,X) CR_TAB
+- AS2 (sbiw,r26,%o1+1));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-%o1)) CR_TAB
++ AS2 (sbci,r27,hi8(-%o1)) CR_TAB
++ AS2 (ld,__tmp_reg__,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (mov,%A0,__tmp_reg__))
++ : (AS2 (adiw,r26,%o1) CR_TAB
++ AS2 (ld,__tmp_reg__,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (mov,%A0,__tmp_reg__));
++
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-%o1)) CR_TAB
++ AS2 (sbci,r27,hi8(-%o1)) CR_TAB
++ AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (subi,r26,lo8(%o1+1)) CR_TAB
++ AS2 (sbci,r27,hi8(%o1+1)))
++ : (AS2 (adiw,r26,%o1) CR_TAB
++ AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (sbiw,r26,%o1+1));
+ }
+
+ if (reg_base == reg_dest)
+ {
+ *l = 3;
+- return (AS2 (ldd,__tmp_reg__,%A1) CR_TAB
+- AS2 (ldd,%B0,%B1) CR_TAB
+- AS2 (mov,%A0,__tmp_reg__));
++ op[2] = XEXP(base, 0);
++
++ if(REGNO(op[2]) == REG_Y)
++ return AVR_TINY ? ( AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,__tmp_reg__,Y+) CR_TAB
++ AS2 (ld,%B0,Y) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+1)) CR_TAB
++					AS2 (sbci,%B2,hi8(%o1+1)) CR_TAB
++ AS2 (mov,%A0,__tmp_reg__))
++ : (AS2 (ldd,__tmp_reg__,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1) CR_TAB
++ AS2 (mov,%A0,__tmp_reg__));
++ if(REGNO(op[2]) == REG_Z)
++ return AVR_TINY ? ( AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,__tmp_reg__,Z+) CR_TAB
++ AS2 (ld,%B0,Z) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+1)) CR_TAB
++					AS2 (sbci,%B2,hi8(%o1+1)) CR_TAB
++ AS2 (mov,%A0,__tmp_reg__))
++ : (AS2 (ldd,__tmp_reg__,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1) CR_TAB
++ AS2 (mov,%A0,__tmp_reg__));
+ }
+-
+ *l = 2;
+- return (AS2 (ldd,%A0,%A1) CR_TAB
+- AS2 (ldd,%B0,%B1));
++
++ op[2] = XEXP(base, 0);
++
++ if(REGNO(op[2]) == REG_Y)
++ return AVR_TINY ? ( AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+1)) CR_TAB
++					AS2 (sbci,%B2,hi8(%o1+1)))
++ : (AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1));
++ if(REGNO(op[2]) == REG_Z)
++ return AVR_TINY ? ( AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,Z+) CR_TAB
++ AS2 (ld,%B0,Z) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+1)) CR_TAB
++					AS2 (sbci,%B2,hi8(%o1+1)))
++ : (AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1));
++
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ {
+@@ -2370,17 +2503,32 @@ out_movhi_r_mr (rtx insn, rtx op[], int *l)
+ if (REGNO (XEXP (base, 0)) == REG_X)
+ {
+ *l = 4;
+- return (AS2 (sbiw,r26,2) CR_TAB
+- AS2 (ld,%A0,X+) CR_TAB
+- AS2 (ld,%B0,X) CR_TAB
+- AS2 (sbiw,r26,1));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(2)) CR_TAB
++ AS2 (sbci,r27,hi8(2)) CR_TAB
++ AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)))
++ : (AS2 (sbiw,r26,2) CR_TAB
++ AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X) CR_TAB
++ AS2 (sbiw,r26,1));
+ }
+ else
+ {
+ *l = 3;
+- return (AS2 (sbiw,%r1,2) CR_TAB
+- AS2 (ld,%A0,%p1) CR_TAB
+- AS2 (ldd,%B0,%p1+1));
++ //FIXME:check the code once again for AVR_TINY
++ return AVR_TINY ? (AS2 (subi,%A1,lo8(3)) CR_TAB
++ AS2 (sbci,%B1,hi8(3)) CR_TAB
++ AS2 (ld,%A0,%p1) CR_TAB
++ AS2 (subi,%A1,lo8(-1)) CR_TAB
++ AS2 (sbci,%B1,hi8(-1)) CR_TAB
++ AS2 (ld,%B0,%p1) CR_TAB
++ AS2 (subi,%A1,lo8(1)) CR_TAB
++ AS2 (sbci,%B1,hi8(1)))
++ : (AS2 (sbiw,%r1,2) CR_TAB
++ AS2 (ld,%A0,%p1) CR_TAB
++ AS2 (ldd,%B0,%p1+1));
+ }
+ }
+
+@@ -2434,13 +2582,23 @@ out_movsi_r_mr (rtx insn, rtx op[], int *l)
+ {
+ if (reg_dest == REG_X)
+ /* "ld r26,-X" is undefined */
+- return *l=7, (AS2 (adiw,r26,3) CR_TAB
+- AS2 (ld,r29,X) CR_TAB
+- AS2 (ld,r28,-X) CR_TAB
+- AS2 (ld,__tmp_reg__,-X) CR_TAB
+- AS2 (sbiw,r26,1) CR_TAB
+- AS2 (ld,r26,X) CR_TAB
+- AS2 (mov,r27,__tmp_reg__));
++ return *l=7, AVR_TINY ? (AS2 (subi,r26,lo8(-3)) CR_TAB
++ AS2 (sbci,r27,hi8(-3)) CR_TAB
++ AS2 (ld,r29,X) CR_TAB
++ AS2 (ld,r28,-X) CR_TAB
++ AS2 (ld,__tmp_reg__,-X) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)) CR_TAB
++ AS2 (ld,r26,X) CR_TAB
++ AS2 (mov,r27,__tmp_reg__))
++ : (AS2 (adiw,r26,3) CR_TAB
++ AS2 (ld,r29,X) CR_TAB
++ AS2 (ld,r28,-X) CR_TAB
++ AS2 (ld,__tmp_reg__,-X) CR_TAB
++ AS2 (sbiw,r26,1) CR_TAB
++ AS2 (ld,r26,X) CR_TAB
++ AS2 (mov,r27,__tmp_reg__));
++
+ else if (reg_dest == REG_X - 2)
+ return *l=5, (AS2 (ld,%A0,X+) CR_TAB
+ AS2 (ld,%B0,X+) CR_TAB
+@@ -2453,33 +2611,114 @@ out_movsi_r_mr (rtx insn, rtx op[], int *l)
+ AS2 (ld,%C0,X+) CR_TAB
+ AS2 (ld,%D0,X));
+ else
+- return *l=5, (AS2 (ld,%A0,X+) CR_TAB
+- AS2 (ld,%B0,X+) CR_TAB
+- AS2 (ld,%C0,X+) CR_TAB
+- AS2 (ld,%D0,X) CR_TAB
+- AS2 (sbiw,r26,3));
++ return *l=5, AVR_TINY ? (AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X+) CR_TAB
++ AS2 (ld,%C0,X+) CR_TAB
++ AS2 (ld,%D0,X) CR_TAB
++ AS2 (subi,r26,lo8(3)) CR_TAB
++ AS2 (sbci,r27,hi8(3)))
++ : (AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X+) CR_TAB
++ AS2 (ld,%C0,X+) CR_TAB
++ AS2 (ld,%D0,X) CR_TAB
++ AS2 (sbiw,r26,3));
+ }
+ else
+ {
+ if (reg_dest == reg_base)
+- return *l=5, (AS2 (ldd,%D0,%1+3) CR_TAB
+- AS2 (ldd,%C0,%1+2) CR_TAB
+- AS2 (ldd,__tmp_reg__,%1+1) CR_TAB
+- AS2 (ld,%A0,%1) CR_TAB
+- AS2 (mov,%B0,__tmp_reg__));
++ {
++ if(reg_base == REG_Y)
++ return *l=5, AVR_TINY ? (AS2 (subi,r28,lo8(-3)) CR_TAB
++ AS2 (sbci,r29,hi8(-3)) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (ld,%C0,-Y) CR_TAB
++ AS2 (subi,r28,lo8(1)) CR_TAB
++ AS2 (sbci,r29,hi8(1)) CR_TAB
++ AS2 (ld,__tmp_reg__,%1) CR_TAB
++ AS2 (subi,r28,lo8(1)) CR_TAB
++ AS2 (sbci,r29,hi8(1)) CR_TAB
++ AS2 (ld,%A0,%1) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__))
++ : (AS2 (ldd,%D0,%1+3) CR_TAB
++ AS2 (ldd,%C0,%1+2) CR_TAB
++ AS2 (ldd,__tmp_reg__,%1+1) CR_TAB
++ AS2 (ld,%A0,%1) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__));
++ if(reg_base == REG_Z)
++ return *l=5, AVR_TINY ? (AS2 (subi,r30,lo8(-3)) CR_TAB
++ AS2 (sbci,r31,hi8(-3)) CR_TAB
++ AS2 (ld,%D0,Z) CR_TAB
++ AS2 (ld,%C0,-Z) CR_TAB
++ AS2 (subi,r30,lo8(1)) CR_TAB
++ AS2 (sbci,r31,hi8(1)) CR_TAB
++ AS2 (ld,__tmp_reg__,%1) CR_TAB
++ AS2 (subi,r30,lo8(1)) CR_TAB
++ AS2 (sbci,r31,hi8(1)) CR_TAB
++ AS2 (ld,%A0,%1) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__))
++ : (AS2 (ldd,%D0,%1+3) CR_TAB
++ AS2 (ldd,%C0,%1+2) CR_TAB
++ AS2 (ldd,__tmp_reg__,%1+1) CR_TAB
++ AS2 (ld,%A0,%1) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__));
++ }
++
+ else if (reg_base == reg_dest + 2)
+- return *l=5, (AS2 (ld ,%A0,%1) CR_TAB
+- AS2 (ldd,%B0,%1+1) CR_TAB
+- AS2 (ldd,__tmp_reg__,%1+2) CR_TAB
+- AS2 (ldd,%D0,%1+3) CR_TAB
+- AS2 (mov,%C0,__tmp_reg__));
++ {
++ if(reg_base == REG_Y)
++ return *l=5, AVR_TINY ? (AS2 (ld ,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y+) CR_TAB
++ AS2 (ld,__tmp_reg__,Y+) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (subi,r28,lo8(3)) CR_TAB
++ AS2 (sbci,r29,hi8(3)) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__))
++ : (AS2 (ld ,%A0,%1) CR_TAB
++ AS2 (ldd,%B0,%1+1) CR_TAB
++ AS2 (ldd,__tmp_reg__,%1+2) CR_TAB
++ AS2 (ldd,%D0,%1+3) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__));
++ if(reg_base == REG_Z)
++ return *l=5, AVR_TINY ? (AS2 (ld ,%A0,Z+) CR_TAB
++ AS2 (ld,%B0,Z+) CR_TAB
++ AS2 (ld,__tmp_reg__,Z+) CR_TAB
++ AS2 (ld,%D0,Z) CR_TAB
++ AS2 (subi,r30,lo8(3)) CR_TAB
++ AS2 (sbci,r31,hi8(3)) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__))
++ : (AS2 (ld ,%A0,%1) CR_TAB
++ AS2 (ldd,%B0,%1+1) CR_TAB
++ AS2 (ldd,__tmp_reg__,%1+2) CR_TAB
++ AS2 (ldd,%D0,%1+3) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__));
++ }
+ else
+- return *l=4, (AS2 (ld ,%A0,%1) CR_TAB
+- AS2 (ldd,%B0,%1+1) CR_TAB
+- AS2 (ldd,%C0,%1+2) CR_TAB
+- AS2 (ldd,%D0,%1+3));
++ {
++ if(reg_base == REG_Y)
++ return *l=4, AVR_TINY ? (AS2 (ld ,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y+) CR_TAB
++ AS2 (ld,%C0,Y+) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (subi,r28,lo8(3)) CR_TAB
++ AS2 (sbci,r29,hi8(3)))
++ : (AS2 (ld ,%A0,%1) CR_TAB
++ AS2 (ldd,%B0,%1+1) CR_TAB
++ AS2 (ldd,%C0,%1+2) CR_TAB
++ AS2 (ldd,%D0,%1+3));
++ if(reg_base == REG_Z)
++ return *l=4, AVR_TINY ? (AS2 (ld ,%A0,Z+) CR_TAB
++ AS2 (ld,%B0,Z+) CR_TAB
++ AS2 (ld,%C0,Z+) CR_TAB
++ AS2 (ld,%D0,Z) CR_TAB
++ AS2 (subi,r30,lo8(3)) CR_TAB
++ AS2 (sbci,r31,hi8(3)))
++ : (AS2 (ld ,%A0,%1) CR_TAB
++ AS2 (ldd,%B0,%1+1) CR_TAB
++ AS2 (ldd,%C0,%1+2) CR_TAB
++ AS2 (ldd,%D0,%1+3));
+ }
+ }
++ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+@@ -2490,21 +2729,43 @@ out_movsi_r_mr (rtx insn, rtx op[], int *l)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+- return *l = 6, (AS2 (adiw,r28,%o1-60) CR_TAB
+- AS2 (ldd,%A0,Y+60) CR_TAB
+- AS2 (ldd,%B0,Y+61) CR_TAB
+- AS2 (ldd,%C0,Y+62) CR_TAB
+- AS2 (ldd,%D0,Y+63) CR_TAB
+- AS2 (sbiw,r28,%o1-60));
+-
+- return *l = 8, (AS2 (subi,r28,lo8(-%o1)) CR_TAB
+- AS2 (sbci,r29,hi8(-%o1)) CR_TAB
+- AS2 (ld,%A0,Y) CR_TAB
+- AS2 (ldd,%B0,Y+1) CR_TAB
+- AS2 (ldd,%C0,Y+2) CR_TAB
+- AS2 (ldd,%D0,Y+3) CR_TAB
+- AS2 (subi,r28,lo8(%o1)) CR_TAB
+- AS2 (sbci,r29,hi8(%o1)));
++ return *l = 6, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o1-60))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o1-60))) CR_TAB
++ AS2 (subi,r28,lo8(-60)) CR_TAB
++ AS2 (sbci,r29,hi8(-60)) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y+) CR_TAB
++ AS2 (ld,%C0,Y+) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (subi,r28,lo8(63)) CR_TAB
++ AS2 (sbci,r29,hi8(63)) CR_TAB
++ AS2 (subi,r28,lo8(%o1-60)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1-60)))
++ : (AS2 (adiw,r28,%o1-60) CR_TAB
++ AS2 (ldd,%A0,Y+60) CR_TAB
++ AS2 (ldd,%B0,Y+61) CR_TAB
++ AS2 (ldd,%C0,Y+62) CR_TAB
++ AS2 (ldd,%D0,Y+63) CR_TAB
++ AS2 (sbiw,r28,%o1-60));
++
++ return *l = 8, AVR_TINY ? (AS2 (subi,r28,lo8(-%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y+) CR_TAB
++ AS2 (ld,%C0,Y+) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (subi,r28,lo8(3)) CR_TAB
++ AS2 (sbci,r29,hi8(3)) CR_TAB
++ AS2 (subi,r28,lo8(%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1)))
++ : (AS2 (subi,r28,lo8(-%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o1)) CR_TAB
++ AS2 (ld,%A0,Y) CR_TAB
++ AS2 (ldd,%B0,Y+1) CR_TAB
++ AS2 (ldd,%C0,Y+2) CR_TAB
++ AS2 (ldd,%D0,Y+3) CR_TAB
++ AS2 (subi,r28,lo8(%o1)) CR_TAB
++ AS2 (sbci,r29,hi8(%o1)));
+ }
+
+ reg_base = true_regnum (XEXP (base, 0));
+@@ -2515,46 +2776,151 @@ out_movsi_r_mr (rtx insn, rtx op[], int *l)
+ {
+ *l = 7;
+ /* "ld r26,-X" is undefined */
+- return (AS2 (adiw,r26,%o1+3) CR_TAB
+- AS2 (ld,r29,X) CR_TAB
+- AS2 (ld,r28,-X) CR_TAB
+- AS2 (ld,__tmp_reg__,-X) CR_TAB
+- AS2 (sbiw,r26,1) CR_TAB
+- AS2 (ld,r26,X) CR_TAB
+- AS2 (mov,r27,__tmp_reg__));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-(%o1+3))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o1+3))) CR_TAB
++ AS2 (ld,r29,X) CR_TAB
++ AS2 (ld,r28,-X) CR_TAB
++ AS2 (ld,__tmp_reg__,-X) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)) CR_TAB
++ AS2 (ld,r26,X) CR_TAB
++ AS2 (mov,r27,__tmp_reg__))
++ : (AS2 (adiw,r26,%o1+3) CR_TAB
++ AS2 (ld,r29,X) CR_TAB
++ AS2 (ld,r28,-X) CR_TAB
++ AS2 (ld,__tmp_reg__,-X) CR_TAB
++ AS2 (sbiw,r26,1) CR_TAB
++ AS2 (ld,r26,X) CR_TAB
++ AS2 (mov,r27,__tmp_reg__));
+ }
+ *l = 6;
+ if (reg_dest == REG_X - 2)
+- return (AS2 (adiw,r26,%o1) CR_TAB
+- AS2 (ld,r24,X+) CR_TAB
+- AS2 (ld,r25,X+) CR_TAB
+- AS2 (ld,__tmp_reg__,X+) CR_TAB
+- AS2 (ld,r27,X) CR_TAB
+- AS2 (mov,r26,__tmp_reg__));
+-
+- return (AS2 (adiw,r26,%o1) CR_TAB
+- AS2 (ld,%A0,X+) CR_TAB
+- AS2 (ld,%B0,X+) CR_TAB
+- AS2 (ld,%C0,X+) CR_TAB
+- AS2 (ld,%D0,X) CR_TAB
+- AS2 (sbiw,r26,%o1+3));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o1))) CR_TAB
++ AS2 (ld,r24,X+) CR_TAB
++ AS2 (ld,r25,X+) CR_TAB
++ AS2 (ld,__tmp_reg__,X+) CR_TAB
++ AS2 (ld,r27,X) CR_TAB
++ AS2 (mov,r26,__tmp_reg__))
++ : (AS2 (adiw,r26,%o1) CR_TAB
++ AS2 (ld,r24,X+) CR_TAB
++ AS2 (ld,r25,X+) CR_TAB
++ AS2 (ld,__tmp_reg__,X+) CR_TAB
++ AS2 (ld,r27,X) CR_TAB
++ AS2 (mov,r26,__tmp_reg__));
++
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-(%o1))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X+) CR_TAB
++ AS2 (ld,%C0,X+) CR_TAB
++ AS2 (ld,%D0,X) CR_TAB
++ AS2 (subi,r26,lo8(%o1+3)) CR_TAB
++ AS2 (sbci,r27,hi8(%o1+3)))
++ : (AS2 (adiw,r26,%o1) CR_TAB
++ AS2 (ld,%A0,X+) CR_TAB
++ AS2 (ld,%B0,X+) CR_TAB
++ AS2 (ld,%C0,X+) CR_TAB
++ AS2 (ld,%D0,X) CR_TAB
++ AS2 (sbiw,r26,%o1+3));
+ }
+ if (reg_dest == reg_base)
+- return *l=5, (AS2 (ldd,%D0,%D1) CR_TAB
+- AS2 (ldd,%C0,%C1) CR_TAB
+- AS2 (ldd,__tmp_reg__,%B1) CR_TAB
+- AS2 (ldd,%A0,%A1) CR_TAB
+- AS2 (mov,%B0,__tmp_reg__));
++ {
++ op[2] = XEXP(base, 0);
++
++ if(REGNO(op[2]) == REG_Y)
++ return *l=5, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o1+4))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1+4))) CR_TAB
++ AS2 (ld,%D0,-Y) CR_TAB
++ AS2 (ld,%C0,-Y) CR_TAB
++ AS2 (ld,__tmp_reg__,-Y) CR_TAB
++ AS2 (ld,%A0,-Y) CR_TAB
++ AS2 (subi,%A2,lo8(%o1)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1)) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__))
++ : (AS2 (ldd,%D0,%D1) CR_TAB
++ AS2 (ldd,%C0,%C1) CR_TAB
++ AS2 (ldd,__tmp_reg__,%B1) CR_TAB
++ AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__));
++ if(REGNO(op[2]) == REG_Z)
++ return *l=5, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o1+4))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o1+4))) CR_TAB
++ AS2 (ld,%D0,-Z) CR_TAB
++ AS2 (ld,%C0,-Z) CR_TAB
++ AS2 (ld,__tmp_reg__,-Z) CR_TAB
++ AS2 (ld,%A0,-Z) CR_TAB
++ AS2 (subi,%A2,lo8(%o1)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1)) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__))
++ : (AS2 (ldd,%D0,%D1) CR_TAB
++ AS2 (ldd,%C0,%C1) CR_TAB
++ AS2 (ldd,__tmp_reg__,%B1) CR_TAB
++ AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (mov,%B0,__tmp_reg__));
++ }
+ else if (reg_dest == reg_base - 2)
+- return *l=5, (AS2 (ldd,%A0,%A1) CR_TAB
+- AS2 (ldd,%B0,%B1) CR_TAB
+- AS2 (ldd,__tmp_reg__,%C1) CR_TAB
+- AS2 (ldd,%D0,%D1) CR_TAB
+- AS2 (mov,%C0,__tmp_reg__));
+- return *l=4, (AS2 (ldd,%A0,%A1) CR_TAB
+- AS2 (ldd,%B0,%B1) CR_TAB
+- AS2 (ldd,%C0,%C1) CR_TAB
+- AS2 (ldd,%D0,%D1));
++ {
++ op[2] = XEXP(base, 0);
++
++ if(REGNO(op[2]) == REG_Y)
++ return *l=5, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++	                          AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y+) CR_TAB
++ AS2 (ld,__tmp_reg__,Y+) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+3)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1+3)) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__))
++ : (AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1) CR_TAB
++ AS2 (ldd,__tmp_reg__,%C1) CR_TAB
++ AS2 (ldd,%D0,%D1) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__));
++ if(REGNO(op[2]) == REG_Z)
++ return *l=5, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++	                          AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,Z+) CR_TAB
++ AS2 (ld,%B0,Z+) CR_TAB
++ AS2 (ld,__tmp_reg__,Z+) CR_TAB
++ AS2 (ld,%D0,Z) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+3)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1+3)) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__))
++ : (AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1) CR_TAB
++ AS2 (ldd,__tmp_reg__,%C1) CR_TAB
++ AS2 (ldd,%D0,%D1) CR_TAB
++ AS2 (mov,%C0,__tmp_reg__));
++ }
++ op[2] = XEXP(base, 0);
++ if(REGNO(op[2]) == REG_Y)
++ return *l=4, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++	                         AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,Y+) CR_TAB
++ AS2 (ld,%B0,Y+) CR_TAB
++ AS2 (ld,%C0,Y+) CR_TAB
++ AS2 (ld,%D0,Y) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+3)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1+3)))
++ : (AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1) CR_TAB
++ AS2 (ldd,%C0,%C1) CR_TAB
++ AS2 (ldd,%D0,%D1));
++ if(REGNO(op[2]) == REG_Z)
++ return *l=4, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o1))) CR_TAB
++	                         AS2 (sbci,%B2,hi8(-(%o1))) CR_TAB
++ AS2 (ld,%A0,Z+) CR_TAB
++ AS2 (ld,%B0,Z+) CR_TAB
++ AS2 (ld,%C0,Z+) CR_TAB
++ AS2 (ld,%D0,Z) CR_TAB
++ AS2 (subi,%A2,lo8(%o1+3)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o1+3)))
++ : (AS2 (ldd,%A0,%A1) CR_TAB
++ AS2 (ldd,%B0,%B1) CR_TAB
++ AS2 (ldd,%C0,%C1) CR_TAB
++ AS2 (ldd,%D0,%D1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=4, (AS2 (ld,%D0,%1) CR_TAB
+@@ -2602,20 +2968,36 @@ out_movsi_mr_r (rtx insn, rtx op[], int *l)
+ {
+ /* "st X+,r26" is undefined */
+ if (reg_unused_after (insn, base))
+- return *l=6, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+- AS2 (st,X,r26) CR_TAB
+- AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X+,__tmp_reg__) CR_TAB
+- AS2 (st,X+,r28) CR_TAB
+- AS2 (st,X,r29));
++ return *l=6, AVR_TINY ? (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X+,r28) CR_TAB
++ AS2 (st,X,r29))
++ : (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X+,r28) CR_TAB
++ AS2 (st,X,r29));
+ else
+- return *l=7, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+- AS2 (st,X,r26) CR_TAB
+- AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X+,__tmp_reg__) CR_TAB
+- AS2 (st,X+,r28) CR_TAB
+- AS2 (st,X,r29) CR_TAB
+- AS2 (sbiw,r26,3));
++ return *l=7, AVR_TINY ? (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X+,r28) CR_TAB
++ AS2 (st,X,r29) CR_TAB
++ AS2 (subi,r26,lo8(3)) CR_TAB
++ AS2 (sbci,r27,hi8(3)))
++ : (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X+,r28) CR_TAB
++ AS2 (st,X,r29) CR_TAB
++ AS2 (sbiw,r26,3));
+ }
+ else if (reg_base == reg_src + 2)
+ {
+@@ -2628,26 +3010,61 @@ out_movsi_mr_r (rtx insn, rtx op[], int *l)
+ AS2 (st,%0,__tmp_reg__) CR_TAB
+ AS1 (clr,__zero_reg__));
+ else
+- return *l=8, (AS2 (mov,__zero_reg__,%C1) CR_TAB
+- AS2 (mov,__tmp_reg__,%D1) CR_TAB
+- AS2 (st,%0+,%A1) CR_TAB
+- AS2 (st,%0+,%B1) CR_TAB
+- AS2 (st,%0+,__zero_reg__) CR_TAB
+- AS2 (st,%0,__tmp_reg__) CR_TAB
+- AS1 (clr,__zero_reg__) CR_TAB
+- AS2 (sbiw,r26,3));
++ return *l=8, AVR_TINY ? (AS2 (mov,__zero_reg__,%C1) CR_TAB
++ AS2 (mov,__tmp_reg__,%D1) CR_TAB
++ AS2 (st,%0+,%A1) CR_TAB
++ AS2 (st,%0+,%B1) CR_TAB
++ AS2 (st,%0+,__zero_reg__) CR_TAB
++ AS2 (st,%0,__tmp_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (subi,r26,lo8(3)) CR_TAB
++ AS2 (sbci,r27,hi8(3)))
++ : (AS2 (mov,__zero_reg__,%C1) CR_TAB
++ AS2 (mov,__tmp_reg__,%D1) CR_TAB
++ AS2 (st,%0+,%A1) CR_TAB
++ AS2 (st,%0+,%B1) CR_TAB
++ AS2 (st,%0+,__zero_reg__) CR_TAB
++ AS2 (st,%0,__tmp_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (sbiw,r26,3));
+ }
+- return *l=5, (AS2 (st,%0+,%A1) CR_TAB
+- AS2 (st,%0+,%B1) CR_TAB
+- AS2 (st,%0+,%C1) CR_TAB
+- AS2 (st,%0,%D1) CR_TAB
+- AS2 (sbiw,r26,3));
++ return *l=5, AVR_TINY ? (AS2 (st,%0+,%A1) CR_TAB
++ AS2 (st,%0+,%B1) CR_TAB
++ AS2 (st,%0+,%C1) CR_TAB
++ AS2 (st,%0,%D1) CR_TAB
++ AS2 (subi,r26,lo8(3)) CR_TAB
++ AS2 (sbci,r27,hi8(3)))
++ : (AS2 (st,%0+,%A1) CR_TAB
++ AS2 (st,%0+,%B1) CR_TAB
++ AS2 (st,%0+,%C1) CR_TAB
++ AS2 (st,%0,%D1) CR_TAB
++ AS2 (sbiw,r26,3));
+ }
+ else
+- return *l=4, (AS2 (st,%0,%A1) CR_TAB
+- AS2 (std,%0+1,%B1) CR_TAB
+- AS2 (std,%0+2,%C1) CR_TAB
+- AS2 (std,%0+3,%D1));
++ {
++ if(reg_base == REG_Y)
++ return *l=4, AVR_TINY ? (AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y+,%B1) CR_TAB
++ AS2 (st,Y+,%C1) CR_TAB
++ AS2 (st,Y,%D1) CR_TAB
++ AS2 (subi,r28,lo8(3)) CR_TAB
++	                     AS2 (sbci,r29,hi8(3)))
++ : (AS2 (st,%0,%A1) CR_TAB
++ AS2 (std,%0+1,%B1) CR_TAB
++ AS2 (std,%0+2,%C1) CR_TAB
++ AS2 (std,%0+3,%D1));
++ if(reg_base == REG_Z)
++ return *l=4, AVR_TINY ? (AS2 (st,Z+,%A1) CR_TAB
++ AS2 (st,Z+,%B1) CR_TAB
++ AS2 (st,Z+,%C1) CR_TAB
++ AS2 (st,Z,%D1) CR_TAB
++ AS2 (subi,r30,lo8(3)) CR_TAB
++	                     AS2 (sbci,r31,hi8(3)))
++ : (AS2 (st,%0,%A1) CR_TAB
++ AS2 (std,%0+1,%B1) CR_TAB
++ AS2 (std,%0+2,%C1) CR_TAB
++ AS2 (std,%0+3,%D1));
++ }
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+@@ -2659,21 +3076,42 @@ out_movsi_mr_r (rtx insn, rtx op[], int *l)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+- return *l = 6, (AS2 (adiw,r28,%o0-60) CR_TAB
+- AS2 (std,Y+60,%A1) CR_TAB
+- AS2 (std,Y+61,%B1) CR_TAB
+- AS2 (std,Y+62,%C1) CR_TAB
+- AS2 (std,Y+63,%D1) CR_TAB
+- AS2 (sbiw,r28,%o0-60));
+-
+- return *l = 8, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+- AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+- AS2 (st,Y,%A1) CR_TAB
+- AS2 (std,Y+1,%B1) CR_TAB
+- AS2 (std,Y+2,%C1) CR_TAB
+- AS2 (std,Y+3,%D1) CR_TAB
+- AS2 (subi,r28,lo8(%o0)) CR_TAB
+- AS2 (sbci,r29,hi8(%o0)));
++ return *l = 6, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o0-60))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o0-60))) CR_TAB
++ AS2 (subi,r28,lo8(-60)) CR_TAB
++	                   AS2 (sbci,r29,hi8(-60)) CR_TAB
++ AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y+,%B1) CR_TAB
++ AS2 (st,Y+,%C1) CR_TAB
++ AS2 (st,Y,%D1) CR_TAB
++ AS2 (subi,r28,lo8(63)) CR_TAB
++	                   AS2 (sbci,r29,hi8(63)) CR_TAB
++ AS2 (subi,r28,lo8(%o0-60)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0-60)))
++ : (AS2 (adiw,r28,%o0-60) CR_TAB
++ AS2 (std,Y+60,%A1) CR_TAB
++ AS2 (std,Y+61,%B1) CR_TAB
++ AS2 (std,Y+62,%C1) CR_TAB
++ AS2 (std,Y+63,%D1) CR_TAB
++ AS2 (sbiw,r28,%o0-60));
++ return *l = 8, AVR_TINY ? (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y+,%B1) CR_TAB
++ AS2 (st,Y+,%C1) CR_TAB
++ AS2 (st,Y,%D1) CR_TAB
++ AS2 (subi,r28,lo8(3)) CR_TAB
++	                   AS2 (sbci,r29,hi8(3)) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)))
++ : (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (st,Y,%A1) CR_TAB
++ AS2 (std,Y+1,%B1) CR_TAB
++ AS2 (std,Y+2,%C1) CR_TAB
++ AS2 (std,Y+3,%D1) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)));
+ }
+ if (reg_base == REG_X)
+ {
+@@ -2681,41 +3119,95 @@ out_movsi_mr_r (rtx insn, rtx op[], int *l)
+ if (reg_src == REG_X)
+ {
+ *l = 9;
+- return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+- AS2 (mov,__zero_reg__,r27) CR_TAB
+- AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X+,__tmp_reg__) CR_TAB
+- AS2 (st,X+,__zero_reg__) CR_TAB
+- AS2 (st,X+,r28) CR_TAB
+- AS2 (st,X,r29) CR_TAB
+- AS1 (clr,__zero_reg__) CR_TAB
+- AS2 (sbiw,r26,%o0+3));
++ return AVR_TINY ? (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X+,__zero_reg__) CR_TAB
++ AS2 (st,X+,r28) CR_TAB
++ AS2 (st,X,r29) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (subi,r26,lo8(%o0+3)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0+3)))
++ : (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X+,__zero_reg__) CR_TAB
++ AS2 (st,X+,r28) CR_TAB
++ AS2 (st,X,r29) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (sbiw,r26,%o0+3));
+ }
+ else if (reg_src == REG_X - 2)
+ {
+ *l = 9;
+- return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+- AS2 (mov,__zero_reg__,r27) CR_TAB
+- AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X+,r24) CR_TAB
+- AS2 (st,X+,r25) CR_TAB
+- AS2 (st,X+,__tmp_reg__) CR_TAB
+- AS2 (st,X,__zero_reg__) CR_TAB
+- AS1 (clr,__zero_reg__) CR_TAB
+- AS2 (sbiw,r26,%o0+3));
++ return AVR_TINY ? (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X+,r24) CR_TAB
++ AS2 (st,X+,r25) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (subi,r26,lo8(%o0+3)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0+3)))
++ : (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X+,r24) CR_TAB
++ AS2 (st,X+,r25) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (sbiw,r26,%o0+3));
+ }
+ *l = 6;
+- return (AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X+,%A1) CR_TAB
+- AS2 (st,X+,%B1) CR_TAB
+- AS2 (st,X+,%C1) CR_TAB
+- AS2 (st,X,%D1) CR_TAB
+- AS2 (sbiw,r26,%o0+3));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X+,%B1) CR_TAB
++ AS2 (st,X+,%C1) CR_TAB
++ AS2 (st,X,%D1) CR_TAB
++ AS2 (subi,r26,lo8(%o0+3)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0+3)))
++ : (AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X+,%B1) CR_TAB
++ AS2 (st,X+,%C1) CR_TAB
++ AS2 (st,X,%D1) CR_TAB
++ AS2 (sbiw,r26,%o0+3));
+ }
+- return *l=4, (AS2 (std,%A0,%A1) CR_TAB
+- AS2 (std,%B0,%B1) CR_TAB
+- AS2 (std,%C0,%C1) CR_TAB
+- AS2 (std,%D0,%D1));
++ op[2] = XEXP(base, 0);
++ if(REGNO(op[2]) == REG_Y)
++ return *l=4, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0))) CR_TAB
++ AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y+,%B1) CR_TAB
++ AS2 (st,Y+,%C1) CR_TAB
++ AS2 (st,Y,%D1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0+3)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0+3)))
++ : (AS2 (std,%A0,%A1) CR_TAB
++ AS2 (std,%B0,%B1) CR_TAB
++ AS2 (std,%C0,%C1) CR_TAB
++ AS2 (std,%D0,%D1));
++
++ if(REGNO(op[2]) == REG_Z)
++ return *l=4, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0))) CR_TAB
++ AS2 (st,Z+,%A1) CR_TAB
++ AS2 (st,Z+,%B1) CR_TAB
++ AS2 (st,Z+,%C1) CR_TAB
++ AS2 (st,Z,%D1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0+3)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0+3)))
++ : (AS2 (std,%A0,%A1) CR_TAB
++ AS2 (std,%B0,%B1) CR_TAB
++ AS2 (std,%C0,%C1) CR_TAB
++ AS2 (std,%D0,%D1));
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=4, (AS2 (st,%0,%D1) CR_TAB
+@@ -2929,9 +3421,18 @@ out_movqi_mr_r (rtx insn, rtx op[], int *l)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+- return *l = 3, (AS2 (adiw,r28,%o0-63) CR_TAB
+- AS2 (std,Y+63,%1) CR_TAB
+- AS2 (sbiw,r28,%o0-63));
++ return *l = 3, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o0-63))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o0-63))) CR_TAB
++ AS2 (subi,r28,lo8(-63)) CR_TAB
++ AS2 (sbci,r29,hi8(-63)) CR_TAB
++ AS2 (st,Y,%1) CR_TAB
++ AS2 (subi,r28,lo8(63)) CR_TAB
++ AS2 (sbci,r29,hi8(63)) CR_TAB
++ AS2 (subi,r28,lo8(%o0-63)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0-63)))
++ : (AS2 (adiw,r28,%o0-63) CR_TAB
++ AS2 (std,Y+63,%1) CR_TAB
++ AS2 (sbiw,r28,%o0-63));
+
+ return *l = 5, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+@@ -2944,28 +3445,60 @@ out_movqi_mr_r (rtx insn, rtx op[], int *l)
+ if (reg_overlap_mentioned_p (src, XEXP (x, 0)))
+ {
+ if (reg_unused_after (insn, XEXP (x,0)))
+- return *l = 3, (AS2 (mov,__tmp_reg__,%1) CR_TAB
+- AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X,__tmp_reg__));
+-
+- return *l = 4, (AS2 (mov,__tmp_reg__,%1) CR_TAB
+- AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X,__tmp_reg__) CR_TAB
+- AS2 (sbiw,r26,%o0));
++ return *l = 3, AVR_TINY ? (AS2 (mov,__tmp_reg__,%1) CR_TAB
++ AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X,__tmp_reg__))
++ : (AS2 (mov,__tmp_reg__,%1) CR_TAB
++ AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X,__tmp_reg__));
++
++ return *l = 4, AVR_TINY ? (AS2 (mov,__tmp_reg__,%1) CR_TAB
++ AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (subi,r26,lo8(%o0)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0)))
++ : (AS2 (mov,__tmp_reg__,%1) CR_TAB
++ AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (sbiw,r26,%o0));
+ }
+ else
+ {
+ if (reg_unused_after (insn, XEXP (x,0)))
+- return *l = 2, (AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X,%1));
+-
+- return *l = 3, (AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X,%1) CR_TAB
+- AS2 (sbiw,r26,%o0));
++ return *l = 2, AVR_TINY ? (AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X,%1))
++ : (AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X,%1));
++
++ return *l = 3, AVR_TINY ? (AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X,%1) CR_TAB
++ AS2 (subi,r26,lo8(%o0)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0)))
++ : (AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X,%1) CR_TAB
++ AS2 (sbiw,r26,%o0));
+ }
+ }
+ *l = 1;
+- return AS2 (std,%0,%1);
++ op[2] = XEXP(x, 0);
++ if(REGNO(op[2]) == REG_Y)
++ return AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0))) CR_TAB
++ AS2 (st,Y,%1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0)))
++ : AS2 (std,%0,%1);
++ if(REGNO(op[2]) == REG_Z)
++ return AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0))) CR_TAB
++ AS2 (st,Z,%1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0)))
++ : AS2 (std,%0,%1);
+ }
+ *l = 1;
+ return AS2 (st,%0,%1);
+@@ -3014,24 +3547,43 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ {
+ /* "st X+,r26" and "st -X,r26" are undefined. */
+ if (!mem_volatile_p && reg_unused_after (insn, src))
+- return *l=4, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+- AS2 (st,X,r26) CR_TAB
+- AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X,__tmp_reg__));
++ return *l=4, AVR_TINY ? (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X,__tmp_reg__))
++ : (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X,__tmp_reg__));
+ else
+ {
+ if (!AVR_XMEGA)
+- return *l=5, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+- AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X,__tmp_reg__) CR_TAB
+- AS2 (sbiw,r26,1) CR_TAB
+- AS2 (st,X,r26));
++ return *l=5, AVR_TINY ? (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)) CR_TAB
++ AS2 (st,X,r26))
++ : (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (sbiw,r26,1) CR_TAB
++ AS2 (st,X,r26));
+ else
+- return *l=5, (AS2 (mov,__tmp_reg__,r27) CR_TAB
+- AS2 (st,X,r26) CR_TAB
+- AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X,__tmp_reg__) CR_TAB
+- AS2 (sbiw,r26,1));
++ return *l=5, AVR_TINY ? (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)))
++ : (AS2 (mov,__tmp_reg__,r27) CR_TAB
++ AS2 (st,X,r26) CR_TAB
++ AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X,__tmp_reg__) CR_TAB
++ AS2 (sbiw,r26,1));
+ }
+ }
+ else
+@@ -3042,26 +3594,62 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ else
+ {
+ if (!AVR_XMEGA)
+- return *l=3, (AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X,%B1) CR_TAB
+- AS2 (st,-X,%A1));
++ return *l=3, AVR_TINY ? (AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (st,-X,%A1))
++ : (AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (st,-X,%A1));
+ else
+- return *l=3, (AS2 (st,X+,%A1) CR_TAB
+- AS2 (st,X,%B1) CR_TAB
+- AS2 (sbiw,r26,1));
++ return *l=3, AVR_TINY ? (AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (subi,r26,lo8(1)) CR_TAB
++ AS2 (sbci,r27,hi8(1)))
++ : (AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (sbiw,r26,1));
+ }
+ }
+ }
+ else
+ {
+ if (!AVR_XMEGA)
+- return *l=2, (AS2 (std,%0+1,%B1) CR_TAB
+- AS2 (st,%0,%A1));
++ {
++ if(reg_base == REG_Y)
++ return *l=2, AVR_TINY ? (AS2 (subi,r28,lo8(-1)) CR_TAB
++ AS2 (sbci,r29,hi8(-1)) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (st,-Y,%A1))
++ : (AS2 (std,%0+1,%B1) CR_TAB
++ AS2 (st,%0,%A1));
++ if(reg_base == REG_Z)
++ return *l=2, AVR_TINY ? (AS2 (subi,r30,lo8(-1)) CR_TAB
++ AS2 (sbci,r31,hi8(-1)) CR_TAB
++ AS2 (st,Z,%B1) CR_TAB
++ AS2 (st,-Z,%A1))
++ : (AS2 (std,%0+1,%B1) CR_TAB
++ AS2 (st,%0,%A1));
++ }
+ else
+- return *l=2, (AS2 (st,%0,%A1) CR_TAB
+- AS2 (std,%0+1,%B1));
++ {
++ if(reg_base == REG_Y)
++ return *l=2, AVR_TINY ? (AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (subi,r28,lo8(1)) CR_TAB
++ AS2 (sbci,r29,hi8(1)))
++ : (AS2 (st,%0,%A1) CR_TAB
++ AS2 (std,%0+1,%B1));
++ if(reg_base == REG_Z)
++ return *l=2, AVR_TINY ? (AS2 (st,Z+,%A1) CR_TAB
++ AS2 (st,Z,%B1) CR_TAB
++ AS2 (subi,r30,lo8(1)) CR_TAB
++ AS2 (sbci,r31,hi8(1)))
++ : (AS2 (st,%0,%A1) CR_TAB
++ AS2 (std,%0+1,%B1));
+ }
+ }
++ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ int disp = INTVAL (XEXP (base, 1));
+@@ -3074,32 +3662,68 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ if (!AVR_XMEGA)
+ {
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+- return *l = 4, (AS2 (adiw,r28,%o0-62) CR_TAB
+- AS2 (std,Y+63,%B1) CR_TAB
+- AS2 (std,Y+62,%A1) CR_TAB
+- AS2 (sbiw,r28,%o0-62));
+-
+- return *l = 6, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+- AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+- AS2 (std,Y+1,%B1) CR_TAB
+- AS2 (st,Y,%A1) CR_TAB
+- AS2 (subi,r28,lo8(%o0)) CR_TAB
+- AS2 (sbci,r29,hi8(%o0)));
++ return *l = 4, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o0-62))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o0-62))) CR_TAB
++ AS2 (subi,r28,lo8(-63)) CR_TAB
++ AS2 (sbci,r29,hi8(-63)) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (st,-Y,%A1) CR_TAB
++ AS2 (subi,r28,lo8(62)) CR_TAB
++ AS2 (sbci,r29,hi8(62)) CR_TAB
++ AS2 (subi,r28,lo8(%o0-62)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0-62)))
++ : (AS2 (adiw,r28,%o0-62) CR_TAB
++ AS2 (std,Y+63,%B1) CR_TAB
++ AS2 (std,Y+62,%A1) CR_TAB
++ AS2 (sbiw,r28,%o0-62));
++
++ return *l = 6, AVR_TINY ? (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (subi,r28,lo8(-1)) CR_TAB
++ AS2 (sbci,r29,hi8(-1)) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (st,-Y,%A1) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)))
++ : (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (std,Y+1,%B1) CR_TAB
++ AS2 (st,Y,%A1) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)));
+ }
+ else
+ {
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+- return *l = 4, (AS2 (adiw,r28,%o0-62) CR_TAB
+- AS2 (std,Y+62,%A1) CR_TAB
+- AS2 (std,Y+63,%B1) CR_TAB
+- AS2 (sbiw,r28,%o0-62));
+-
+- return *l = 6, (AS2 (subi,r28,lo8(-%o0)) CR_TAB
+- AS2 (sbci,r29,hi8(-%o0)) CR_TAB
+- AS2 (st,Y,%A1) CR_TAB
+- AS2 (std,Y+1,%B1) CR_TAB
+- AS2 (subi,r28,lo8(%o0)) CR_TAB
+- AS2 (sbci,r29,hi8(%o0)));
++ return *l = 4, AVR_TINY ? (AS2 (subi,r28,lo8(-(%o0-62))) CR_TAB
++ AS2 (sbci,r29,hi8(-(%o0-62))) CR_TAB
++ AS2 (subi,r28,lo8(-62)) CR_TAB
++ AS2 (sbci,r29,hi8(-62)) CR_TAB
++ AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (subi,r28,lo8(63)) CR_TAB
++ AS2 (sbci,r29,hi8(63)) CR_TAB
++ AS2 (subi,r28,lo8(%o0-62)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0-62)))
++ : (AS2 (adiw,r28,%o0-62) CR_TAB
++ AS2 (std,Y+62,%A1) CR_TAB
++ AS2 (std,Y+63,%B1) CR_TAB
++ AS2 (sbiw,r28,%o0-62));
++
++ return *l = 6, AVR_TINY ? (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (subi,r28,lo8(1)) CR_TAB
++ AS2 (sbci,r29,hi8(1)) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)))
++ : (AS2 (subi,r28,lo8(-%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(-%o0)) CR_TAB
++ AS2 (st,Y,%A1) CR_TAB
++ AS2 (std,Y+1,%B1) CR_TAB
++ AS2 (subi,r28,lo8(%o0)) CR_TAB
++ AS2 (sbci,r29,hi8(%o0)));
+ }
+ }
+ if (reg_base == REG_X)
+@@ -3110,50 +3734,119 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ if (!AVR_XMEGA)
+ {
+ *l = 7;
+- return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+- AS2 (mov,__zero_reg__,r27) CR_TAB
+- AS2 (adiw,r26,%o0+1) CR_TAB
+- AS2 (st,X,__zero_reg__) CR_TAB
+- AS2 (st,-X,__tmp_reg__) CR_TAB
+- AS1 (clr,__zero_reg__) CR_TAB
+- AS2 (sbiw,r26,%o0));
++ return AVR_TINY ? (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (subi,r26,lo8(-(%o0+1))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0+1))) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS2 (st,-X,__tmp_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (subi,r26,lo8(%o0)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0)))
++ : (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (adiw,r26,%o0+1) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS2 (st,-X,__tmp_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (sbiw,r26,%o0));
+ }
+ else
+ {
+ *l = 7;
+- return (AS2 (mov,__tmp_reg__,r26) CR_TAB
+- AS2 (mov,__zero_reg__,r27) CR_TAB
+- AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X+,__tmp_reg__) CR_TAB
+- AS2 (st,X,__zero_reg__) CR_TAB
+- AS1 (clr,__zero_reg__) CR_TAB
+- AS2 (sbiw,r26,%o0+1));
++ return AVR_TINY ? (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (subi,r26,lo8(%o0+1)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0+1)))
++ : (AS2 (mov,__tmp_reg__,r26) CR_TAB
++ AS2 (mov,__zero_reg__,r27) CR_TAB
++ AS2 (adiw,r26,%o0+1) CR_TAB
++ AS2 (st,X+,__tmp_reg__) CR_TAB
++ AS2 (st,X,__zero_reg__) CR_TAB
++ AS1 (clr,__zero_reg__) CR_TAB
++ AS2 (sbiw,r26,%o0));
++
+ }
+ }
+ if (!AVR_XMEGA)
+ {
+ *l = 4;
+- return (AS2 (adiw,r26,%o0+1) CR_TAB
+- AS2 (st,X,%B1) CR_TAB
+- AS2 (st,-X,%A1) CR_TAB
+- AS2 (sbiw,r26,%o0));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-(%o0+1))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0+1))) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (st,-X,%A1) CR_TAB
++ AS2 (subi,r26,lo8(%o0)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0)))
++ : (AS2 (adiw,r26,%o0+1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (st,-X,%A1) CR_TAB
++ AS2 (sbiw,r26,%o0));
+ }
+ else
+ {
+ *l = 4;
+- return (AS2 (adiw,r26,%o0) CR_TAB
+- AS2 (st,X+,%A1) CR_TAB
+- AS2 (st,X,%B1) CR_TAB
+- AS2 (sbiw,r26,%o0+1));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,r27,hi8(-(%o0))) CR_TAB
++ AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (subi,r26,lo8(%o0)) CR_TAB
++ AS2 (sbci,r27,hi8(%o0)))
++ : (AS2 (adiw,r26,%o0) CR_TAB
++ AS2 (st,X+,%A1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (sbiw,r26,%o0+1));
+ }
+ }
+
+ if (!AVR_XMEGA)
+- return *l=2, (AS2 (std,%B0,%B1) CR_TAB
+- AS2 (std,%A0,%A1));
++ {
++ op[2] = XEXP(base, 0);
++ if(REGNO(op[2]) == REG_Y)
++ return *l=2, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0+2))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0+2))) CR_TAB
++ AS2 (st,-Y,%B1) CR_TAB
++ AS2 (st,-Y,%A1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0)))
++ : (AS2 (std,%B0,%B1) CR_TAB
++ AS2 (std,%A0,%A1));
++ if(REGNO(op[2]) == REG_Z)
++ return *l=2, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0+1))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0+1))) CR_TAB
++ AS2 (st,-Z,%B1) CR_TAB
++ AS2 (st,-Z,%A1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0)))
++ : (AS2 (std,%B0,%B1) CR_TAB
++ AS2 (std,%A0,%A1));
++ }
+ else
+- return *l=2, (AS2 (std,%A0,%A1) CR_TAB
+- AS2 (std,%B0,%B1));
++ {
++ op[2] = XEXP(base, 0);
++ if(REGNO(op[2]) == REG_Y)
++ return *l=2, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0))) CR_TAB
++ AS2 (st,Y+,%A1) CR_TAB
++ AS2 (st,Y,%B1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0+1)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0+1)))
++ : (AS2 (std,%A0,%A1) CR_TAB
++ AS2 (std,%B0,%B1));
++ if(REGNO(op[2]) == REG_Z)
++ return *l=2, AVR_TINY ? (AS2 (subi,%A2,lo8(-(%o0))) CR_TAB
++ AS2 (sbci,%B2,hi8(-(%o0))) CR_TAB
++ AS2 (st,Z+,%A1) CR_TAB
++ AS2 (st,Z,%B1) CR_TAB
++ AS2 (subi,%A2,lo8(%o0+1)) CR_TAB
++ AS2 (sbci,%B2,hi8(%o0+1)))
++ : (AS2 (std,%A0,%A1) CR_TAB
++ AS2 (std,%B0,%B1));
++ }
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ {
+@@ -3173,17 +3866,32 @@ out_movhi_mr_r (rtx insn, rtx op[], int *l)
+ if (REGNO (XEXP (base, 0)) == REG_X)
+ {
+ *l = 4;
+- return (AS2 (adiw,r26,1) CR_TAB
+- AS2 (st,X,%B1) CR_TAB
+- AS2 (st,-X,%A1) CR_TAB
+- AS2 (adiw,r26,2));
++ return AVR_TINY ? (AS2 (subi,r26,lo8(-1)) CR_TAB
++ AS2 (sbci,r27,hi8(-1)) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (st,-X,%A1) CR_TAB
++ AS2 (subi,r26,lo8(-2)) CR_TAB
++ AS2 (sbci,r27,hi8(-2)))
++ : (AS2 (adiw,r26,1) CR_TAB
++ AS2 (st,X,%B1) CR_TAB
++ AS2 (st,-X,%A1) CR_TAB
++ AS2 (adiw,r26,2));
+ }
+ else
+ {
++ //FIXME:check the code once again for AVR_TINY
+ *l = 3;
+- return (AS2 (std,%p0+1,%B1) CR_TAB
+- AS2 (st,%p0,%A1) CR_TAB
+- AS2 (adiw,%r0,2));
++ return AVR_TINY ? (AS2 (subi,%A0,lo8(-1)) CR_TAB
++ AS2 (sbci,%B0,hi8(-1)) CR_TAB
++ AS2 (st,%p0,%B1) CR_TAB
++ AS2 (subi,%A0,lo8(1)) CR_TAB
++ AS2 (sbci,%B0,hi8(1)) CR_TAB
++ AS2 (st,%p0,%A1) CR_TAB
++ AS2 (subi,%A0,lo8(-3)) CR_TAB
++ AS2 (sbci,%B0,hi8(-3)))
++ : (AS2 (std,%p0+1,%B1) CR_TAB
++ AS2 (st,%p0,%A1) CR_TAB
++ AS2 (adiw,%r0,2));
+ }
+ }
+
+@@ -3271,7 +3979,9 @@ out_tsthi (rtx insn, int *l)
+ if (test_hard_reg_class (ADDW_REGS, SET_SRC (PATTERN (insn))))
+ {
+ if (l) *l = 1;
+- return AS2 (sbiw,%0,0);
++ return AVR_TINY ? (AS2 (subi,%A0,lo8(0)) CR_TAB
++ AS2 (sbci,%B0,hi8(0)))
++ : AS2 (sbiw,%0,0);
+ }
+ if (l) *l = 2;
+ return (AS2 (cp,%A0,__zero_reg__) CR_TAB
+@@ -3292,9 +4002,13 @@ out_tstsi (rtx insn, int *l)
+ if (test_hard_reg_class (ADDW_REGS, SET_SRC (PATTERN (insn))))
+ {
+ if (l) *l = 3;
+- return (AS2 (sbiw,%A0,0) CR_TAB
+- AS2 (cpc,%C0,__zero_reg__) CR_TAB
+- AS2 (cpc,%D0,__zero_reg__));
++ return AVR_TINY ? (AS2 (subi,%A0,lo8(-(-0))) CR_TAB
++ AS2 (sbci,%B0,hi8(-(-0))) CR_TAB
++ AS2 (cpc,%C0,__zero_reg__) CR_TAB
++ AS2 (cpc,%D0,__zero_reg__))
++ : (AS2 (sbiw,%A0,0) CR_TAB
++ AS2 (cpc,%C0,__zero_reg__) CR_TAB
++ AS2 (cpc,%D0,__zero_reg__));
+ }
+ if (l) *l = 4;
+ return (AS2 (cp,%A0,__zero_reg__) CR_TAB
+@@ -5560,10 +6274,12 @@ avr_file_start (void)
+ /* fprintf (asm_out_file, "\t.arch %s\n", avr_mcu_name);*/
+ fputs ("__SREG__ = 0x3f\n"
+ "__SP_H__ = 0x3e\n"
+- "__SP_L__ = 0x3d\n"
+- "__CCP__ = 0x34\n", asm_out_file);
++ "__SP_L__ = 0x3d\n", asm_out_file);
++
++ AVR_TINY ? fputs ("__CCP__ = 0x3c\n", asm_out_file) : fputs ("__CCP__ = 0x34\n", asm_out_file);
+
+- fputs ("__tmp_reg__ = 0\n"
++ AVR_TINY ? fputs ("__tmp_reg__ = 16\n"
++ "__zero_reg__ = 17\n", asm_out_file) : fputs ("__tmp_reg__ = 0\n"
+ "__zero_reg__ = 1\n", asm_out_file);
+ }
+
+diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
+index b0d5d60..c947c39 100644
+--- a/gcc/config/avr/avr.h
++++ b/gcc/config/avr/avr.h
+@@ -50,6 +50,9 @@ struct base_arch_s {
+ /* Core have RAMPX, RAMPY and RAMPD registers. */
+ int have_rampx_y_d;
+
++ /* Core is in tiny 10/20/9/4/5/40. */
++ int avrtiny;
++
+ const char *const macro;
+ };
+
+@@ -102,6 +105,10 @@ extern const struct base_arch_s *avr_current_arch;
+ builtin_define ("__AVR_XMEGA__"); \
+ builtin_define ("__AVR_HAVE_SPMX__"); \
+ } \
++ if (avr_current_arch->avrtiny) \
++ { \
++ builtin_define ("__AVR_TINY__"); \
++ } \
+ if (avr_current_arch->have_rampx_y_d) \
+ { \
+ builtin_define ("__AVR_HAVE_RAMPX__");\
+@@ -124,12 +131,13 @@ extern GTY(()) section *progmem_section;
+ #define AVR_HAVE_RAMPZ (avr_current_arch->have_elpm)
+ #define AVR_HAVE_EIJMP_EICALL (avr_current_arch->have_eijmp_eicall)
+ #define AVR_XMEGA (avr_current_arch->xmega)
++#define AVR_TINY (avr_current_arch->avrtiny)
+ #define AVR_HAVE_RAMPX_Y_D (avr_current_arch->have_rampx_y_d)
+
+ #define AVR_2_BYTE_PC (!AVR_HAVE_EIJMP_EICALL)
+ #define AVR_3_BYTE_PC (AVR_HAVE_EIJMP_EICALL)
+
+-#define AVR_IO_OFFSET (AVR_XMEGA ? 0 : 0x20)
++#define AVR_IO_OFFSET ((AVR_XMEGA || AVR_TINY) ? 0 : 0x20)
+ #define AVR_RAMPD_ADDR (AVR_XMEGA ? 0x38 : 0)
+ #define AVR_RAMPX_ADDR (AVR_XMEGA ? 0x39 : 0)
+ #define AVR_RAMPY_ADDR (AVR_XMEGA ? 0x3A : 0)
+@@ -248,7 +256,6 @@ extern GTY(()) section *progmem_section;
+
+ #define ORDER_REGS_FOR_LOCAL_ALLOC order_regs_for_local_alloc ()
+
+-
+ #define HARD_REGNO_NREGS(REGNO, MODE) ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+ #define HARD_REGNO_MODE_OK(REGNO, MODE) avr_hard_regno_mode_ok(REGNO, MODE)
+@@ -312,6 +319,41 @@ enum reg_class {
+ {0xffffffff,0x00000003} /* ALL_REGS */ \
+ }
+
++/* Zero or more C statements that may conditionally modify five variables
++ fixed_regs, call_used_regs, global_regs, reg_names, and reg_class_contents,
++ to take into account any dependence of these register sets on target flags.
++ The first three of these are of type char [] (interpreted as Boolean
++ vectors). global_regs is a const char *[], and reg_class_contents is a
++ HARD_REG_SET. Before the macro is called, fixed_regs, call_used_regs,
++ reg_class_contents, and reg_names have been initialized from
++ FIXED_REGISTERS, CALL_USED_REGISTERS, REG_CLASS_CONTENTS, and
++ REGISTER_NAMES, respectively. global_regs has been cleared, and any
++ ‘-ffixed-reg’, ‘-fcall-used-reg’ and ‘-fcall-saved-reg’ command options
++ have been applied.
++
++ You need not define this macro if it has no work to do.
++
++ If the usage of an entire class of registers depends on the target flags,
++ you may indicate this to GCC by using this macro to modify fixed_regs and
++ call_used_regs to 1 for each of the registers in the classes which should
++ not be used by GCC. Also define the macro REG_CLASS_FROM_LETTER /
++ REG_CLASS_FROM_CONSTRAINT to return NO_REGS if it is called with a letter
++ for a class that shouldn’t be used. (However, if this class is not included
++ in GENERAL_REGS and all of the insn patterns whose constraints permit this
++ class are controlled by target switches, then GCC will automatically avoid
++ using these registers when the target switches are opposed to them.) */
++
++#define CONDITIONAL_REGISTER_USAGE \
++ if (AVR_TINY) { \
++ int i; \
++ for (i = 0; i <= 17; i++) { \
++ fixed_regs[i] = 1; \
++ call_used_regs[i] = 1; \
++ } \
++ CLEAR_HARD_REG_SET(reg_class_contents[(int)ADDW_REGS]); \
++ CLEAR_HARD_REG_SET(reg_class_contents[(int)NO_LD_REGS]); \
++ }
++
+ #define REGNO_REG_CLASS(R) avr_regno_reg_class(R)
+
+ /* The following macro defines cover classes for Integrated Register
+@@ -1232,7 +1274,14 @@ mmcu=*:-mmcu=%*}"
+ %{mmcu=atxmega256a3b:crtx256a3b.o%s} \
+ %{mmcu=atxmega256d3:crtx256d3.o%s} \
+ %{mmcu=atxmega7|mmcu=atxmega128a1:crtx128a1.o%s} \
+-%{mmcu=atxmega128a1u:crtx128a1u.o%s}"
++%{mmcu=atxmega128a1u:crtx128a1u.o%s} \
++%{mmcu=attiny4:crttn4.o%s} \
++%{mmcu=attiny5:crttn5.o%s} \
++%{mmcu=attiny9:crttn9.o%s} \
++%{mmcu=avrtiny10|mmcu=attiny10:crttn10.o%s} \
++%{mmcu=attiny20:crttn20.o%s} \
++%{mmcu=attiny40:crttn40.o%s}"
++
+
+ #define EXTRA_SPECS {"crt_binutils", CRT_BINUTILS_SPECS},
+
+diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
+index f91e98c..96ef48e 100644
+--- a/gcc/config/avr/avr.md
++++ b/gcc/config/avr/avr.md
+@@ -181,6 +181,9 @@
+ DONE;
+ })
+
++(define_constants
++ [(TMP_REGNO_AVRTINY10 16) ; temporary register r16
++ (ZERO_REGNO_AVRTINY10 17)]) ; zero register r17
+
+ (define_insn "*push<ALLQ:mode>"
+ [(set (mem:ALLQ (post_dec (reg:HI REG_SP)))
+@@ -474,7 +477,7 @@
+ rtx addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+
+ /* Create rtx for tmp register - we use this as scratch. */
+- rtx tmp_reg_rtx = gen_rtx_REG (QImode, TMP_REGNO);
++ rtx tmp_reg_rtx = gen_rtx_REG (QImode, AVR_TINY ? TMP_REGNO_AVRTINY10 : TMP_REGNO);
+
+ if (GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+@@ -2955,7 +2958,7 @@
+ UNSPEC_INDEX_JMP))
+ (use (label_ref (match_operand 1 "" "")))
+ (clobber (match_dup 0))]
+- "AVR_HAVE_JMP_CALL && !AVR_HAVE_EIJMP_EICALL"
++ "(AVR_HAVE_JMP_CALL && !AVR_HAVE_EIJMP_EICALL)"
+ "lsl r30
+ rol r31
+ lpm
+diff --git a/gcc/config/avr/libgcc-fixed.S b/gcc/config/avr/libgcc-fixed.S
+index a694ee6..73d0ad1 100644
+--- a/gcc/config/avr/libgcc-fixed.S
++++ b/gcc/config/avr/libgcc-fixed.S
+@@ -29,13 +29,17 @@ Boston, MA 02110-1301, USA. */
+
+ /* Fixed point library routines for avr. */
+
++#if defined (__AVR_TINY__)
++#define __zero_reg__ r17
++#define __tmp_reg__ r16
++#else
+ #define __zero_reg__ r1
+ #define __tmp_reg__ r0
++#endif
+ #define __SREG__ 0x3f
+ #define __SP_H__ 0x3e
+ #define __SP_L__ 0x3d
+ #define __RAMPZ__ 0x3B
+-
+ /* Conversions to float. */
+ #if defined (L_fractqqsf)
+ .global __fractqqsf
+@@ -281,15 +285,15 @@ __muluqq3_exit:
+ .func __mulhq3
+ __mulhq3:
+ fmuls r_arg1H, r_arg2H
+- movw r_resL, r0
++ movw r_resL, __tmp_reg__
+ fmulsu r_arg2H, r_arg1L
+ clr r_arg1L
+ sbc r_resH, r_arg1L
+- add r_resL, r1
++ add r_resL, __zero_reg__
+ adc r_resH, r_arg1L
+ fmulsu r_arg1H, r_arg2L
+ sbc r_resH, r_arg1L
+- add r_resL, r1
++ add r_resL, __zero_reg__
+ adc r_resH, r_arg1L
+ clr __zero_reg__
+ ret
+@@ -301,13 +305,13 @@ __mulhq3:
+ .func __muluhq3
+ __muluhq3:
+ mul r_arg1H, r_arg2H
+- movw r_resL, r0
++ movw r_resL, __tmp_reg__
+ mul r_arg1H, r_arg2L
+- add r_resL, r1
++ add r_resL, __zero_reg__
+ clr __zero_reg__
+ adc r_resH, __zero_reg__
+ mul r_arg1L, r_arg2H
+- add r_resL, r1
++ add r_resL, __zero_reg__
+ clr __zero_reg__
+ adc r_resH, __zero_reg__
+ ret
+@@ -401,15 +405,15 @@ __muluhq3_skip:
+ .func __mulha3
+ __mulha3:
+ mul r_arg1L, r_arg2L
+- mov r_resL, r1
++ mov r_resL, __zero_reg__
+ muls r_arg1H, r_arg2H
+- mov r_resH, r0
++ mov r_resH, __tmp_reg__
+ mulsu r_arg1H, r_arg2L
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ mulsu r_arg2H, r_arg1L
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ clr __zero_reg__
+ ret
+ .endfunc
+@@ -420,15 +424,15 @@ __mulha3:
+ .func __muluha3
+ __muluha3:
+ mul r_arg1L, r_arg2L
+- mov r_resL, r1
++ mov r_resL, __zero_reg__
+ mul r_arg1H, r_arg2H
+- mov r_resH, r0
++ mov r_resH, __tmp_reg__
+ mul r_arg1H, r_arg2L
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ mul r_arg1L, r_arg2H
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ clr __zero_reg__
+ ret
+ .endfunc
+@@ -442,8 +446,8 @@ __muluha3:
+ #define r_arg2H r23 /* multiplicand High */
+ #define r_resL r18 /* result Low */
+ #define r_resH r19 /* result High */
+-#define r_scratchL r0 /* scratch Low */
+-#define r_scratchH r1
++#define r_scratchL __tmp_reg__ /* scratch Low */
++#define r_scratchH __zero_reg__
+
+ #if defined (L_mulha3)
+ .global __mulha3
+@@ -480,8 +484,8 @@ __mulha3_exit:
+ __muluha3:
+ clr r_resL ; clear result
+ clr r_resH
+- mov_l r0, r_arg1L ; save multiplicand
+- mov_h r1, r_arg1H
++ mov_l __tmp_reg__, r_arg1L ; save multiplicand
++ mov_h __zero_reg__, r_arg1H
+ __muluha3_loop1:
+ sbrs r_arg2H,0
+ rjmp __muluha3_skip1
+@@ -490,7 +494,12 @@ __muluha3_loop1:
+ __muluha3_skip1:
+ lsl r_arg1L ; shift multiplicand
+ rol r_arg1H
++#if defined (__AVR_TINY__)
++ subi r_arg1L, lo8(0)
++ sbci r_arg1L, hi8(0)
++#else
+ sbiw r_arg1L,0
++#endif
+ breq __muluha3_loop1_done ; exit multiplicand = 0
+ lsr r_arg2H
+ brne __muluha3_loop1 ; exit multiplier = 0
+@@ -500,7 +509,12 @@ __muluha3_loop1_done:
+ __muluha3_loop2:
+ lsr r_arg1H ; shift multiplicand
+ ror r_arg1L
++#if defined (__AVR_TINY__)
++ subi r_arg1L, lo8(0)
++ sbci r_arg1L, hi8(0)
++#else
+ sbiw r_arg1L,0
++#endif
+ breq __muluha3_exit ; exit if multiplicand = 0
+ sbrs r_arg2L,7
+ rjmp __muluha3_skip2
+@@ -556,53 +570,53 @@ __mulsa3:
+ clr r_resHL
+ clr r_resHH
+ mul r_arg1H, r_arg2L
+- mov r_resL, r1
++ mov r_resL, __zero_reg__
+ mul r_arg1L, r_arg2H
+- add r_resL, r1
++ add r_resL, __zero_reg__
+ adc r_resH, r_clr
+ mul r_arg1L, r_arg2HL
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ adc r_resHL, r_clr
+ mul r_arg1H, r_arg2H
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ adc r_resHL, r_clr
+ mul r_arg1HL, r_arg2L
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ adc r_resHL, r_clr
+ mulsu r_arg2HH, r_arg1L
+ sbc r_resHH, r_clr
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HL
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mul r_arg1HL, r_arg2H
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mulsu r_arg1HH, r_arg2L
+ sbc r_resHH, r_clr
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mulsu r_arg2HH, r_arg1H
+- add r_resHL, r0
+- adc r_resHH, r1
++ add r_resHL, __tmp_reg__
++ adc r_resHH, __zero_reg__
+ mul r_arg1HL, r_arg2HL
+- add r_resHL, r0
+- adc r_resHH, r1
++ add r_resHL, __tmp_reg__
++ adc r_resHH, __zero_reg__
+ mulsu r_arg1HH, r_arg2H
+- add r_resHL, r0
+- adc r_resHH, r1
++ add r_resHL, __tmp_reg__
++ adc r_resHH, __zero_reg__
+ mulsu r_arg2HH, r_arg1HL
+- add r_resHH, r0
++ add r_resHH, __tmp_reg__
+ mulsu r_arg1HH, r_arg2HL
+- add r_resHH, r0
++ add r_resHH, __tmp_reg__
+ clr __zero_reg__
+ ret
+ .endfunc
+@@ -617,51 +631,51 @@ __mulusa3:
+ clr r_resHL
+ clr r_resHH
+ mul r_arg1H, r_arg2L
+- mov r_resL, r1
++ mov r_resL, __zero_reg__
+ mul r_arg1L, r_arg2H
+- add r_resL, r1
++ add r_resL, __zero_reg__
+ adc r_resH, r_clr
+ mul r_arg1L, r_arg2HL
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ adc r_resHL, r_clr
+ mul r_arg1H, r_arg2H
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ adc r_resHL, r_clr
+ mul r_arg1HL, r_arg2L
+- add r_resL, r0
+- adc r_resH, r1
++ add r_resL, __tmp_reg__
++ adc r_resH, __zero_reg__
+ adc r_resHL, r_clr
+ mul r_arg1L, r_arg2HH
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HL
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mul r_arg1HL, r_arg2H
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mul r_arg1HH, r_arg2L
+- add r_resH, r0
+- adc r_resHL, r1
++ add r_resH, __tmp_reg__
++ adc r_resHL, __zero_reg__
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HH
+- add r_resHL, r0
+- adc r_resHH, r1
++ add r_resHL, __tmp_reg__
++ adc r_resHH, __zero_reg__
+ mul r_arg1HL, r_arg2HL
+- add r_resHL, r0
+- adc r_resHH, r1
++ add r_resHL, __tmp_reg__
++ adc r_resHH, __zero_reg__
+ mul r_arg1HH, r_arg2H
+- add r_resHL, r0
+- adc r_resHH, r1
++ add r_resHL, __tmp_reg__
++ adc r_resHH, __zero_reg__
+ mul r_arg1HL, r_arg2HH
+- add r_resHH, r0
++ add r_resHH, __tmp_reg__
+ mul r_arg1HH, r_arg2HL
+- add r_resHH, r0
++ add r_resHH, __tmp_reg__
+ clr __zero_reg__
+ ret
+ .endfunc
+@@ -680,13 +694,20 @@ __mulusa3:
+ #define r_arg2HL r26
+ #define r_arg2HH r27 /* multiplicand High */
+
++#if defined (__AVR_TINY__)
++#define r_resL r28 /* result Low */
++#define r_resH r29
++#define r_resHL r30
++#define r_resHH r31 /* result High */
++#else
+ #define r_resL r14 /* result Low */
+ #define r_resH r15
+ #define r_resHL r16
+ #define r_resHH r17 /* result High */
++#endif
+
+-#define r_scratchL r0 /* scratch Low */
+-#define r_scratchH r1
++#define r_scratchL __tmp_reg__ /* scratch Low */
++#define r_scratchH __zero_reg__
+ #define r_scratchHL r22
+ #define r_scratchHH r23 /* scratch High */
+
+@@ -758,7 +779,12 @@ __mulusa3_skip1:
+ rol r_arg1HH
+ lsr r_arg2HH
+ ror r_arg2HL
++#if defined (__AVR_TINY__)
++ subi r_arg2HL, lo8(0)
++ sbci r_arg2HL, hi8(0)
++#else
+ sbiw r_arg2HL,0
++#endif
+ brne __mulusa3_loop1 ; exit multiplier = 0
+ __mulusa3_loop1_done:
+ mov_l r_arg1L, r_scratchL ; restore multiplicand
+@@ -779,7 +805,12 @@ __mulusa3_loop2:
+ __mulusa3_skip2:
+ lsl r_arg2L
+ rol r_arg2H
++#if defined (__AVR_TINY__)
++ subi r_arg2L, lo8(0)
++ sbci r_arg2L, hi8(0)
++#else
+ sbiw r_arg2L,0
++#endif
+ brne __mulusa3_loop2 ; exit if multiplier = 0
+ __mulusa3_exit:
+ clr __zero_reg__ ; got clobbered
+@@ -791,9 +822,7 @@ __mulusa3_exit:
+ #undef r_scratchH
+ #undef r_scratchHL
+ #undef r_scratchHH
+-
+ #endif
+-
+ #undef r_arg1L
+ #undef r_arg1H
+ #undef r_arg1HL
+@@ -821,8 +850,8 @@ __mulusa3_exit:
+ .global __divqq3
+ .func __divqq3
+ __divqq3:
+- mov r0, r_divd
+- eor r0, r_div
++ mov __tmp_reg__, r_divd
++ eor __tmp_reg__, r_div
+ sbrc r_div, 7
+ neg r_div
+ sbrc r_divd, 7
+@@ -831,7 +860,7 @@ __divqq3:
+ breq __divqq3_minus1 ; if equal return -1
+ rcall __udivuqq3
+ lsr r_quo
+- sbrc r0, 7 ; negate result if needed
++ sbrc __tmp_reg__, 7 ; negate result if needed
+ neg r_quo
+ ret
+ __divqq3_minus1:
+@@ -886,8 +915,8 @@ __udivuqq3_cont:
+ .global __divhq3
+ .func __divhq3
+ __divhq3:
+- mov r0, r_divdH
+- eor r0, r_divH
++ mov __tmp_reg__, r_divdH
++ eor __tmp_reg__, r_divH
+ sbrs r_divH, 7
+ rjmp __divhq3_divpos
+ com r_divH
+@@ -906,7 +935,7 @@ __divhq3_divdpos:
+ rcall __udivuhq3
+ lsr r_quoH
+ ror r_quoL
+- sbrs r0, 7 ; negate result if needed
++ sbrs __tmp_reg__, 7 ; negate result if needed
+ ret
+ com r_quoH
+ neg r_quoL
+@@ -958,8 +987,8 @@ __udivuhq3_cont:
+ .global __divha3
+ .func __divha3
+ __divha3:
+- mov r0, r_divdH
+- eor r0, r_divH
++ mov __tmp_reg__, r_divdH
++ eor __tmp_reg__, r_divH
+ sbrs r_divH, 7
+ rjmp __divha3_divpos
+ com r_divH
+@@ -973,7 +1002,7 @@ __divha3_divpos:
+ sbci r_divdH,-1
+ __divha3_divdpos:
+ rcall __udivuha3
+- sbrs r0, 7 ; negate result if needed
++ sbrs __tmp_reg__, 7 ; negate result if needed
+ ret
+ com r_quoH
+ neg r_quoL
+@@ -1027,8 +1056,8 @@ __udivuha3:
+ .global __divsa3
+ .func __divsa3
+ __divsa3:
+- mov r0, r27
+- eor r0, r_divHH
++ mov __tmp_reg__, r27
++ eor __tmp_reg__, r_divHH
+ sbrs r_divHH, 7
+ rjmp __divsa3_divpos
+ com r_divHH
+@@ -1050,7 +1079,7 @@ __divsa3_divpos:
+ sbci r_arg1HH,-1
+ __divsa3_arg1pos:
+ rcall __udivusa3
+- sbrs r0, 7 ; negate result if needed
++ sbrs __tmp_reg__, 7 ; negate result if needed
+ ret
+ com r_quoHH
+ com r_quoHL
+diff --git a/gcc/config/avr/libgcc.S b/gcc/config/avr/libgcc.S
+index 5a711a8..eda75a1 100644
+--- a/gcc/config/avr/libgcc.S
++++ b/gcc/config/avr/libgcc.S
+@@ -22,8 +22,13 @@ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
++#if defined (__AVR_TINY__)
++#define __zero_reg__ r17
++#define __tmp_reg__ r16
++#else
+ #define __zero_reg__ r1
+ #define __tmp_reg__ r0
++#endif
+ #define __SREG__ 0x3f
+ #define __SP_H__ 0x3e
+ #define __SP_L__ 0x3d
+@@ -140,7 +145,12 @@ __mulhi3_skip1:
+
+ lsr r_arg1H ; gets LSB of multiplier
+ ror r_arg1L
++#if defined (__AVR_TINY__)
++ subi r_arg1L, lo8(0)
++ sbci r_arg1L, hi8(0)
++#else
+ sbiw r_arg1L,0
++#endif
+ brne __mulhi3_loop ; exit if multiplier = 0
+ __mulhi3_exit:
+ mov r_arg1H,r_resH ; result to return register
+@@ -304,7 +314,12 @@ __mulsi3_skip1:
+ ror r_arg1H
+ ror r_arg1L
+ brne __mulsi3_loop
++#if defined (__AVR_TINY__)
++ subi r_arg1HL, lo8(0)
++ sbci r_arg1HL, hi8(0)
++#else
+ sbiw r_arg1HL,0
++#endif
+ cpc r_arg1H,r_arg1L
+ brne __mulsi3_loop ; exit if multiplier = 0
+ __mulsi3_exit:
+@@ -610,6 +625,7 @@ __divmodsi4_neg1:
+ /**********************************
+ * This is a prologue subroutine
+ **********************************/
++#if !defined (__AVR_TINY__)
+ #if defined (L_prologue)
+
+ .global __prologue_saves__
+@@ -663,7 +679,6 @@ __prologue_saves__:
+ * This is an epilogue subroutine
+ */
+ #if defined (L_epilogue)
+-
+ .global __epilogue_restores__
+ .func __epilogue_restores__
+ __epilogue_restores__:
+@@ -704,6 +719,7 @@ __epilogue_restores__:
+ ret
+ .endfunc
+ #endif /* defined (L_epilogue) */
++#endif /* !defined (__AVR_TINY__) */
+
+ #ifdef L_exit
+ .section .fini9,"ax",@progbits
+@@ -730,6 +746,7 @@ _cleanup:
+ .endfunc
+ #endif /* defined (L_cleanup) */
+
++#if !defined(__AVR_TINY__)
+ #ifdef L_tablejump
+ .global __tablejump2__
+ .func __tablejump2__
+@@ -762,7 +779,9 @@ __tablejump__:
+ #endif
+ .endfunc
+ #endif /* defined (L_tablejump) */
++#endif
+
++#if !defined(__AVR_TINY__)
+ #ifdef L_copy_data
+ .section .init4,"ax",@progbits
+ .global __do_copy_data
+@@ -824,6 +843,7 @@ __do_copy_data:
+ brne .L__do_copy_data_loop
+ #endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */
+ #endif /* L_copy_data */
++#endif
+
+ /* __do_clear_bss is only necessary if there is anything in .bss section. */
+
+@@ -864,7 +884,12 @@ __do_global_ctors:
+ ldi r20, hh8(__ctors_end)
+ rjmp .L__do_global_ctors_start
+ .L__do_global_ctors_loop:
++#if defined (__AVR_TINY__)
++ subi r28, lo8(2)
++ sbci r29, hi8(2)
++#else
+ sbiw r28, 2
++#endif
+ sbc r20, __zero_reg__
+ mov_h r31, r29
+ mov_l r30, r28
+@@ -882,7 +907,12 @@ __do_global_ctors:
+ ldi r29, hi8(__ctors_end)
+ rjmp .L__do_global_ctors_start
+ .L__do_global_ctors_loop:
++#if defined (__AVR_TINY__)
++ subi r28, lo8(2)
++ sbci r29, hi8(2)
++#else
+ sbiw r28, 2
++#endif
+ mov_h r31, r29
+ mov_l r30, r28
+ XCALL __tablejump__
+@@ -905,7 +935,12 @@ __do_global_dtors:
+ ldi r20, hh8(__dtors_start)
+ rjmp .L__do_global_dtors_start
+ .L__do_global_dtors_loop:
++#if defined (__AVR_TINY__)
++ subi r28, lo8(2)
++ sbci r29, hi8(2)
++#else
+ sbiw r28, 2
++#endif
+ sbc r20, __zero_reg__
+ mov_h r31, r29
+ mov_l r30, r28
+@@ -926,7 +961,12 @@ __do_global_dtors:
+ mov_h r31, r29
+ mov_l r30, r28
+ XCALL __tablejump__
++#if defined (__AVR_TINY__)
++ subi r28, lo8(-2)
++ sbci r29, hi8(-2)
++#else
+ adiw r28, 2
++#endif
+ .L__do_global_dtors_start:
+ cpi r28, lo8(__dtors_end)
+ cpc r29, r17
+@@ -934,6 +974,7 @@ __do_global_dtors:
+ #endif /* defined(__AVR_HAVE_RAMPZ__) */
+ #endif /* L_dtors */
+
++#if !defined (__AVR_TINY__)
+ #ifdef L_tablejump_elpm
+ .global __tablejump_elpm__
+ .func __tablejump_elpm__
+@@ -963,5 +1004,6 @@ __tablejump_elpm__:
+ #endif /* defined (__AVR_HAVE_ELPM__) */
+ .endfunc
+ #endif /* defined (L_tablejump_elpm) */
++#endif /* !defined (__AVR_TINY__) */
+
+ #include "libgcc-fixed.S"
+diff --git a/gcc/config/avr/predicates.md b/gcc/config/avr/predicates.md
+index aca33d7..3ff85fd 100755
+--- a/gcc/config/avr/predicates.md
++++ b/gcc/config/avr/predicates.md
+@@ -45,21 +45,24 @@
+ ;; Return true if OP is a valid address for lower half of I/O space.
+ (define_predicate "low_io_address_operand"
+ (and (match_code "const_int")
+- (if_then_else (match_test "AVR_XMEGA")
++ (if_then_else (ior (match_test "AVR_XMEGA")
++ (match_test "AVR_TINY"))
+ (match_test "IN_RANGE((INTVAL (op)), 0x00, 0x1F)")
+ (match_test "IN_RANGE((INTVAL (op)), 0x20, 0x3F)"))))
+
+ ;; Return true if OP is a valid address for high half of I/O space.
+ (define_predicate "high_io_address_operand"
+ (and (match_code "const_int")
+- (if_then_else (match_test "AVR_XMEGA")
++ (if_then_else (ior (match_test "AVR_XMEGA")
++ (match_test "AVR_TINY"))
+ (match_test "IN_RANGE((INTVAL (op)), 0x20, 0x3F)")
+ (match_test "IN_RANGE((INTVAL (op)), 0x40, 0x5F)"))))
+
+ ;; Return true if OP is a valid address of I/O space.
+ (define_predicate "io_address_operand"
+ (and (match_code "const_int")
+- (if_then_else (match_test "AVR_XMEGA")
++ (if_then_else (ior (match_test "AVR_XMEGA")
++ (match_test "AVR_TINY"))
+ (match_test "IN_RANGE((INTVAL (op)), 0x0, (0x40 - GET_MODE_SIZE(mode)))")
+ (match_test "IN_RANGE((INTVAL (op)), 0x20, (0x60 - GET_MODE_SIZE(mode)))"))))
+
+diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
+index b068d3c..644195b 100644
+--- a/gcc/config/avr/t-avr
++++ b/gcc/config/avr/t-avr
+@@ -73,8 +73,8 @@ fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/avr/t-avr
+
+ FPBIT = fp-bit.c
+
+-MULTILIB_OPTIONS = mmcu=avr2/mmcu=avr25/mmcu=avr3/mmcu=avr31/mmcu=avr35/mmcu=avr4/mmcu=avr5/mmcu=avr51/mmcu=avr6/mmcu=avrxmega2/mmcu=avrxmega4/mmcu=avrxmega5/mmcu=avrxmega6/mmcu=avrxmega7
+-MULTILIB_DIRNAMES = avr2 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7
++MULTILIB_OPTIONS = mmcu=avr2/mmcu=avr25/mmcu=avr3/mmcu=avr31/mmcu=avr35/mmcu=avr4/mmcu=avr5/mmcu=avr51/mmcu=avr6/mmcu=avrxmega2/mmcu=avrxmega4/mmcu=avrxmega5/mmcu=avrxmega6/mmcu=avrxmega7/mmcu=avrtiny10
++MULTILIB_DIRNAMES = avr2 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7 avrtiny10
+
+ # The many avr2 matches are not listed here - this is the default.
+ MULTILIB_MATCHES = \
+@@ -244,7 +244,13 @@ MULTILIB_MATCHES = \
+ mmcu?avrxmega6=mmcu?atxmega256a3b \
+ mmcu?avrxmega6=mmcu?atxmega256d3 \
+ mmcu?avrxmega7=mmcu?atxmega128a1 \
+- mmcu?avrxmega7=mmcu?atxmega128a1u
++ mmcu?avrxmega7=mmcu?atxmega128a1u \
++ mmcu?avrtiny10=mmcu?attiny4 \
++ mmcu?avrtiny10=mmcu?attiny5 \
++ mmcu?avrtiny10=mmcu?attiny9 \
++ mmcu?avrtiny10=mmcu?attiny10 \
++ mmcu?avrtiny10=mmcu?attiny20 \
++ mmcu?avrtiny10=mmcu?attiny40
+
+ MULTILIB_EXCEPTIONS =
+
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0008-Adds-OS_main-attribute-feature.patch b/patches/gcc-4.4.3/atmel/0008-Adds-OS_main-attribute-feature.patch
new file mode 100644
index 0000000..f5e446e
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0008-Adds-OS_main-attribute-feature.patch
@@ -0,0 +1,78 @@
+From 453085e747e76605743415e3cb671f4eb3fd3f70 Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Wed, 20 Apr 2011 22:40:10 +0200
+Subject: [PATCH 08/10] Adds OS_main attribute feature
+
+Not yet complete committed patch written by Anatoly Sokolov.
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/60-gcc-4.4.3-osmain.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 8 +++++---
+ gcc/function.c | 8 ++++++++
+ gcc/rtl.h | 1 +
+ 3 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 422fad9..0175fae 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -1032,7 +1032,8 @@ expand_prologue (void)
+ }
+ else if ((!AVR_XMEGA && TARGET_NO_INTERRUPTS)
+ || (!AVR_XMEGA && cfun->machine->is_signal)
+- || (!AVR_XMEGA && cfun->machine->is_OS_main))
++ || (!AVR_XMEGA && cfun->machine->is_OS_main)
++ || (AVR_XMEGA && cfun->machine->is_nmi))
+ {
+ insn =
+ emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
+@@ -1205,9 +1206,10 @@ expand_epilogue (void)
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+ else if ((!AVR_XMEGA && TARGET_NO_INTERRUPTS)
+- || (!AVR_XMEGA && cfun->machine->is_signal))
++ || (!AVR_XMEGA && cfun->machine->is_signal)
++ || (AVR_XMEGA && cfun->machine->is_nmi))
+ {
+- emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
++ emit_insn (gen_movhi_sp_r_irq_off (stack_pointer_rtx,
+ frame_pointer_rtx));
+ }
+ else if (!AVR_XMEGA && cfun->machine->is_interrupt)
+diff --git a/gcc/function.c b/gcc/function.c
+index d582fea..9c71ae0 100644
+--- a/gcc/function.c
++++ b/gcc/function.c
+@@ -4936,6 +4936,14 @@ contains (const_rtx insn, VEC(int,heap) **vec)
+ }
+
+ int
++prologue_contains (const_rtx insn)
++{
++ if (contains (insn, &prologue))
++ return 1;
++ return 0;
++}
++
++int
+ prologue_epilogue_contains (const_rtx insn)
+ {
+ if (contains (insn, &prologue))
+diff --git a/gcc/rtl.h b/gcc/rtl.h
+index 838146b..38bcc13 100644
+--- a/gcc/rtl.h
++++ b/gcc/rtl.h
+@@ -2181,6 +2181,7 @@ extern void print_inline_rtx (FILE *, const_rtx, int);
+
+ /* In function.c */
+ extern void reposition_prologue_and_epilogue_notes (void);
++extern int prologue_contains (const_rtx);
+ extern int prologue_epilogue_contains (const_rtx);
+ extern int sibcall_epilogue_contains (const_rtx);
+ extern void mark_temp_addr_taken (rtx);
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0009-Adds-AVR-builtin-functions.patch b/patches/gcc-4.4.3/atmel/0009-Adds-AVR-builtin-functions.patch
new file mode 100644
index 0000000..8d93cd5
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0009-Adds-AVR-builtin-functions.patch
@@ -0,0 +1,611 @@
+From 6a39c24ac33a36787b8daa2cb76eda1200b565e4 Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Wed, 20 Apr 2011 22:48:45 +0200
+Subject: [PATCH 09/10] Adds AVR builtin functions
+
+Not yet committed patch written by Anatoly Sokolov and Eric
+Weddington.
+
+Assimilable post by Eric Weddington (2011-03-23):
+http://www.mail-archive.com/gcc-patches@gcc.gnu.org/msg01354.html
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/61-gcc-4.4.3-builtins-v6.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 244 +++++++++++++++++++++++++++++++++++++++++
+ gcc/config/avr/avr.md | 248 ++++++++++++++++++++++++++++++++++++++++--
+ gcc/config/avr/predicates.md | 5 +
+ 3 files changed, 485 insertions(+), 12 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index 0175fae..edbfd17 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -30,6 +30,7 @@
+ #include "insn-config.h"
+ #include "conditions.h"
+ #include "insn-attr.h"
++#include "insn-codes.h"
+ #include "flags.h"
+ #include "reload.h"
+ #include "tree.h"
+@@ -39,7 +40,9 @@
+ #include "obstack.h"
+ #include "function.h"
+ #include "recog.h"
++#include "optabs.h"
+ #include "ggc.h"
++#include "langhooks.h"
+ #include "tm_p.h"
+ #include "target.h"
+ #include "target-def.h"
+@@ -92,6 +95,8 @@ static bool avr_rtx_costs (rtx, int, int, int *, bool);
+ static int avr_address_cost (rtx, bool);
+ static bool avr_return_in_memory (const_tree, const_tree);
+ static struct machine_function * avr_init_machine_status (void);
++static void avr_init_builtins (void);
++static rtx avr_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+ static rtx avr_builtin_setjmp_frame_value (void);
+ static bool avr_hard_regno_scratch_ok (unsigned int);
+
+@@ -477,6 +482,12 @@ int avr_case_values_threshold = 30000;
+ #undef TARGET_HARD_REGNO_SCRATCH_OK
+ #define TARGET_HARD_REGNO_SCRATCH_OK avr_hard_regno_scratch_ok
+
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS avr_init_builtins
++
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN avr_expand_builtin
++
+ #undef TARGET_SCALAR_MODE_SUPPORTED_P
+ #define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
+
+@@ -7459,4 +7470,237 @@ avr_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+ return false;
+ }
+
++/* Codes for all the AVR builtins. */
++
++enum avr_builtins
++{
++ AVR_BUILTIN_SEI,
++ AVR_BUILTIN_CLI,
++ AVR_BUILTIN_WDR,
++ AVR_BUILTIN_SLEEP,
++ AVR_BUILTIN_SWAP,
++ AVR_BUILTIN_FMUL,
++ AVR_BUILTIN_FMULS,
++ AVR_BUILTIN_FMULSU,
++ AVR_BUILTIN_DELAY_CYCLES
++};
++
++#define def_builtin(NAME, TYPE, CODE) \
++do { \
++ add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
++ NULL, NULL_TREE); \
++} while (0)
++
++/* Set up all builtin functions for this target. */
++
++static void
++avr_init_builtins (void)
++{
++ tree void_ftype_void
++ = build_function_type (void_type_node, void_list_node);
++ tree uchar_ftype_uchar
++ = build_function_type_list (unsigned_char_type_node,
++ unsigned_char_type_node,
++ NULL_TREE);
++ tree uint_ftype_uchar_uchar
++ = build_function_type_list (unsigned_type_node,
++ unsigned_char_type_node,
++ unsigned_char_type_node,
++ NULL_TREE);
++ tree int_ftype_char_char
++ = build_function_type_list (integer_type_node,
++ char_type_node,
++ char_type_node,
++ NULL_TREE);
++ tree int_ftype_char_uchar
++ = build_function_type_list (integer_type_node,
++ char_type_node,
++ unsigned_char_type_node,
++ NULL_TREE);
++ tree void_ftype_ulong
++ = build_function_type_list (void_type_node,
++ long_unsigned_type_node,
++ NULL_TREE);
++
++ def_builtin ("__builtin_avr_sei", void_ftype_void, AVR_BUILTIN_SEI);
++ def_builtin ("__builtin_avr_cli", void_ftype_void, AVR_BUILTIN_CLI);
++ def_builtin ("__builtin_avr_wdr", void_ftype_void, AVR_BUILTIN_WDR);
++ def_builtin ("__builtin_avr_sleep", void_ftype_void, AVR_BUILTIN_SLEEP);
++
++ if (AVR_HAVE_MUL)
++ {
++ def_builtin ("__builtin_avr_fmul", uint_ftype_uchar_uchar,
++ AVR_BUILTIN_FMUL);
++ def_builtin ("__builtin_avr_fmuls", int_ftype_char_char,
++ AVR_BUILTIN_FMULS);
++ def_builtin ("__builtin_avr_fmulsu", int_ftype_char_uchar,
++ AVR_BUILTIN_FMULSU);
++ }
++
++ def_builtin ("__builtin_avr_swap", uchar_ftype_uchar, AVR_BUILTIN_SWAP);
++ def_builtin ("__builtin_avr_delay_cycles", void_ftype_ulong,
++ AVR_BUILTIN_DELAY_CYCLES);
++}
++
++struct builtin_description
++{
++ const enum insn_code icode;
++ const char *const name;
++ const enum avr_builtins code;
++};
++
++static const struct builtin_description bdesc_1arg[] =
++{
++ { CODE_FOR_swap, "__builtin_avr_swap", AVR_BUILTIN_SWAP }
++};
++
++static const struct builtin_description bdesc_2arg[] =
++{
++ { CODE_FOR_fmul, "__builtin_avr_fmul", AVR_BUILTIN_FMUL },
++ { CODE_FOR_fmuls, "__builtin_avr_fmuls", AVR_BUILTIN_FMULS },
++ { CODE_FOR_fmulsu, "__builtin_avr_fmulsu", AVR_BUILTIN_FMULSU }
++};
++
++/* Subroutine of avr_expand_builtin to take care of unop insns. */
++
++static rtx
++avr_expand_unop_builtin (enum insn_code icode, tree exp,
++ rtx target)
++{
++ rtx pat;
++ tree arg0 = CALL_EXPR_ARG (exp, 0);
++ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ enum machine_mode op0mode = GET_MODE (op0);
++ enum machine_mode tmode = insn_data[icode].operand[0].mode;
++ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
++
++ if (! target
++ || GET_MODE (target) != tmode
++ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ if (op0mode == SImode && mode0 == HImode)
++ {
++ op0mode = HImode;
++ op0 = gen_lowpart (HImode, op0);
++ }
++ gcc_assert (op0mode == mode0 || op0mode == VOIDmode);
++
++ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
++ op0 = copy_to_mode_reg (mode0, op0);
++
++ pat = GEN_FCN (icode) (target, op0);
++ if (! pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++/* Subroutine of avr_expand_builtin to take care of binop insns. */
++
++static rtx
++avr_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
++{
++ rtx pat;
++ tree arg0 = CALL_EXPR_ARG (exp, 0);
++ tree arg1 = CALL_EXPR_ARG (exp, 1);
++ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ enum machine_mode op0mode = GET_MODE (op0);
++ enum machine_mode op1mode = GET_MODE (op1);
++ enum machine_mode tmode = insn_data[icode].operand[0].mode;
++ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
++ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
++
++ if (! target
++ || GET_MODE (target) != tmode
++ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ if ((op0mode == SImode || op0mode == VOIDmode) && mode0 == HImode)
++ {
++ op0mode = HImode;
++ op0 = gen_lowpart (HImode, op0);
++ }
++ if ((op1mode == SImode || op1mode == VOIDmode) && mode1 == HImode)
++ {
++ op1mode = HImode;
++ op1 = gen_lowpart (HImode, op1);
++ }
++ /* In case the insn wants input operands in modes different from
++ the result, abort. */
++ gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
++ && (op1mode == mode1 || op1mode == VOIDmode));
++
++ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
++ op0 = copy_to_mode_reg (mode0, op0);
++ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
++ op1 = copy_to_mode_reg (mode1, op1);
++
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (! pat)
++ return 0;
++
++ emit_insn (pat);
++ return target;
++}
++
++/* Expand an expression EXP that calls a built-in function,
++ with result going to TARGET if that's convenient
++ (and in mode MODE if that's convenient).
++ SUBTARGET may be used as the target for computing one of EXP's operands.
++ IGNORE is nonzero if the value is to be ignored. */
++
++static rtx
++avr_expand_builtin (tree exp, rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ size_t i;
++ const struct builtin_description *d;
++ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
++ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
++ rtx pat;
++ tree arg0;
++ rtx op0;
++
++ switch (fcode)
++ {
++ case AVR_BUILTIN_SEI:
++ emit_insn (gen_enable_interrupt ());
++ return 0;
++ case AVR_BUILTIN_CLI:
++ emit_insn (gen_disable_interrupt ());
++ return 0;
++ case AVR_BUILTIN_WDR:
++ emit_insn (gen_wdr ());
++ return 0;
++ case AVR_BUILTIN_SLEEP:
++ emit_insn (gen_sleep ());
++ return 0;
++ case AVR_BUILTIN_DELAY_CYCLES:
++ {
++ arg0 = CALL_EXPR_ARG (exp, 0);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++
++ if (!CONSTANT_P (op0))
++ error ("__builtin_avr_delay_cycles expects an integer constant.");
++
++ emit_insn (gen_delay_cycles (op0));
++ return 0;
++ }
++ }
++
++ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
++ if (d->code == fcode)
++ return avr_expand_unop_builtin (d->icode, exp, target);
++
++ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ if (d->code == fcode)
++ return avr_expand_binop_builtin (d->icode, exp, target);
++
++ gcc_unreachable ();
++}
++
+ #include "gt-avr.h"
+diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
+index 96ef48e..45426ce 100644
+--- a/gcc/config/avr/avr.md
++++ b/gcc/config/avr/avr.md
+@@ -49,14 +49,29 @@
+
+ (UNSPEC_STRLEN 0)
+ (UNSPEC_INDEX_JMP 1)
+- (UNSPEC_SEI 2)
+- (UNSPEC_CLI 3)
++ (UNSPEC_SWAP 2)
++ (UNSPEC_FMUL 3)
++ (UNSPEC_FMULS 4)
++ (UNSPEC_FMULSU 5)
++
+
+ (UNSPECV_PROLOGUE_SAVES 0)
+ (UNSPECV_EPILOGUE_RESTORES 1)
+ (UNSPECV_WRITE_SP_IRQ_ON 2)
+ (UNSPECV_WRITE_SP_IRQ_OFF 3)
+- (UNSPECV_GOTO_RECEIVER 4)])
++ (UNSPECV_GOTO_RECEIVER 4)
++ (UNSPECV_SEI 5)
++ (UNSPECV_CLI 6)
++ (UNSPECV_NOP 7)
++ (UNSPECV_NOP2 8)
++ (UNSPECV_SLEEP 9)
++ (UNSPECV_WDR 10)
++
++ (UNSPECV_DELAY_CYCLES 100)
++ (UNSPECV_DELAY_CYCLES_1 101)
++ (UNSPECV_DELAY_CYCLES_2 102)
++ (UNSPECV_DELAY_CYCLES_3 103)
++ (UNSPECV_DELAY_CYCLES_4 104)])
+
+ (include "predicates.md")
+ (include "constraints.md")
+@@ -2888,13 +2903,6 @@
+ (const_int 1))
+ (const_int 3)])])
+
+-(define_insn "nop"
+- [(const_int 0)]
+- ""
+- "nop"
+- [(set_attr "cc" "none")
+- (set_attr "length" "1")])
+-
+ ; indirect jump
+ (define_insn "indirect_jump"
+ [(set (pc) (match_operand:HI 0 "register_operand" "!z,*r"))]
+@@ -3274,7 +3282,7 @@
+
+ ;; Enable Interrupts
+ (define_insn "enable_interrupt"
+- [(unspec [(const_int 0)] UNSPEC_SEI)]
++ [(unspec_volatile [(const_int 0)] UNSPECV_SEI)]
+ ""
+ "sei"
+ [(set_attr "length" "1")
+@@ -3283,7 +3291,7 @@
+
+ ;; Disable Interrupts
+ (define_insn "disable_interrupt"
+- [(unspec [(const_int 0)] UNSPEC_CLI)]
++ [(unspec_volatile [(const_int 0)] UNSPECV_CLI)]
+ ""
+ "cli"
+ [(set_attr "length" "1")
+@@ -3383,3 +3391,219 @@
+ expand_epilogue ();
+ DONE;
+ }")
++
++;;delay_cycles_delay_cycles_delay_cycles_delay_cycles_delay_cycles_delay
++;; delay_cycles
++
++(define_expand "delay_cycles"
++ [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "i")]
++ UNSPECV_DELAY_CYCLES)]
++ ""
++ "
++ rtx loop_reg;
++ unsigned int cycles = INTVAL (operands[0]);
++ if (IN_RANGE(cycles, 83886082, 0xFFFFFFFF))
++ {
++ unsigned int loop_count = ((cycles - 9) / 6) + 1;
++ unsigned int cycles_used = (((loop_count - 1) * 6) + 9);
++ emit_insn (gen_delay_cycles_4 (gen_int_mode (loop_count, SImode)));
++ cycles -= cycles_used;
++ }
++ if (IN_RANGE(cycles, 262145, 83886081))
++ {
++ unsigned int loop_count = ((cycles - 7) / 5) + 1;
++ if (loop_count > 0xFFFFFF)
++ loop_count = 0xFFFFFF;
++ unsigned int cycles_used = (((loop_count - 1) * 5) + 7);
++ emit_insn (gen_delay_cycles_3 (gen_int_mode (loop_count, SImode)));
++ cycles -= cycles_used;
++ }
++ if (IN_RANGE(cycles, 768, 262144))
++ {
++ unsigned int loop_count = ((cycles - 5) / 4) + 1;
++ if (loop_count > 0xFFFF)
++ loop_count = 0xFFFF;
++ unsigned int cycles_used = (((loop_count - 1) * 4) + 5);
++ emit_insn (gen_delay_cycles_2 (gen_int_mode (loop_count, HImode)));
++ cycles -= cycles_used;
++ }
++ if (IN_RANGE(cycles, 6, 767))
++ {
++ unsigned int loop_count = (cycles/ 3);
++ if (loop_count > 255)
++ loop_count = 255;
++ unsigned int cycles_used = (loop_count * 3);
++ emit_insn (gen_delay_cycles_1 (gen_int_mode (loop_count, QImode)));
++ cycles -= cycles_used;
++ }
++ while (cycles >= 2)
++ {
++ emit_insn (gen_nop2 ());
++ cycles -= 2;
++ }
++ if (cycles == 1)
++ {
++ emit_insn (gen_nop ());
++ cycles--;
++ }
++ DONE;
++ ")
++
++(define_insn "delay_cycles_1"
++[(unspec_volatile [(const_int 0)] UNSPECV_DELAY_CYCLES_1)
++ (match_operand:QI 0 "immediate_operand" "")
++ (clobber (match_scratch:QI 1 "=&d"))]
++ ""
++ " ldi %1,lo8(%0)
++ 1:dec %1
++ brne 1b"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++(define_insn "delay_cycles_2"
++ [(unspec_volatile [(const_int 0)] UNSPECV_DELAY_CYCLES_2)
++ (match_operand:HI 0 "immediate_operand" "")
++ (clobber (match_scratch:HI 1 "=&w"))]
++ ""
++ " ldi %A1,lo8(%0)
++ ldi %B1,hi8(%0)
++ 1:sbiw %A1,1
++ brne 1b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "clobber")])
++
++(define_insn "delay_cycles_3"
++ [(unspec_volatile [(const_int 0)] UNSPECV_DELAY_CYCLES_3)
++ (match_operand:SI 0 "immediate_operand" "")
++ (clobber (match_scratch:SI 1 "=&d"))]
++ ""
++ " ldi %A1,lo8(%0)
++ ldi %B1,hi8(%0)
++ ldi %C1,hlo8(%0)
++ 1:subi %A1,1
++ sbci %B1,0
++ sbci %C1,0
++ brne 1b"
++ [(set_attr "length" "7")
++ (set_attr "cc" "clobber")])
++
++(define_insn "delay_cycles_4"
++ [(unspec_volatile [(const_int 0)] UNSPECV_DELAY_CYCLES_4)
++ (match_operand:SI 0 "immediate_operand" "")
++ (clobber (match_scratch:SI 1 "=&d"))]
++ ""
++ " ldi %A1,lo8(%0)
++ ldi %B1,hi8(%0)
++ ldi %C1,hlo8(%0)
++ ldi %D1,hhi8(%0)
++ 1:subi %A1,1
++ sbci %B1,0
++ sbci %C1,0
++ sbci %D1,0
++ brne 1b"
++ [(set_attr "length" "9")
++ (set_attr "cc" "clobber")])
++
++;; CPU instructions
++
++;; NOP
++(define_insn "nop"
++ [(unspec_volatile [(const_int 0)] UNSPECV_NOP)]
++ ""
++ "nop"
++ [(set_attr "length" "1")
++ (set_attr "cc" "none")])
++
++;; NOP2
++(define_insn "nop2"
++ [(unspec_volatile [(const_int 0)] UNSPECV_NOP2)]
++ ""
++ "rjmp ."
++ [(set_attr "length" "1")
++ (set_attr "cc" "none")])
++
++;; SEI, Enable Interrupts
++;(define_insn "sei"
++; [(unspec_volatile [(const_int 0)] UNSPECV_SEI)]
++; ""
++; "sei"
++; [(set_attr "length" "1")
++; (set_attr "cc" "none")
++; ])
++
++;; CLI, Disable Interrupts
++;(define_insn "cli"
++; [(unspec_volatile [(const_int 0)] UNSPECV_CLI)]
++; ""
++; "cli"
++; [(set_attr "length" "1")
++; (set_attr "cc" "none")
++; ])
++
++;; SLEEP
++(define_insn "sleep"
++ [(unspec_volatile [(const_int 0)] UNSPECV_SLEEP)]
++ ""
++ "sleep"
++ [(set_attr "length" "1")
++ (set_attr "cc" "none")
++ ])
++
++;; WDR
++(define_insn "wdr"
++ [(unspec_volatile [(const_int 0)] UNSPECV_WDR)]
++ ""
++ "wdr"
++ [(set_attr "length" "1")
++ (set_attr "cc" "none")
++ ])
++
++;; SWAP
++(define_insn "swap"
++ [(set (match_operand:QI 0 "register_operand" "=r")
++ (unspec:QI [(match_operand:QI 1 "register_operand" "0")]
++ UNSPEC_SWAP))]
++ ""
++ "swap %0"
++ [(set_attr "length" "1")
++ (set_attr "cc" "none")])
++
++;; FMUL
++(define_insn "fmul"
++ [(set (match_operand:HI 0 "a_register_operand" "=r")
++ (unspec:HI [(match_operand:QI 1 "a_register_operand" "r")
++ (match_operand:QI 2 "a_register_operand" "r")]
++ UNSPEC_FMUL))]
++ "AVR_HAVE_MUL"
++ "fmul %1,%2
++ movw %0,r0
++ clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++;; FMULS
++(define_insn "fmuls"
++ [(set (match_operand:HI 0 "a_register_operand" "=r")
++ (unspec:HI [(match_operand:QI 1 "a_register_operand" "r")
++ (match_operand:QI 2 "a_register_operand" "r")]
++ UNSPEC_FMULS))]
++ "AVR_HAVE_MUL"
++ "fmuls %1,%2
++ movw %0,r0
++ clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
++;; FMULSU
++(define_insn "fmulsu"
++ [(set (match_operand:HI 0 "a_register_operand" "=r")
++ (unspec:HI [(match_operand:QI 1 "a_register_operand" "r")
++ (match_operand:QI 2 "a_register_operand" "r")]
++ UNSPEC_FMULSU))]
++ "AVR_HAVE_MUL"
++ "fmulsu %1,%2
++ movw %0,r0
++ clr r1"
++ [(set_attr "length" "3")
++ (set_attr "cc" "clobber")])
++
+diff --git a/gcc/config/avr/predicates.md b/gcc/config/avr/predicates.md
+index 3ff85fd..f3f181c 100755
+--- a/gcc/config/avr/predicates.md
++++ b/gcc/config/avr/predicates.md
+@@ -27,6 +27,11 @@
+ (and (match_code "reg")
+ (match_test "REGNO (op) >= 16 && REGNO (op) <= 31")))
+
++;; Registers from r16 to 24.
++(define_predicate "a_register_operand"
++ (and (match_code "reg")
++ (match_test "REGNO (op) >= 16 && REGNO (op) <= 24")))
++
+ (define_predicate "even_register_operand"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) <= 31")
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/atmel/0010-Disable-fixed-point-support-for-avrtiny10-family.patch b/patches/gcc-4.4.3/atmel/0010-Disable-fixed-point-support-for-avrtiny10-family.patch
new file mode 100644
index 0000000..e3ef43b
--- /dev/null
+++ b/patches/gcc-4.4.3/atmel/0010-Disable-fixed-point-support-for-avrtiny10-family.patch
@@ -0,0 +1,84 @@
+From 5a7ed15db57921c434b1b3002ecd6dfaf61d8d7e Mon Sep 17 00:00:00 2001
+From: Stephan Linz <linz@li-pro.net>
+Date: Wed, 27 Apr 2011 19:51:30 +0200
+Subject: [PATCH 10/10] Disable fixed point support for avrtiny10 family
+
+Not yet committed patch written by Eric Weddington.
+
+Atmel adds preliminary support for fixed point arithmetic. This is
+not supported for all the devices, even for avrtiny10 familiy.
+
+Original ATMEL patch from:
+http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/62-gcc-4.4.3-avrtiny10-non-fixedpoint.patch
+
+Signed-off-by: Stephan Linz <linz@li-pro.net>
+---
+ gcc/config/avr/avr.c | 19 ++++++++++++++++---
+ gcc/config/fixed-bit.c | 4 ++--
+ 2 files changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
+index edbfd17..f7c6256 100644
+--- a/gcc/config/avr/avr.c
++++ b/gcc/config/avr/avr.c
+@@ -429,6 +429,16 @@ avr_scalar_mode_supported_p (enum machine_mode mode)
+ return default_scalar_mode_supported_p (mode);
+ }
+
++/* Implement TARGET_FIXED_POINT_SUPPORTED_P. */
++static bool
++avr_fixed_point_supported_p ()
++{
++ if (AVR_TINY)
++ return false;
++
++ return default_fixed_point_supported_p ();
++}
++
+ int avr_case_values_threshold = 30000;
+
+ /* Initialize the GCC target structure. */
+@@ -482,15 +492,18 @@ int avr_case_values_threshold = 30000;
+ #undef TARGET_HARD_REGNO_SCRATCH_OK
+ #define TARGET_HARD_REGNO_SCRATCH_OK avr_hard_regno_scratch_ok
+
++#undef TARGET_SCALAR_MODE_SUPPORTED_P
++#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
++
++#undef TARGET_FIXED_POINT_SUPPORTED_P
++#define TARGET_FIXED_POINT_SUPPORTED_P avr_fixed_point_supported_p
++
+ #undef TARGET_INIT_BUILTINS
+ #define TARGET_INIT_BUILTINS avr_init_builtins
+
+ #undef TARGET_EXPAND_BUILTIN
+ #define TARGET_EXPAND_BUILTIN avr_expand_builtin
+
+-#undef TARGET_SCALAR_MODE_SUPPORTED_P
+-#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
+-
+ struct gcc_target targetm = TARGET_INITIALIZER;
+
+ void
+diff --git a/gcc/config/fixed-bit.c b/gcc/config/fixed-bit.c
+index 40ac2e2..3513cf9 100644
+--- a/gcc/config/fixed-bit.c
++++ b/gcc/config/fixed-bit.c
+@@ -41,7 +41,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ Floating-point: SF, DF
+ Ex: If we define FROM_QQ and TO_SI, the conversion from QQ to SI is
+ generated. */
+-
++#if !defined (__AVR_TINY__)
+ #include "tconfig.h"
+ #include "tsystem.h"
+ #include "coretypes.h"
+@@ -1213,4 +1213,4 @@ SATFRACT (FROM_FLOAT_C_TYPE a)
+ return c;
+ }
+ #endif /* defined(SATFRACT) && FROM_TYPE == 3 && TO_TYPE == 4 */
+-
++#endif /* __AVR_TINY__ */
+--
+1.6.0.4
+
diff --git a/patches/gcc-4.4.3/series b/patches/gcc-4.4.3/series
new file mode 100644
index 0000000..269f61f
--- /dev/null
+++ b/patches/gcc-4.4.3/series
@@ -0,0 +1,12 @@
+# Atmel's patch set reviewed and pulled from:
+# http://distribute.atmel.no/tools/opensource/avr-gcc/gcc-4.4.3/
+atmel/0001-Add-support-for-fixed-point-operations.patch
+atmel/0002-Fix-incomplete-check-in-RTL-for-pm-annotation.patch
+atmel/0003-Fix-handling-of-empty-.data-or-.bss-section.patch
+atmel/0004-Add-support-for-XMEGA-devices.patch
+atmel/0005-Add-remove-support-for-devices.patch
+atmel/0006-Add-support-for-more-XMEGA-devices.patch
+atmel/0007-Add-support-for-devices-with-16-gp-registers.patch
+atmel/0008-Adds-OS_main-attribute-feature.patch
+atmel/0009-Adds-AVR-builtin-functions.patch
+atmel/0010-Disable-fixed-point-support-for-avrtiny10-family.patch