From 01c3f0a42a2a0ff0c3fed80a1a25f2641ae72554 Mon Sep 17 00:00:00 2001
From: Guenter Roeck <linux@roeck-us.net>
Date: Sun, 10 Sep 2017 13:44:47 -0700
Subject: [PATCH 01/14] sparc64: mmu_context: Add missing include files

Fix the following build errors.

In file included from arch/sparc/include/asm/mmu_context.h:4:0,
                 from include/linux/mmu_context.h:4,
		 from drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h:29,
		 from drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c:23:
arch/sparc/include/asm/mmu_context_64.h:22:37: error:
	unknown type name 'per_cpu_secondary_mm'

arch/sparc/include/asm/mmu_context_64.h: In function 'switch_mm':
arch/sparc/include/asm/mmu_context_64.h:79:2: error:
	implicit declaration of function 'smp_processor_id'

Fixes: 70539bd79500 ("drm/amd: Update MEC HQD loading code for KFD")
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/mmu_context_64.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 87841d687f8d..a34315e1f0ed 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -7,9 +7,11 @@
 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
+#include <linux/smp.h>
 
 #include <asm/spitfire.h>
 #include <asm-generic/mm_hooks.h>
+#include <asm/percpu.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {

From 92c807067e9ade7d41f453befe560a362e95be7f Mon Sep 17 00:00:00 2001
From: Corentin Labbe <clabbe.montjoie@gmail.com>
Date: Mon, 18 Sep 2017 19:58:21 +0200
Subject: [PATCH 02/14] sparc: time: Remove unneeded linux/miscdevice.h include

arch/sparc/kernel/time_64.c does not use any misc device, so the inclusion
of linux/miscdevice.h is unnecessary.

Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/kernel/time_64.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 564f0e46ffd4..5c8524ff09d9 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -27,7 +27,6 @@
 #include <linux/jiffies.h>
 #include <linux/cpufreq.h>
 #include <linux/percpu.h>
-#include <linux/miscdevice.h>
 #include <linux/rtc/m48t59.h>
 #include <linux/kernel_stat.h>
 #include <linux/clockchips.h>

From da61e7308df868c20a41861c0a11f6eefd822993 Mon Sep 17 00:00:00 2001
From: Corentin Labbe <clabbe.montjoie@gmail.com>
Date: Mon, 18 Sep 2017 20:10:36 +0200
Subject: [PATCH 03/14] sbus: char: Move D7S_MINOR to
 include/linux/miscdevice.h

This patch moves the D7S_MINOR define to include/linux/miscdevice.h.
It is better to keep all minor numbers in the same place.

Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/sbus/char/display7seg.c | 1 -
 include/linux/miscdevice.h      | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index f32765d3cbd8..5c8ed7350a04 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -22,7 +22,6 @@
 
 #include <asm/display7seg.h>
 
-#define D7S_MINOR	193
 #define DRIVER_NAME	"d7s"
 #define PFX		DRIVER_NAME ": "
 
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 58751eae5f77..05c3892b3e92 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -35,6 +35,7 @@
 #define HWRNG_MINOR		183
 #define MICROCODE_MINOR		184
 #define IRNET_MINOR		187
+#define D7S_MINOR		193
 #define VFIO_MINOR		196
 #define TUN_MINOR		200
 #define CUSE_MINOR		203

From 23198ddffb6cddb5d5824230af4dd4b46e4046a4 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Wed, 27 Sep 2017 22:38:19 -0700
Subject: [PATCH 04/14] sparc32: Add cmpxchg64().

This fixes the build with the i40e driver enabled, which uses cmpxchg64().
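
For reference, a small userspace model (not part of this patch) of the
semantics cmpxchg64() is expected to provide; the kernel implementation in
this patch serializes through the existing ATOMIC_HASH spinlocks rather than
a single lock:

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Model: return the previous value; store 'new' only if *ptr matched 'old'. */
static uint64_t cmpxchg64_model(uint64_t *ptr, uint64_t old, uint64_t new)
{
	uint64_t prev;

	pthread_mutex_lock(&lock);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	pthread_mutex_unlock(&lock);

	return prev;
}

int main(void)
{
	uint64_t v = 5;

	/* Succeeds: v becomes 9, returns the old value 5. */
	printf("%llu\n", (unsigned long long)cmpxchg64_model(&v, 5, 9));
	/* Fails: v stays 9, returns 9; a caller would retry with that value. */
	printf("%llu\n", (unsigned long long)cmpxchg64_model(&v, 5, 1));
	return 0;
}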

Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/cmpxchg_32.h |  3 +++
 arch/sparc/lib/atomic32.c           | 14 ++++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 83ffb83c5397..a0101a58fd3f 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -62,6 +62,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 			(unsigned long)_n_, sizeof(*(ptr)));		\
 })
 
+u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+#define cmpxchg64(ptr, old, new)	__cmpxchg_u64(ptr, old, new)
+
 #include <asm-generic/cmpxchg-local.h>
 
 /*
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 2c373329d5cb..ddacb5aeb424 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -172,6 +172,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
 
+u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
+{
+	unsigned long flags;
+	u64 prev;
+
+	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+	return prev;
+}
+EXPORT_SYMBOL(__cmpxchg_u64);
+
 unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
 {
 	unsigned long flags;

From 9a08862a5d2e266ecea1865547463da2745fc687 Mon Sep 17 00:00:00 2001
From: Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>
Date: Thu, 21 Sep 2017 11:05:31 -0400
Subject: [PATCH 05/14] vDSO for sparc

The following patch is based on work done by Nick Alcock on the 64-bit vDSO
for sparc in Oracle Linux. I have extended it to include support for the
32-bit vDSO for sparc on a 64-bit kernel.

The sparc vDSO is based on the x86 implementation. This patch provides vDSO
support for both 64-bit and 32-bit programs on a 64-bit kernel. The vDSO is
disabled on 32-bit sparc kernels.

*) vclock_gettime.c contains all the vDSO functions. Since the data page is
   mapped before the vDSO code page, the pointer to the data page is obtained
   by subtracting an offset from an address within the vDSO code page; the
   return address stored in %i7 is used for this purpose.
*) During compilation, both 32-bit and 64-bit vDSO images are built and
   converted into raw bytes by the vdso2c program, ready to be mapped into a
   process. 32-bit images are built only if CONFIG_COMPAT is enabled. vdso2c
   generates two files, vdso-image-64.c and vdso-image-32.c, which contain the
   respective vDSO images as C structures.
*) During vDSO initialization, the required number of vDSO pages is allocated
   and the raw bytes are copied into them.
*) On every exec, these pages are mapped into the process through
   arch_setup_additional_pages() and the location of the mapping is passed to
   the process via the AT_SYSINFO_EHDR aux vector entry, which glibc uses
   (see the sketch after this list).
*) A new update_vsyscall() routine for sparc is added to keep the vDSO data
   page up to date.
*) As the vDSO cannot contain dynamically relocatable references, a new
   version of cpu_relax() is added for use by the vDSO.
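
For illustration only (not part of this patch), a minimal userspace sketch of
how a process can locate the vDSO image announced via AT_SYSINFO_EHDR; glibc
performs the equivalent lookup internally before resolving clock_gettime()
and gettimeofday() to the vDSO symbols:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* Start address of the vDSO ELF image the kernel mapped, or 0 if none. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (!vdso) {
		printf("no vDSO mapped (e.g. booted with vdso=0)\n");
		return 1;
	}
	printf("vDSO mapped at 0x%lx\n", vdso);
	return 0;
}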

This change also requires a corresponding change (putback) to glibc in order
to use the vDSO. For testing, programs that want to try the vDSO can be
compiled against the generated vdso64.so / vdso32.so in the source tree.

Testing:

========
[root@localhost ~]# cat vdso_test.c
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
        struct timespec tv_start, tv_end;
        struct timeval tv_tmp;
        int i;
        int count = 1 * 1000 * 10000;
        long long diff;

        clock_gettime(CLOCK_REALTIME, &tv_start);
        for (i = 0; i < count; i++)
                gettimeofday(&tv_tmp, NULL);
        clock_gettime(CLOCK_REALTIME, &tv_end);
        diff = (long long)(tv_end.tv_sec - tv_start.tv_sec) * (1*1000*1000*1000);
        diff += (tv_end.tv_nsec - tv_start.tv_nsec);
        printf("Start sec: %ld\n", tv_start.tv_sec);
        printf("End sec  : %ld\n", tv_end.tv_sec);
        printf("%d cycles in %lld ns = %f ns/cycle\n", count, diff,
               (double)diff / (double)count);
        return 0;
}

[root@localhost ~]# cc vdso_test.c -o t32_without_fix -m32 -lrt
[root@localhost ~]# ./t32_without_fix
Start sec: 1502396130
End sec  : 1502396140
10000000 cycles in 9565148528 ns = 956.514853 ns/cycle
[root@localhost ~]# cc vdso_test.c -o t32_with_fix -m32 ./vdso32.so.dbg
[root@localhost ~]# ./t32_with_fix
Start sec: 1502396168
End sec  : 1502396169
10000000 cycles in 798141262 ns = 79.814126 ns/cycle
[root@localhost ~]# cc vdso_test.c -o t64_without_fix -m64 -lrt
[root@localhost ~]# ./t64_without_fix
Start sec: 1502396208
End sec  : 1502396218
10000000 cycles in 9846091800 ns = 984.609180 ns/cycle
[root@localhost ~]# cc vdso_test.c -o t64_with_fix -m64 ./vdso64.so.dbg
[root@localhost ~]# ./t64_with_fix
Start sec: 1502396257
End sec  : 1502396257
10000000 cycles in 380984048 ns = 38.098405 ns/cycle

V1 to V2 Changes:
=================
	Added hot-patching code to switch the "read %stick" instruction to a
"read %tick" instruction based on the hardware.

V2 to V3 Changes:
=================
	Merged the latest changes from sparc-next and moved the initialization
of clocksource_tick.archdata.vclock_mode to time_init_early(). Disabled the
queued spinlock and rwlock configuration when simulating a 32-bit config
to compile the 32-bit vDSO.

V3 to V4 Changes:
=================
	Hardcoded the page size as 8192 in the linker script for both 64-bit and
32-bit binaries. Removed unused variables in vdso2c.h. Added the -mv8plus flag
to the Makefile to prevent the generation of relocation entries for __lshrdi3
in the 32-bit vDSO binary.

Signed-off-by: Nick Alcock <nick.alcock@oracle.com>
Signed-off-by: Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/Kbuild                       |   1 +
 arch/sparc/Kconfig                      |   2 +
 arch/sparc/Makefile                     |   4 +
 arch/sparc/include/asm/clocksource.h    |  17 ++
 arch/sparc/include/asm/elf_64.h         |  14 ++
 arch/sparc/include/asm/mmu_64.h         |   1 +
 arch/sparc/include/asm/processor_64.h   |   8 +
 arch/sparc/include/asm/vdso.h           |  24 +++
 arch/sparc/include/asm/vvar.h           |  74 +++++++
 arch/sparc/include/uapi/asm/auxvec.h    |   4 +
 arch/sparc/kernel/Makefile              |   1 +
 arch/sparc/kernel/time_64.c             |  11 +-
 arch/sparc/kernel/vdso.c                |  70 +++++++
 arch/sparc/vdso/.gitignore              |   3 +
 arch/sparc/vdso/Makefile                | 149 +++++++++++++
 arch/sparc/vdso/vclock_gettime.c        | 264 +++++++++++++++++++++++
 arch/sparc/vdso/vdso-layout.lds.S       | 104 +++++++++
 arch/sparc/vdso/vdso-note.S             |  12 ++
 arch/sparc/vdso/vdso.lds.S              |  25 +++
 arch/sparc/vdso/vdso2c.c                | 234 +++++++++++++++++++++
 arch/sparc/vdso/vdso2c.h                | 143 +++++++++++++
 arch/sparc/vdso/vdso32/.gitignore       |   1 +
 arch/sparc/vdso/vdso32/vclock_gettime.c |  26 +++
 arch/sparc/vdso/vdso32/vdso-note.S      |  12 ++
 arch/sparc/vdso/vdso32/vdso32.lds.S     |  24 +++
 arch/sparc/vdso/vma.c                   | 268 ++++++++++++++++++++++++
 26 files changed, 1494 insertions(+), 2 deletions(-)
 create mode 100644 arch/sparc/include/asm/clocksource.h
 create mode 100644 arch/sparc/include/asm/vdso.h
 create mode 100644 arch/sparc/include/asm/vvar.h
 create mode 100644 arch/sparc/kernel/vdso.c
 create mode 100644 arch/sparc/vdso/.gitignore
 create mode 100644 arch/sparc/vdso/Makefile
 create mode 100644 arch/sparc/vdso/vclock_gettime.c
 create mode 100644 arch/sparc/vdso/vdso-layout.lds.S
 create mode 100644 arch/sparc/vdso/vdso-note.S
 create mode 100644 arch/sparc/vdso/vdso.lds.S
 create mode 100644 arch/sparc/vdso/vdso2c.c
 create mode 100644 arch/sparc/vdso/vdso2c.h
 create mode 100644 arch/sparc/vdso/vdso32/.gitignore
 create mode 100644 arch/sparc/vdso/vdso32/vclock_gettime.c
 create mode 100644 arch/sparc/vdso/vdso32/vdso-note.S
 create mode 100644 arch/sparc/vdso/vdso32/vdso32.lds.S
 create mode 100644 arch/sparc/vdso/vma.c

diff --git a/arch/sparc/Kbuild b/arch/sparc/Kbuild
index 675afa285ddb..b4d0f570cc00 100644
--- a/arch/sparc/Kbuild
+++ b/arch/sparc/Kbuild
@@ -7,3 +7,4 @@ obj-y += mm/
 obj-y += math-emu/
 obj-y += net/
 obj-y += crypto/
+obj-$(CONFIG_SPARC64) += vdso/
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0be3828752e5..cd28b6cd95a2 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -85,6 +85,8 @@ config SPARC64
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
+	select GENERIC_TIME_VSYSCALL
+	select ARCH_CLOCKSOURCE_DATA
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 8496a074bd0e..0395b0de6cb4 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -80,6 +80,10 @@ install:
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
 
+PHONY += vdso_install
+vdso_install:
+	$(Q)$(MAKE) $(build)=arch/sparc/vdso $@
+
 # This is the image used for packaging
 KBUILD_IMAGE := $(boot)/zImage
 
diff --git a/arch/sparc/include/asm/clocksource.h b/arch/sparc/include/asm/clocksource.h
new file mode 100644
index 000000000000..d63ef224befe
--- /dev/null
+++ b/arch/sparc/include/asm/clocksource.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_CLOCKSOURCE_H
+#define _ASM_SPARC_CLOCKSOURCE_H
+
+/* VDSO clocksources */
+#define VCLOCK_NONE   0  /* Nothing userspace can do. */
+#define VCLOCK_TICK   1  /* Use %tick.  */
+#define VCLOCK_STICK  2  /* Use %stick. */
+
+struct arch_clocksource_data {
+	int vclock_mode;
+};
+
+#endif /* _ASM_SPARC_CLOCKSOURCE_H */
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 3f2d403873bd..1aa35a334a22 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -210,4 +210,18 @@ do {	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)	\
 			(current->personality & (~PER_MASK)));	\
 } while (0)
 
+extern unsigned int vdso_enabled;
+
+#define	ARCH_DLINFO							\
+do {									\
+	if (vdso_enabled)						\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+			    (unsigned long)current->mm->context.vdso);	\
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES	1
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+					int uses_interp);
 #endif /* !(__ASM_SPARC64_ELF_H) */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 83b36a5371ff..4142132dcb6b 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -96,6 +96,7 @@ typedef struct {
 	unsigned long		thp_pte_count;
 	struct tsb_config	tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
+	void			*vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index f04dc5a43062..bcb3172abeda 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -199,6 +199,13 @@ unsigned long get_wchan(struct task_struct *task);
  * To make a long story short, we are trying to yield the current cpu
  * strand during busy loops.
  */
+#ifdef	BUILD_VDSO
+#define	cpu_relax()	asm volatile("\n99:\n\t"			\
+				     "rd	%%ccr, %%g0\n\t"	\
+				     "rd	%%ccr, %%g0\n\t"	\
+				     "rd	%%ccr, %%g0\n\t"	\
+				     ::: "memory")
+#else /* ! BUILD_VDSO */
 #define cpu_relax()	asm volatile("\n99:\n\t"			\
 				     "rd	%%ccr, %%g0\n\t"	\
 				     "rd	%%ccr, %%g0\n\t"	\
@@ -210,6 +217,7 @@ unsigned long get_wchan(struct task_struct *task);
 				     "nop\n\t"				\
 				     ".previous"			\
 				     ::: "memory")
+#endif
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/arch/sparc/include/asm/vdso.h b/arch/sparc/include/asm/vdso.h
new file mode 100644
index 000000000000..93b628731a5e
--- /dev/null
+++ b/arch/sparc/include/asm/vdso.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_VDSO_H
+#define _ASM_SPARC_VDSO_H
+
+struct vdso_image {
+	void *data;
+	unsigned long size;   /* Always a multiple of PAGE_SIZE */
+	long sym_vvar_start;  /* Negative offset to the vvar area */
+	long sym_vread_tick; /* Start of vread_tick section */
+	long sym_vread_tick_patch_start; /* Start of tick read */
+	long sym_vread_tick_patch_end;   /* End of tick read */
+};
+
+#ifdef CONFIG_SPARC64
+extern const struct vdso_image vdso_image_64_builtin;
+#endif
+#ifdef CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_builtin;
+#endif
+
+#endif /* _ASM_SPARC_VDSO_H */
diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h
new file mode 100644
index 000000000000..0289503d1cb0
--- /dev/null
+++ b/arch/sparc/include/asm/vvar.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_VVAR_DATA_H
+#define _ASM_SPARC_VVAR_DATA_H
+
+#include <asm/clocksource.h>
+#include <linux/seqlock.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct vvar_data {
+	unsigned int seq;
+
+	int vclock_mode;
+	struct { /* extract of a clocksource struct */
+		u64	cycle_last;
+		u64	mask;
+		int	mult;
+		int	shift;
+	} clock;
+	/* open coded 'struct timespec' */
+	u64		wall_time_sec;
+	u64		wall_time_snsec;
+	u64		monotonic_time_snsec;
+	u64		monotonic_time_sec;
+	u64		monotonic_time_coarse_sec;
+	u64		monotonic_time_coarse_nsec;
+	u64		wall_time_coarse_sec;
+	u64		wall_time_coarse_nsec;
+
+	int		tz_minuteswest;
+	int		tz_dsttime;
+};
+
+extern struct vvar_data *vvar_data;
+extern int vdso_fix_stick;
+
+static inline unsigned int vvar_read_begin(const struct vvar_data *s)
+{
+	unsigned int ret;
+
+repeat:
+	ret = READ_ONCE(s->seq);
+	if (unlikely(ret & 1)) {
+		cpu_relax();
+		goto repeat;
+	}
+	smp_rmb(); /* Finish all reads before we return seq */
+	return ret;
+}
+
+static inline int vvar_read_retry(const struct vvar_data *s,
+					unsigned int start)
+{
+	smp_rmb(); /* Finish all reads before checking the value of seq */
+	return unlikely(s->seq != start);
+}
+
+static inline void vvar_write_begin(struct vvar_data *s)
+{
+	++s->seq;
+	smp_wmb(); /* Makes sure that increment of seq is reflected */
+}
+
+static inline void vvar_write_end(struct vvar_data *s)
+{
+	smp_wmb(); /* Makes the value of seq current before we increment */
+	++s->seq;
+}
+
+
+#endif /* _ASM_SPARC_VVAR_DATA_H */
diff --git a/arch/sparc/include/uapi/asm/auxvec.h b/arch/sparc/include/uapi/asm/auxvec.h
index ad6f360261f6..5f80a70cc901 100644
--- a/arch/sparc/include/uapi/asm/auxvec.h
+++ b/arch/sparc/include/uapi/asm/auxvec.h
@@ -1,4 +1,8 @@
 #ifndef __ASMSPARC_AUXVEC_H
 #define __ASMSPARC_AUXVEC_H
 
+#define AT_SYSINFO_EHDR		33
+
+#define AT_VECTOR_SIZE_ARCH	1
+
 #endif /* !(__ASMSPARC_AUXVEC_H) */
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index aac609889ee4..408071e675e8 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_SPARC32)   += systbls_32.o
 obj-y                   += time_$(BITS).o
 obj-$(CONFIG_SPARC32)   += windows.o
 obj-y                   += cpu.o
+obj-$(CONFIG_SPARC64)	+= vdso.o
 obj-$(CONFIG_SPARC32)   += devices.o
 obj-y                   += ptrace_$(BITS).o
 obj-y                   += unaligned_$(BITS).o
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 5c8524ff09d9..79fdf576dab0 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -52,6 +52,8 @@
 
 DEFINE_SPINLOCK(rtc_lock);
 
+unsigned int __read_mostly vdso_fix_stick;
+
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -829,12 +831,17 @@ static void init_tick_ops(struct sparc64_tick_ops *ops)
 void __init time_init_early(void)
 {
 	if (tlb_type == spitfire) {
-		if (is_hummingbird())
+		if (is_hummingbird()) {
 			init_tick_ops(&hbtick_operations);
-		else
+			clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
+		} else {
 			init_tick_ops(&tick_operations);
+			clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
+			vdso_fix_stick = 1;
+		}
 	} else {
 		init_tick_ops(&stick_operations);
+		clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
 	}
 }
 
diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c
new file mode 100644
index 000000000000..58880662b271
--- /dev/null
+++ b/arch/sparc/kernel/vdso.c
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ *  Thanks to hpa@transmeta.com for some useful hint.
+ *  Special thanks to Ingo Molnar for his early experience with
+ *  a different vsyscall implementation for Linux/IA32 and for the name.
+ */
+
+#include <linux/seqlock.h>
+#include <linux/time.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/vvar.h>
+
+void update_vsyscall_tz(void)
+{
+	if (unlikely(vvar_data == NULL))
+		return;
+
+	vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
+	vvar_data->tz_dsttime = sys_tz.tz_dsttime;
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+	struct vvar_data *vdata = vvar_data;
+
+	if (unlikely(vdata == NULL))
+		return;
+
+	vvar_write_begin(vdata);
+	vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+	vdata->clock.mask = tk->tkr_mono.mask;
+	vdata->clock.mult = tk->tkr_mono.mult;
+	vdata->clock.shift = tk->tkr_mono.shift;
+
+	vdata->wall_time_sec = tk->xtime_sec;
+	vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
+
+	vdata->monotonic_time_sec = tk->xtime_sec +
+				    tk->wall_to_monotonic.tv_sec;
+	vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
+				      (tk->wall_to_monotonic.tv_nsec <<
+				       tk->tkr_mono.shift);
+
+	while (vdata->monotonic_time_snsec >=
+	       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		vdata->monotonic_time_snsec -=
+				((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+		vdata->monotonic_time_sec++;
+	}
+
+	vdata->wall_time_coarse_sec = tk->xtime_sec;
+	vdata->wall_time_coarse_nsec =
+			(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
+
+	vdata->monotonic_time_coarse_sec =
+		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+	vdata->monotonic_time_coarse_nsec =
+		vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+	while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+		vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+		vdata->monotonic_time_coarse_sec++;
+	}
+
+	vvar_write_end(vdata);
+}
diff --git a/arch/sparc/vdso/.gitignore b/arch/sparc/vdso/.gitignore
new file mode 100644
index 000000000000..ef925b998222
--- /dev/null
+++ b/arch/sparc/vdso/.gitignore
@@ -0,0 +1,3 @@
+vdso.lds
+vdso-image-*.c
+vdso2c
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
new file mode 100644
index 000000000000..a6615d8864f7
--- /dev/null
+++ b/arch/sparc/vdso/Makefile
@@ -0,0 +1,149 @@
+#
+# Building vDSO images for sparc.
+#
+
+KBUILD_CFLAGS += $(DISABLE_LTO)
+
+VDSO64-$(CONFIG_SPARC64)	:= y
+VDSOCOMPAT-$(CONFIG_COMPAT)	:= y
+
+# files to link into the vdso
+vobjs-y := vdso-note.o vclock_gettime.o
+
+# files to link into kernel
+obj-y				+= vma.o
+
+# vDSO images to build
+vdso_img-$(VDSO64-y)		+= 64
+vdso_img-$(VDSOCOMPAT-y)	+= 32
+
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+
+$(obj)/vdso.o: $(obj)/vdso.so
+
+targets += vdso.lds $(vobjs-y)
+
+# Build the vDSO image C files and link them in.
+vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
+vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
+vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
+obj-y += $(vdso_img_objs)
+targets += $(vdso_img_cfiles)
+targets += $(vdso_img_sodbg)
+.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
+	$(vdso_img-y:%=$(obj)/vdso%.so)
+
+export CPPFLAGS_vdso.lds += -P -C
+
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+			-Wl,--no-undefined \
+			-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
+			$(DISABLE_LTO)
+
+$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+	$(call if_changed,vdso)
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-y			+= vdso2c
+
+quiet_cmd_vdso2c = VDSO2C  $@
+define cmd_vdso2c
+	$(obj)/vdso2c $< $(<:%.dbg=%) $@
+endef
+
+$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+	$(call if_changed,vdso2c)
+
+#
+# Don't omit frame pointers for ease of userspace debugging, but do
+# optimize sibling calls.
+#
+CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
+       -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
+       -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
+       $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
+       -foptimize-sibling-calls -DBUILD_VDSO
+
+$(vobjs): KBUILD_CFLAGS += $(CFL)
+
+#
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+#
+CFLAGS_REMOVE_vdso-note.o = -pg
+CFLAGS_REMOVE_vclock_gettime.o = -pg
+
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg
+	$(call if_changed,objcopy)
+
+CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1
+
+#This makes sure the $(obj) subdirectory exists even though vdso32/
+#is not a kbuild sub-make subdirectory
+override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
+
+targets += vdso32/vdso32.lds
+targets += vdso32/vdso-note.o
+targets += vdso32/vclock_gettime.o
+
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
+$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(obj)/vdso32.so.dbg: asflags-$(CONFIG_SPARC64) += -m32
+
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
+KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -mv8plus
+$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+$(obj)/vdso32.so.dbg: FORCE \
+			$(obj)/vdso32/vdso32.lds \
+			$(obj)/vdso32/vclock_gettime.o \
+			$(obj)/vdso32/vdso-note.o
+		$(call	if_changed,vdso)
+
+#
+# The DSO images are built using a special linker script.
+#
+quiet_cmd_vdso = VDSO    $@
+      cmd_vdso = $(CC) -nostdlib -o $@ \
+		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
+GCOV_PROFILE := n
+
+#
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
+#
+quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+define cmd_vdso_install
+	cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+	if readelf -n $< |grep -q 'Build ID'; then \
+	  buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+	  first=`echo $$buildid | cut -b-2`; \
+	  last=`echo $$buildid | cut -b3-`; \
+	  mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+	  ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+	fi
+endef
+
+vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+
+$(MODLIB)/vdso: FORCE
+	@mkdir -p $(MODLIB)/vdso
+
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+	$(call cmd,vdso_install)
+
+PHONY += vdso_install $(vdso_img_insttargets)
+vdso_install: $(vdso_img_insttargets) FORCE
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
new file mode 100644
index 000000000000..3feb3d960ca5
--- /dev/null
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2006 Andi Kleen, SUSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Fast user context implementation of clock_gettime, gettimeofday, and time.
+ *
+ * The code should have no internal unresolved relocations.
+ * Check with readelf after changing.
+ * Also alternative() doesn't work.
+ */
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/* Disable profiling for userspace code: */
+#ifndef	DISABLE_BRANCH_PROFILING
+#define	DISABLE_BRANCH_PROFILING
+#endif
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/unistd.h>
+#include <asm/timex.h>
+#include <asm/clocksource.h>
+#include <asm/vvar.h>
+
+#undef	TICK_PRIV_BIT
+#ifdef	CONFIG_SPARC64
+#define	TICK_PRIV_BIT	(1UL << 63)
+#else
+#define	TICK_PRIV_BIT	(1ULL << 63)
+#endif
+
+#define SYSCALL_STRING							\
+	"ta	0x6d;"							\
+	"sub	%%g0, %%o0, %%o0;"					\
+
+#define SYSCALL_CLOBBERS						\
+	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",			\
+	"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",		\
+	"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",		\
+	"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",		\
+	"f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",		\
+	"f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",		\
+	"cc", "memory"
+
+/*
+ * Compute the vvar page's address in the process address space, and return it
+ * as a pointer to the vvar_data.
+ */
+static notrace noinline struct vvar_data *
+get_vvar_data(void)
+{
+	unsigned long ret;
+
+	/*
+	 * vdso data page is the first vDSO page so grab the return address
+	 * and move up a page to get to the data page.
+	 */
+	ret = (unsigned long)__builtin_return_address(0);
+	ret &= ~(8192 - 1);
+	ret -= 8192;
+
+	return (struct vvar_data *) ret;
+}
+
+static notrace long
+vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+	register long num __asm__("g1") = __NR_clock_gettime;
+	register long o0 __asm__("o0") = clock;
+	register long o1 __asm__("o1") = (long) ts;
+
+	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
+			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
+	return o0;
+}
+
+static notrace __always_inline long
+vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+	register long num __asm__("g1") = __NR_gettimeofday;
+	register long o0 __asm__("o0") = (long) tv;
+	register long o1 __asm__("o1") = (long) tz;
+
+	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
+			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
+	return o0;
+}
+
+#ifdef	CONFIG_SPARC64
+static notrace noinline u64
+vread_tick(void) {
+	u64	ret;
+
+	__asm__ __volatile__("rd	%%asr24, %0 \n"
+			     ".section	.vread_tick_patch, \"ax\" \n"
+			     "rd	%%tick, %0 \n"
+			     ".previous \n"
+			     : "=&r" (ret));
+	return ret & ~TICK_PRIV_BIT;
+}
+#else
+static notrace noinline u64
+vread_tick(void)
+{
+	unsigned int lo, hi;
+
+	__asm__ __volatile__("rd	%%asr24, %%g1\n\t"
+			     "srlx	%%g1, 32, %1\n\t"
+			     "srl	%%g1, 0, %0\n"
+			     ".section	.vread_tick_patch, \"ax\" \n"
+			     "rd	%%tick, %%g1\n"
+			     ".previous \n"
+			     : "=&r" (lo), "=&r" (hi)
+			     :
+			     : "g1");
+	return lo | ((u64)hi << 32);
+}
+#endif
+
+static notrace inline u64
+vgetsns(struct vvar_data *vvar)
+{
+	u64 v;
+	u64 cycles;
+
+	cycles = vread_tick();
+	v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+	return v * vvar->clock.mult;
+}
+
+static notrace noinline int
+do_realtime(struct vvar_data *vvar, struct timespec *ts)
+{
+	unsigned long seq;
+	u64 ns;
+
+	ts->tv_nsec = 0;
+	do {
+		seq = vvar_read_begin(vvar);
+		ts->tv_sec = vvar->wall_time_sec;
+		ns = vvar->wall_time_snsec;
+		ns += vgetsns(vvar);
+		ns >>= vvar->clock.shift;
+	} while (unlikely(vvar_read_retry(vvar, seq)));
+
+	timespec_add_ns(ts, ns);
+
+	return 0;
+}
+
+static notrace noinline int
+do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+{
+	unsigned long seq;
+	u64 ns;
+
+	ts->tv_nsec = 0;
+	do {
+		seq = vvar_read_begin(vvar);
+		ts->tv_sec = vvar->monotonic_time_sec;
+		ns = vvar->monotonic_time_snsec;
+		ns += vgetsns(vvar);
+		ns >>= vvar->clock.shift;
+	} while (unlikely(vvar_read_retry(vvar, seq)));
+
+	timespec_add_ns(ts, ns);
+
+	return 0;
+}
+
+static notrace noinline int
+do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+{
+	unsigned long seq;
+
+	do {
+		seq = vvar_read_begin(vvar);
+		ts->tv_sec = vvar->wall_time_coarse_sec;
+		ts->tv_nsec = vvar->wall_time_coarse_nsec;
+	} while (unlikely(vvar_read_retry(vvar, seq)));
+	return 0;
+}
+
+static notrace noinline int
+do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+{
+	unsigned long seq;
+
+	do {
+		seq = vvar_read_begin(vvar);
+		ts->tv_sec = vvar->monotonic_time_coarse_sec;
+		ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
+	} while (unlikely(vvar_read_retry(vvar, seq)));
+
+	return 0;
+}
+
+notrace int
+__vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+{
+	struct vvar_data *vvd = get_vvar_data();
+
+	switch (clock) {
+	case CLOCK_REALTIME:
+		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+			break;
+		return do_realtime(vvd, ts);
+	case CLOCK_MONOTONIC:
+		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+			break;
+		return do_monotonic(vvd, ts);
+	case CLOCK_REALTIME_COARSE:
+		return do_realtime_coarse(vvd, ts);
+	case CLOCK_MONOTONIC_COARSE:
+		return do_monotonic_coarse(vvd, ts);
+	}
+	/*
+	 * Unknown clock ID ? Fall back to the syscall.
+	 */
+	return vdso_fallback_gettime(clock, ts);
+}
+int
+clock_gettime(clockid_t, struct timespec *)
+	__attribute__((weak, alias("__vdso_clock_gettime")));
+
+notrace int
+__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+	struct vvar_data *vvd = get_vvar_data();
+
+	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+		if (likely(tv != NULL)) {
+			union tstv_t {
+				struct timespec ts;
+				struct timeval tv;
+			} *tstv = (union tstv_t *) tv;
+			do_realtime(vvd, &tstv->ts);
+			/*
+			 * Assign before dividing to ensure that the division is
+			 * done in the type of tv_usec, not tv_nsec.
+			 *
+			 * There cannot be > 1 billion usec in a second:
+			 * do_realtime() has already distributed such overflow
+			 * into tv_sec.  So we can assign it to an int safely.
+			 */
+			tstv->tv.tv_usec = tstv->ts.tv_nsec;
+			tstv->tv.tv_usec /= 1000;
+		}
+		if (unlikely(tz != NULL)) {
+			/* Avoid memcpy. Some old compilers fail to inline it */
+			tz->tz_minuteswest = vvd->tz_minuteswest;
+			tz->tz_dsttime = vvd->tz_dsttime;
+		}
+		return 0;
+	}
+	return vdso_fallback_gettimeofday(tv, tz);
+}
+int
+gettimeofday(struct timeval *, struct timezone *)
+	__attribute__((weak, alias("__vdso_gettimeofday")));
diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S
new file mode 100644
index 000000000000..f2c83abaca12
--- /dev/null
+++ b/arch/sparc/vdso/vdso-layout.lds.S
@@ -0,0 +1,104 @@
+/*
+ * Linker script for vDSO.  This is an ELF shared object prelinked to
+ * its virtual address, and with only one read-only segment.
+ * This script controls its layout.
+ */
+
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 7
+
+SECTIONS
+{
+	/*
+	 * User/kernel shared data is before the vDSO.  This may be a little
+	 * uglier than putting it after the vDSO, but it avoids issues with
+	 * non-allocatable things that dangle past the end of the PT_LOAD
+	 * segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries
+	 */
+
+	vvar_start = . -8192;
+	vvar_data = vvar_start;
+
+	. = SIZEOF_HEADERS;
+
+	.hash		: { *(.hash) }			:text
+	.gnu.hash	: { *(.gnu.hash) }
+	.dynsym		: { *(.dynsym) }
+	.dynstr		: { *(.dynstr) }
+	.gnu.version	: { *(.gnu.version) }
+	.gnu.version_d	: { *(.gnu.version_d) }
+	.gnu.version_r	: { *(.gnu.version_r) }
+
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
+	.rodata		: {
+		*(.rodata*)
+		*(.data*)
+		*(.sdata*)
+		*(.got.plt) *(.got)
+		*(.gnu.linkonce.d.*)
+		*(.bss*)
+		*(.dynbss*)
+		*(.gnu.linkonce.b.*)
+
+		/*
+		 * Ideally this would live in a C file: kept in here for
+		 * compatibility with x86-64.
+		 */
+		VDSO_FAKE_SECTION_TABLE_START = .;
+		. = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+		VDSO_FAKE_SECTION_TABLE_END = .;
+	}						:text
+
+	.fake_shstrtab	: { *(.fake_shstrtab) }		:text
+
+
+	.note		: { *(.note.*) }		:text	:note
+
+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
+	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
+
+
+	/*
+	 * Text is well-separated from actual data: there's plenty of
+	 * stuff that isn't used at runtime in between.
+	 */
+
+	.text		: { *(.text*) }			:text	=0x90909090,
+
+	.vread_tick_patch : {
+		vread_tick_patch_start = .;
+		*(.vread_tick_patch)
+		vread_tick_patch_end = .;
+	}
+
+	/DISCARD/ : {
+		*(.discard)
+		*(.discard.*)
+		*(__bug_table)
+	}
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME	0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
+	note		PT_NOTE		FLAGS(4);		/* PF_R */
+	eh_frame_hdr	PT_GNU_EH_FRAME;
+}
diff --git a/arch/sparc/vdso/vdso-note.S b/arch/sparc/vdso/vdso-note.S
new file mode 100644
index 000000000000..79a071e4357e
--- /dev/null
+++ b/arch/sparc/vdso/vdso-note.S
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+	.long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S
new file mode 100644
index 000000000000..f3caa29a331c
--- /dev/null
+++ b/arch/sparc/vdso/vdso.lds.S
@@ -0,0 +1,25 @@
+/*
+ * Linker script for 64-bit vDSO.
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define BUILD_VDSO64
+
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+	LINUX_2.6 {
+	global:
+		clock_gettime;
+		__vdso_clock_gettime;
+		gettimeofday;
+		__vdso_gettimeofday;
+	local: *;
+	};
+}
diff --git a/arch/sparc/vdso/vdso2c.c b/arch/sparc/vdso/vdso2c.c
new file mode 100644
index 000000000000..9f5b1cd6d51d
--- /dev/null
+++ b/arch/sparc/vdso/vdso2c.c
@@ -0,0 +1,234 @@
+/*
+ * vdso2c - A vdso image preparation tool
+ * Copyright (c) 2014 Andy Lutomirski and others
+ * Licensed under the GPL v2
+ *
+ * vdso2c requires stripped and unstripped input.  It would be trivial
+ * to fully strip the input in here, but, for reasons described below,
+ * we need to write a section table.  Doing this is more or less
+ * equivalent to dropping all non-allocatable sections, but it's
+ * easier to let objcopy handle that instead of doing it ourselves.
+ * If we ever need to do something fancier than what objcopy provides,
+ * it would be straightforward to add here.
+ *
+ * We keep a section table for a few reasons:
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <err.h>
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <tools/be_byteshift.h>
+
+#include <linux/elf.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+const char *outfilename;
+
+/* Symbols that we need in vdso2c. */
+enum {
+	sym_vvar_start,
+	sym_VDSO_FAKE_SECTION_TABLE_START,
+	sym_VDSO_FAKE_SECTION_TABLE_END,
+	sym_vread_tick,
+	sym_vread_tick_patch_start,
+	sym_vread_tick_patch_end
+};
+
+struct vdso_sym {
+	const char *name;
+	int export;
+};
+
+struct vdso_sym required_syms[] = {
+	[sym_vvar_start] = {"vvar_start", 1},
+	[sym_VDSO_FAKE_SECTION_TABLE_START] = {
+		"VDSO_FAKE_SECTION_TABLE_START", 0
+	},
+	[sym_VDSO_FAKE_SECTION_TABLE_END] = {
+		"VDSO_FAKE_SECTION_TABLE_END", 0
+	},
+	[sym_vread_tick] = {"vread_tick", 1},
+	[sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
+	[sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
+};
+
+__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
+static void fail(const char *format, ...)
+{
+	va_list ap;
+
+	va_start(ap, format);
+	fprintf(stderr, "Error: ");
+	vfprintf(stderr, format, ap);
+	if (outfilename)
+		unlink(outfilename);
+	exit(1);
+	va_end(ap);
+}
+
+/*
+ * Evil macros for big-endian reads and writes
+ */
+#define GBE(x, bits, ifnot)						\
+	__builtin_choose_expr(						\
+		(sizeof(*(x)) == bits/8),				\
+		(__typeof__(*(x)))get_unaligned_be##bits(x), ifnot)
+
+#define LAST_GBE(x)							\
+	__builtin_choose_expr(sizeof(*(x)) == 1, *(x), (void)(0))
+
+#define GET_BE(x)							\
+	GBE(x, 64, GBE(x, 32, GBE(x, 16, LAST_GBE(x))))
+
+#define PBE(x, val, bits, ifnot)					\
+	__builtin_choose_expr(						\
+		(sizeof(*(x)) == bits/8),				\
+		put_unaligned_be##bits((val), (x)), ifnot)
+
+#define LAST_PBE(x, val)						\
+	__builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), (void)(0))
+
+#define PUT_BE(x, val)					\
+	PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val))))
+
+#define NSYMS ARRAY_SIZE(required_syms)
+
+#define BITSFUNC3(name, bits, suffix) name##bits##suffix
+#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )
+
+#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
+#include "vdso2c.h"
+#undef ELF_BITS
+
+#define ELF_BITS 32
+#include "vdso2c.h"
+#undef ELF_BITS
+
+static void go(void *raw_addr, size_t raw_len,
+	       void *stripped_addr, size_t stripped_len,
+	       FILE *outfile, const char *name)
+{
+	Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;
+
+	if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
+		go64(raw_addr, raw_len, stripped_addr, stripped_len,
+		     outfile, name);
+	} else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
+		go32(raw_addr, raw_len, stripped_addr, stripped_len,
+		     outfile, name);
+	} else {
+		fail("unknown ELF class\n");
+	}
+}
+
+static void map_input(const char *name, void **addr, size_t *len, int prot)
+{
+	off_t tmp_len;
+
+	int fd = open(name, O_RDONLY);
+
+	if (fd == -1)
+		err(1, "%s", name);
+
+	tmp_len = lseek(fd, 0, SEEK_END);
+	if (tmp_len == (off_t)-1)
+		err(1, "lseek");
+	*len = (size_t)tmp_len;
+
+	*addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0);
+	if (*addr == MAP_FAILED)
+		err(1, "mmap");
+
+	close(fd);
+}
+
+int main(int argc, char **argv)
+{
+	size_t raw_len, stripped_len;
+	void *raw_addr, *stripped_addr;
+	FILE *outfile;
+	char *name, *tmp;
+	int namelen;
+
+	if (argc != 4) {
+		printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
+		return 1;
+	}
+
+	/*
+	 * Figure out the struct name.  If we're writing to a .so file,
+	 * generate raw output instead.
+	 */
+	name = strdup(argv[3]);
+	namelen = strlen(name);
+	if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
+		name = NULL;
+	} else {
+		tmp = strrchr(name, '/');
+		if (tmp)
+			name = tmp + 1;
+		tmp = strchr(name, '.');
+		if (tmp)
+			*tmp = '\0';
+		for (tmp = name; *tmp; tmp++)
+			if (*tmp == '-')
+				*tmp = '_';
+	}
+
+	map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
+	map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
+
+	outfilename = argv[3];
+	outfile = fopen(outfilename, "w");
+	if (!outfile)
+		err(1, "%s", argv[2]);
+
+	go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);
+
+	munmap(raw_addr, raw_len);
+	munmap(stripped_addr, stripped_len);
+	fclose(outfile);
+
+	return 0;
+}
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
new file mode 100644
index 000000000000..808decb0f7be
--- /dev/null
+++ b/arch/sparc/vdso/vdso2c.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This file is included up to twice from vdso2c.c.  It generates code for
+ * 32-bit and 64-bit vDSOs.  We will eventually need both for 64-bit builds,
+ * since 32-bit vDSOs will then be built for 32-bit userspace.
+ */
+
+static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+			 void *stripped_addr, size_t stripped_len,
+			 FILE *outfile, const char *name)
+{
+	int found_load = 0;
+	unsigned long load_size = -1;  /* Work around bogus warning */
+	unsigned long mapping_size;
+	int i;
+	unsigned long j;
+
+	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
+	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+	ELF(Dyn) *dyn = 0, *dyn_end = 0;
+	INT_BITS syms[NSYMS] = {};
+
+	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));
+
+	/* Walk the segment table. */
+	for (i = 0; i < GET_BE(&hdr->e_phnum); i++) {
+		if (GET_BE(&pt[i].p_type) == PT_LOAD) {
+			if (found_load)
+				fail("multiple PT_LOAD segs\n");
+
+			if (GET_BE(&pt[i].p_offset) != 0 ||
+			    GET_BE(&pt[i].p_vaddr) != 0)
+				fail("PT_LOAD in wrong place\n");
+
+			if (GET_BE(&pt[i].p_memsz) != GET_BE(&pt[i].p_filesz))
+				fail("cannot handle memsz != filesz\n");
+
+			load_size = GET_BE(&pt[i].p_memsz);
+			found_load = 1;
+		} else if (GET_BE(&pt[i].p_type) == PT_DYNAMIC) {
+			dyn = raw_addr + GET_BE(&pt[i].p_offset);
+			dyn_end = raw_addr + GET_BE(&pt[i].p_offset) +
+				GET_BE(&pt[i].p_memsz);
+		}
+	}
+	if (!found_load)
+		fail("no PT_LOAD seg\n");
+
+	if (stripped_len < load_size)
+		fail("stripped input is too short\n");
+
+	/* Walk the dynamic table */
+	for (i = 0; dyn + i < dyn_end &&
+		     GET_BE(&dyn[i].d_tag) != DT_NULL; i++) {
+		typeof(dyn[i].d_tag) tag = GET_BE(&dyn[i].d_tag);
+		typeof(dyn[i].d_un.d_val) val = GET_BE(&dyn[i].d_un.d_val);
+
+		if ((tag == DT_RELSZ || tag == DT_RELASZ) && (val != 0))
+			fail("vdso image contains dynamic relocations\n");
+	}
+
+	/* Walk the section table */
+	for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
+		ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
+			GET_BE(&hdr->e_shentsize) * i;
+		if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
+			symtab_hdr = sh;
+	}
+
+	if (!symtab_hdr)
+		fail("no symbol table\n");
+
+	strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
+		GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link);
+
+	/* Walk the symbol table */
+	for (i = 0;
+	     i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize);
+	     i++) {
+		int k;
+
+		ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) +
+			GET_BE(&symtab_hdr->sh_entsize) * i;
+		const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) +
+			GET_BE(&sym->st_name);
+
+		for (k = 0; k < NSYMS; k++) {
+			if (!strcmp(name, required_syms[k].name)) {
+				if (syms[k]) {
+					fail("duplicate symbol %s\n",
+					     required_syms[k].name);
+				}
+
+				/*
+				 * Careful: we use negative addresses, but
+				 * st_value is unsigned, so we rely
+				 * on syms[k] being a signed type of the
+				 * correct width.
+				 */
+				syms[k] = GET_BE(&sym->st_value);
+			}
+		}
+	}
+
+	/* Validate mapping addresses. */
+	if (syms[sym_vvar_start] % 8192)
+		fail("vvar_begin must be a multiple of 8192\n");
+
+	if (!name) {
+		fwrite(stripped_addr, stripped_len, 1, outfile);
+		return;
+	}
+
+	mapping_size = (stripped_len + 8191) / 8192 * 8192;
+
+	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+	fprintf(outfile, "#include <linux/cache.h>\n");
+	fprintf(outfile, "#include <asm/vdso.h>\n");
+	fprintf(outfile, "\n");
+	fprintf(outfile,
+		"static unsigned char raw_data[%lu] __ro_after_init __aligned(8192)= {",
+		mapping_size);
+	for (j = 0; j < stripped_len; j++) {
+		if (j % 10 == 0)
+			fprintf(outfile, "\n\t");
+		fprintf(outfile, "0x%02X, ",
+			(int)((unsigned char *)stripped_addr)[j]);
+	}
+	fprintf(outfile, "\n};\n\n");
+
+	fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
+	fprintf(outfile, "\t.data = raw_data,\n");
+	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+	for (i = 0; i < NSYMS; i++) {
+		if (required_syms[i].export && syms[i])
+			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+				required_syms[i].name, (int64_t)syms[i]);
+	}
+	fprintf(outfile, "};\n");
+}
diff --git a/arch/sparc/vdso/vdso32/.gitignore b/arch/sparc/vdso/vdso32/.gitignore
new file mode 100644
index 000000000000..e45fba9d0ced
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/.gitignore
@@ -0,0 +1 @@
+vdso32.lds
diff --git a/arch/sparc/vdso/vdso32/vclock_gettime.c b/arch/sparc/vdso/vdso32/vclock_gettime.c
new file mode 100644
index 000000000000..026abb3b826c
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/vclock_gettime.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define	BUILD_VDSO32
+
+#ifndef	CONFIG_CC_OPTIMIZE_FOR_SIZE
+#undef	CONFIG_OPTIMIZE_INLINING
+#endif
+
+#ifdef	CONFIG_SPARC64
+
+/*
+ * in case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel
+ * configuration
+ */
+#undef	CONFIG_64BIT
+#undef	CONFIG_SPARC64
+#define	BUILD_VDSO32_64
+#define	CONFIG_32BIT
+#undef	CONFIG_QUEUED_RWLOCKS
+#undef	CONFIG_QUEUED_SPINLOCKS
+
+#endif
+
+#include "../vclock_gettime.c"
diff --git a/arch/sparc/vdso/vdso32/vdso-note.S b/arch/sparc/vdso/vdso32/vdso-note.S
new file mode 100644
index 000000000000..e234983cf0d8
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/vdso-note.S
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO
+ * text. Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+	.long	LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S
new file mode 100644
index 000000000000..53575ee154c4
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/vdso32.lds.S
@@ -0,0 +1,24 @@
+/*
+ * Linker script for sparc32 vDSO
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define	BUILD_VDSO32
+#include "../vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+	LINUX_2.6 {
+	global:
+		clock_gettime;
+		__vdso_clock_gettime;
+		gettimeofday;
+		__vdso_gettimeofday;
+	local: *;
+	};
+}
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
new file mode 100644
index 000000000000..0a6f50098e23
--- /dev/null
+++ b/arch/sparc/vdso/vma.c
@@ -0,0 +1,268 @@
+/*
+ * Set up the VMAs to tell the VM about the vDSO.
+ * Copyright 2007 Andi Kleen, SUSE Labs.
+ * Subject to the GPL, v.2
+ */
+
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/random.h>
+#include <linux/elf.h>
+#include <asm/vdso.h>
+#include <asm/vvar.h>
+#include <asm/page.h>
+
+unsigned int __read_mostly vdso_enabled = 1;
+
+static struct vm_special_mapping vvar_mapping = {
+	.name = "[vvar]"
+};
+
+#ifdef	CONFIG_SPARC64
+static struct vm_special_mapping vdso_mapping64 = {
+	.name = "[vdso]"
+};
+#endif
+
+#ifdef CONFIG_COMPAT
+static struct vm_special_mapping vdso_mapping32 = {
+	.name = "[vdso]"
+};
+#endif
+
+struct vvar_data *vvar_data;
+
+#define	SAVE_INSTR_SIZE	4
+
+/*
+ * Allocate pages for the vdso and vvar, and copy in the vdso text from the
+ * kernel image.
+ */
+int __init init_vdso_image(const struct vdso_image *image,
+		struct vm_special_mapping *vdso_mapping)
+{
+	int i;
+	struct page *dp, **dpp = NULL;
+	int dnpages = 0;
+	struct page *cp, **cpp = NULL;
+	int cnpages = (image->size) / PAGE_SIZE;
+
+	/*
+	 * First, the vdso text.  This is initialized data, an integral number of
+	 * pages long.
+	 */
+	if (WARN_ON(image->size % PAGE_SIZE != 0))
+		goto oom;
+
+	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
+	vdso_mapping->pages = cpp;
+
+	if (!cpp)
+		goto oom;
+
+	if (vdso_fix_stick) {
+		/*
+		 * If the system uses %tick instead of %stick, patch the VDSO
+		 * with instruction reading %tick instead of %stick.
+		 */
+		unsigned int j, k = SAVE_INSTR_SIZE;
+		unsigned char *data = image->data;
+
+		for (j = image->sym_vread_tick_patch_start;
+		     j < image->sym_vread_tick_patch_end; j++) {
+
+			data[image->sym_vread_tick + k] = data[j];
+			k++;
+		}
+	}
+
+	for (i = 0; i < cnpages; i++) {
+		cp = alloc_page(GFP_KERNEL);
+		if (!cp)
+			goto oom;
+		cpp[i] = cp;
+		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
+	}
+
+	/*
+	 * Now the vvar page.  This is uninitialized data.
+	 */
+
+	if (vvar_data == NULL) {
+		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
+		if (WARN_ON(dnpages != 1))
+			goto oom;
+		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
+		vvar_mapping.pages = dpp;
+
+		if (!dpp)
+			goto oom;
+
+		dp = alloc_page(GFP_KERNEL);
+		if (!dp)
+			goto oom;
+
+		dpp[0] = dp;
+		vvar_data = page_address(dp);
+		memset(vvar_data, 0, PAGE_SIZE);
+
+		vvar_data->seq = 0;
+	}
+
+	return 0;
+ oom:
+	if (cpp != NULL) {
+		for (i = 0; i < cnpages; i++) {
+			if (cpp[i] != NULL)
+				__free_page(cpp[i]);
+		}
+		kfree(cpp);
+		vdso_mapping->pages = NULL;
+	}
+
+	if (dpp != NULL) {
+		for (i = 0; i < dnpages; i++) {
+			if (dpp[i] != NULL)
+				__free_page(dpp[i]);
+		}
+		kfree(dpp);
+		vvar_mapping.pages = NULL;
+	}
+
+	pr_warn("Cannot allocate vdso\n");
+	vdso_enabled = 0;
+	return -ENOMEM;
+}
+
+static int __init init_vdso(void)
+{
+	int err = 0;
+#ifdef CONFIG_SPARC64
+	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
+	if (err)
+		return err;
+#endif
+
+#ifdef CONFIG_COMPAT
+	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
+#endif
+	return err;
+
+}
+subsys_initcall(init_vdso);
+
+struct linux_binprm;
+
+/* Shuffle the vdso up a bit, randomly. */
+static unsigned long vdso_addr(unsigned long start, unsigned int len)
+{
+	unsigned int offset;
+
+	/* This loses some more bits than a modulo, but is cheaper */
+	offset = get_random_int() & (PTRS_PER_PTE - 1);
+	return start + (offset << PAGE_SHIFT);
+}
+
+static int map_vdso(const struct vdso_image *image,
+		struct vm_special_mapping *vdso_mapping)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long text_start, addr = 0;
+	int ret = 0;
+
+	down_write(&mm->mmap_sem);
+
+	/*
+	 * First, get an unmapped region: then randomize it, and make sure that
+	 * region is free.
+	 */
+	if (current->flags & PF_RANDOMIZE) {
+		addr = get_unmapped_area(NULL, 0,
+					 image->size - image->sym_vvar_start,
+					 0, 0);
+		if (IS_ERR_VALUE(addr)) {
+			ret = addr;
+			goto up_fail;
+		}
+		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
+	}
+	addr = get_unmapped_area(NULL, addr,
+				 image->size - image->sym_vvar_start, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	text_start = addr - image->sym_vvar_start;
+	current->mm->context.vdso = (void __user *)text_start;
+
+	/*
+	 * MAYWRITE to allow gdb to COW and set breakpoints
+	 */
+	vma = _install_special_mapping(mm,
+				       text_start,
+				       image->size,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       vdso_mapping);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto up_fail;
+	}
+
+	vma = _install_special_mapping(mm,
+				       addr,
+				       -image->sym_vvar_start,
+				       VM_READ|VM_MAYREAD,
+				       &vvar_mapping);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		do_munmap(mm, text_start, image->size, NULL);
+	}
+
+up_fail:
+	if (ret)
+		current->mm->context.vdso = NULL;
+
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+
+	if (!vdso_enabled)
+		return 0;
+
+#if defined CONFIG_COMPAT
+	if (!(is_32bit_task()))
+		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+	else
+		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
+#else
+		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+#endif
+
+}
+
+static __init int vdso_setup(char *s)
+{
+	int err;
+	unsigned long val;
+
+	err = kstrtoul(s, 10, &val);
+	vdso_enabled = val;
+	return err;
+}
+__setup("vdso=", vdso_setup);
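
As a point of reference, vdso_addr() above confines the random shift to
PTRS_PER_PTE pages above the region returned by get_unmapped_area(). A minimal
user-space sketch of the same arithmetic (the PAGE_SHIFT and PTRS_PER_PTE
values below are illustrative assumptions, not taken from this patch):

#include <stdlib.h>

#define PAGE_SHIFT	13	/* assumed 8K pages, for illustration only */
#define PTRS_PER_PTE	1024	/* assumed value, for illustration only */

/* Mirror of vdso_addr(): add a random, page-aligned offset to 'start'.
 * Masking with (PTRS_PER_PTE - 1) loses more bits than a modulo would,
 * but is cheaper, as the comment in the patch notes.
 */
static unsigned long model_vdso_addr(unsigned long start)
{
	unsigned long offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);

	return start + (offset << PAGE_SHIFT);
}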

From 41413a60352cf8a7f0dd4a869d6e5c3e25aa89b2 Mon Sep 17 00:00:00 2001
From: Vijay Kumar <vijay.ac.kumar@oracle.com>
Date: Wed, 11 Oct 2017 12:50:02 -0600
Subject: [PATCH 06/14] sparc64: Define SPARC default fls function

fls will now require boot time patching on T4 and above, so redefine it
under arch/sparc/lib.

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/bitops_64.h |  3 +-
 arch/sparc/lib/Makefile            |  1 +
 arch/sparc/lib/fls.S               | 67 ++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 1 deletion(-)
 create mode 100644 arch/sparc/lib/fls.S

diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 2d522402a937..30aea561176f 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -22,9 +22,10 @@ void set_bit(unsigned long nr, volatile unsigned long *addr);
 void clear_bit(unsigned long nr, volatile unsigned long *addr);
 void change_bit(unsigned long nr, volatile unsigned long *addr);
 
+int fls(unsigned int word);
+
 #include <asm-generic/bitops/non-atomic.h>
 
-#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index a1a2d39ec96e..3b9f5e0c1bcd 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -16,6 +16,7 @@ lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 lib-$(CONFIG_SPARC64) += multi3.o
+lib-$(CONFIG_SPARC64) += fls.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/fls.S b/arch/sparc/lib/fls.S
new file mode 100644
index 000000000000..06b8d300bcae
--- /dev/null
+++ b/arch/sparc/lib/fls.S
@@ -0,0 +1,67 @@
+/* fls.S: SPARC default fls definition.
+ *
+ * SPARC default fls definition, which follows the same algorithm as
+ * in generic fls(). This function will be boot time patched on T4
+ * and onward.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+ENTRY(fls)
+	brz,pn	%o0, 6f
+	 mov	0, %o1
+	sethi	%hi(0xffff0000), %g3
+	mov	%o0, %g2
+	andcc	%o0, %g3, %g0
+	be,pt	%icc, 8f
+	 mov	32, %o1
+	sethi	%hi(0xff000000), %g3
+	andcc	%g2, %g3, %g0
+	bne,pt	%icc, 3f
+	 sethi	%hi(0xf0000000), %g3
+	sll	%o0, 8, %o0
+1:
+	add	%o1, -8, %o1
+	sra	%o0, 0, %o0
+	mov	%o0, %g2
+2:
+	sethi	%hi(0xf0000000), %g3
+3:
+	andcc	%g2, %g3, %g0
+	bne,pt	%icc, 4f
+	 sethi	%hi(0xc0000000), %g3
+	sll	%o0, 4, %o0
+	add	%o1, -4, %o1
+	sra	%o0, 0, %o0
+	mov	%o0, %g2
+4:
+	andcc	%g2, %g3, %g0
+	be,a,pt	%icc, 7f
+	 sll	%o0, 2, %o0
+5:
+	xnor	%g0, %o0, %o0
+	srl	%o0, 31, %o0
+	sub	%o1, %o0, %o1
+6:
+	jmp	%o7 + 8
+	 sra	%o1, 0, %o0
+7:
+	add	%o1, -2, %o1
+	ba,pt	%xcc, 5b
+	 sra	%o0, 0, %o0
+8:
+	sll	%o0, 16, %o0
+	sethi	%hi(0xff000000), %g3
+	sra	%o0, 0, %o0
+	mov	%o0, %g2
+	andcc	%g2, %g3, %g0
+	bne,pt	%icc, 2b
+	 mov	16, %o1
+	ba,pt	%xcc, 1b
+	 sll	%o0, 8, %o0
+ENDPROC(fls)
+EXPORT_SYMBOL(fls)
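
For reference, the algorithm this assembly mirrors is the generic C fls();
a close rendering of it (a readability sketch, see
include/asm-generic/bitops/fls.h for the authoritative version):

static inline int generic_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u))
		r -= 1;
	return r;
}

The sethi constants in fls.S (0xffff0000, 0xff000000, 0xf0000000, 0xc0000000)
correspond to the masks tested at each step.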

From be52bbe3ea1ae2619ca64d67d56e84a9c7e358f9 Mon Sep 17 00:00:00 2001
From: Vijay Kumar <vijay.ac.kumar@oracle.com>
Date: Wed, 11 Oct 2017 12:50:03 -0600
Subject: [PATCH 07/14] sparc64: Define SPARC default __fls function

__fls will now require boot time patching on T4 and above, so redefine it
under arch/sparc/lib.

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/bitops_64.h |  2 +-
 arch/sparc/lib/Makefile            |  1 +
 arch/sparc/lib/fls64.S             | 61 ++++++++++++++++++++++++++++++
 3 files changed, 63 insertions(+), 1 deletion(-)
 create mode 100644 arch/sparc/lib/fls64.S

diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 30aea561176f..d7a46e2480c0 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -23,10 +23,10 @@ void clear_bit(unsigned long nr, volatile unsigned long *addr);
 void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 int fls(unsigned int word);
+int __fls(unsigned long word);
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 3b9f5e0c1bcd..5380c5985420 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -17,6 +17,7 @@ lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 lib-$(CONFIG_SPARC64) += multi3.o
 lib-$(CONFIG_SPARC64) += fls.o
+lib-$(CONFIG_SPARC64) += fls64.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/fls64.S b/arch/sparc/lib/fls64.S
new file mode 100644
index 000000000000..c83e22ae9586
--- /dev/null
+++ b/arch/sparc/lib/fls64.S
@@ -0,0 +1,61 @@
+/* fls64.S: SPARC default __fls definition.
+ *
+ * SPARC default __fls definition, which follows the same algorithm as
+ * in generic __fls(). This function will be boot time patched on T4
+ * and onward.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+ENTRY(__fls)
+	mov	-1, %g2
+	sllx	%g2, 32, %g2
+	and	%o0, %g2, %g2
+	brnz,pt	%g2, 1f
+	 mov	63, %g1
+	sllx	%o0, 32, %o0
+	mov	31, %g1
+1:
+	mov	-1, %g2
+	sllx	%g2, 48, %g2
+	and	%o0, %g2, %g2
+	brnz,pt	%g2, 2f
+	 mov	-1, %g2
+	sllx	%o0, 16, %o0
+	add	%g1, -16, %g1
+2:
+	mov	-1, %g2
+	sllx	%g2, 56, %g2
+	and	%o0, %g2, %g2
+	brnz,pt	%g2, 3f
+	 mov	-1, %g2
+	sllx	%o0, 8, %o0
+	add	%g1, -8, %g1
+3:
+	sllx	%g2, 60, %g2
+	and	%o0, %g2, %g2
+	brnz,pt	%g2, 4f
+	 mov	-1, %g2
+	sllx	%o0, 4, %o0
+	add	%g1, -4, %g1
+4:
+	sllx	%g2, 62, %g2
+	and	%o0, %g2, %g2
+	brnz,pt	%g2, 5f
+	 mov	-1, %g3
+	sllx	%o0, 2, %o0
+	add	%g1, -2, %g1
+5:
+	mov	0, %g2
+	sllx	%g3, 63, %g3
+	and	%o0, %g3, %o0
+	movre	%o0, 1, %g2
+	sub	%g1, %g2, %g1
+	jmp	%o7+8
+	 sra	%g1, 0, %o0
+ENDPROC(__fls)
+EXPORT_SYMBOL(__fls)
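
Likewise, the assembly follows the generic 64-bit __fls(); a close rendering
(a sketch assuming BITS_PER_LONG == 64, see include/asm-generic/bitops/__fls.h
for the authoritative version):

static inline unsigned long generic___fls(unsigned long word)
{
	unsigned long num = 63;

	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
	if (!(word & (~0ul << 48))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << 56))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << 60))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << 62))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << 63)))
		num -= 1;
	return num;
}

The sllx shift counts in fls64.S (32, 48, 56, 60, 62, 63) build the same masks
one step at a time.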

From 70cbec0c53739e170ea9c418e740f007ab5bca52 Mon Sep 17 00:00:00 2001
From: Vijay Kumar <vijay.ac.kumar@oracle.com>
Date: Wed, 11 Oct 2017 12:50:04 -0600
Subject: [PATCH 08/14] sparc64: SPARC optimized fls function

Define a SPARC optimized fls using the lzcnt opcode.

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/lib/Makefile |  1 +
 arch/sparc/lib/NG4fls.S | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)
 create mode 100644 arch/sparc/lib/NG4fls.S

diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 5380c5985420..2823b8e530ed 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -18,6 +18,7 @@ lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 lib-$(CONFIG_SPARC64) += multi3.o
 lib-$(CONFIG_SPARC64) += fls.o
 lib-$(CONFIG_SPARC64) += fls64.o
+obj-$(CONFIG_SPARC64) += NG4fls.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/NG4fls.S b/arch/sparc/lib/NG4fls.S
new file mode 100644
index 000000000000..bc17b65f1052
--- /dev/null
+++ b/arch/sparc/lib/NG4fls.S
@@ -0,0 +1,20 @@
+/* NG4fls.S: SPARC optimized fls and __fls for T4 and above.
+ *
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+#define LZCNT_O0_G2	\
+	.word	0x85b002e8
+
+	.text
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+
+ENTRY(NG4fls)
+	LZCNT_O0_G2	!lzcnt	%o0, %g2
+	mov	64, %g3
+	retl
+	 sub	%g3, %g2, %o0
+ENDPROC(NG4fls)
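
The identity used is fls(x) == 64 - lzcnt(x) on the zero-extended 32-bit
argument, which also gives the expected 0 for x == 0, since a 64-bit
leading-zero count of 0 is 64. A C model of what the two instructions compute
(__builtin_clzll only stands in for lzcnt here and is undefined for 0 in C,
hence the special case):

static int ng4_fls_model(unsigned int word)
{
	unsigned long w = word;			/* zero-extended, as in %o0 */
	int lzcnt = w ? __builtin_clzll(w) : 64;

	return 64 - lzcnt;			/* mov 64, %g3; sub %g3, %g2 */
}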

From 2b41ce5df20b698436526fcd8478094140b91706 Mon Sep 17 00:00:00 2001
From: Vijay Kumar <vijay.ac.kumar@oracle.com>
Date: Wed, 11 Oct 2017 12:50:05 -0600
Subject: [PATCH 09/14] sparc64: SPARC optimized __fls function

Define a SPARC optimized __fls using the lzcnt opcode.

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/lib/NG4fls.S | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/sparc/lib/NG4fls.S b/arch/sparc/lib/NG4fls.S
index bc17b65f1052..2d0991e5b034 100644
--- a/arch/sparc/lib/NG4fls.S
+++ b/arch/sparc/lib/NG4fls.S
@@ -18,3 +18,13 @@ ENTRY(NG4fls)
 	retl
 	 sub	%g3, %g2, %o0
 ENDPROC(NG4fls)
+
+ENTRY(__NG4fls)
+	brz,pn	%o0, 1f
+	LZCNT_O0_G2	!lzcnt	%o0, %g2
+	mov	63, %g3
+	sub	%g3, %g2, %o0
+1:
+	retl
+	 nop
+ENDPROC(__NG4fls)
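
For __fls the identity is 63 - lzcnt(x) for nonzero x; the brz guard simply
returns the input unchanged for 0, where __fls is undefined anyway. A one-line
C model (again with __builtin_clzll standing in for lzcnt):

static unsigned long ng4_fls64_model(unsigned long word)
{
	return word ? 63 - __builtin_clzll(word) : word;
}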

From 46ad8d2d22c17e2b577adec55ae87161666a3267 Mon Sep 17 00:00:00 2001
From: Vijay Kumar <vijay.ac.kumar@oracle.com>
Date: Wed, 11 Oct 2017 12:50:06 -0600
Subject: [PATCH 10/14] sparc64: Use sparc optimized fls and __fls for T4 and
 above

For T4 and above, patch the fls and __fls functions
at boot time to use the lzcnt instruction.

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/kernel/head_64.S | 2 ++
 arch/sparc/lib/NG4patch.S   | 9 +++++++++
 2 files changed, 11 insertions(+)

diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 4de9fbd1a177..f362ecb9955d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -640,6 +640,8 @@ niagara4_patch:
 	 nop
 	call	niagara4_patch_pageops
 	 nop
+	call	niagara4_patch_fls
+	 nop
 
 	ba,a,pt	%xcc, 80f
 	 nop
diff --git a/arch/sparc/lib/NG4patch.S b/arch/sparc/lib/NG4patch.S
index 3cc0f8cc95df..da65a3ebb7cc 100644
--- a/arch/sparc/lib/NG4patch.S
+++ b/arch/sparc/lib/NG4patch.S
@@ -3,6 +3,8 @@
  * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/linkage.h>
+
 #define BRANCH_ALWAYS	0x10680000
 #define NOP		0x01000000
 #define NG_DO_PATCH(OLD, NEW)	\
@@ -52,3 +54,10 @@ niagara4_patch_pageops:
 	retl
 	 nop
 	.size	niagara4_patch_pageops,.-niagara4_patch_pageops
+
+ENTRY(niagara4_patch_fls)
+	NG_DO_PATCH(fls, NG4fls)
+	NG_DO_PATCH(__fls, __NG4fls)
+	retl
+	 nop
+ENDPROC(niagara4_patch_fls)
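
NG_DO_PATCH, defined earlier in NG4patch.S, rewrites the first two
instructions of the old routine: an always-taken branch to the new routine
plus a nop in the delay slot. A rough C model of the instruction words it
stores (a sketch of the idea, not the macro itself; disp19 is the signed
19-bit word displacement of a SPARC BPcc branch):

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000	/* ba,pt %xcc, <disp19> */
#define NOP		0x01000000

static void model_ng_do_patch(uint32_t *old_insns,
			      unsigned long old, unsigned long new)
{
	long disp19 = ((long)new - (long)old) >> 2;	/* word displacement */

	old_insns[0] = BRANCH_ALWAYS | (disp19 & 0x7ffff);
	old_insns[1] = NOP;
	/* the real macro then flushes the modified instructions with 'flush' */
}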

From 68fa10dce0bfe263000ea3fd147ed74f5a4342c2 Mon Sep 17 00:00:00 2001
From: Kees Cook <keescook@chromium.org>
Date: Tue, 10 Oct 2017 15:07:27 -0700
Subject: [PATCH 11/14] sparc/led: Convert timers to use timer_setup()

In preparation for unconditionally passing the struct timer_list pointer to
all timer callbacks, switch to using the new timer_setup() and from_timer()
to pass the timer pointer explicitly. A static variable is added to hold
the timeout value.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geliang Tang <geliangtang@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: sparclinux@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/kernel/led.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index e278bf52963b..519f5ba7ed7e 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -31,19 +31,20 @@ static inline void led_toggle(void)
 }
 
 static struct timer_list led_blink_timer;
+static unsigned long led_blink_timer_timeout;
 
-static void led_blink(unsigned long timeout)
+static void led_blink(struct timer_list *unused)
 {
+	unsigned long timeout = led_blink_timer_timeout;
+
 	led_toggle();
 
 	/* reschedule */
 	if (!timeout) { /* blink according to load */
 		led_blink_timer.expires = jiffies +
 			((1 + (avenrun[0] >> FSHIFT)) * HZ);
-		led_blink_timer.data = 0;
 	} else { /* blink at user specified interval */
 		led_blink_timer.expires = jiffies + (timeout * HZ);
-		led_blink_timer.data = timeout;
 	}
 	add_timer(&led_blink_timer);
 }
@@ -88,9 +89,11 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
 	} else if (!strcmp(buf, "toggle")) {
 		led_toggle();
 	} else if ((*buf > '0') && (*buf <= '9')) {
-		led_blink(simple_strtoul(buf, NULL, 10));
+		led_blink_timer_timeout = simple_strtoul(buf, NULL, 10);
+		led_blink(&led_blink_timer);
 	} else if (!strcmp(buf, "load")) {
-		led_blink(0);
+		led_blink_timer_timeout = 0;
+		led_blink(&led_blink_timer);
 	} else {
 		auxio_set_led(AUXIO_LED_OFF);
 	}
@@ -115,8 +118,7 @@ static struct proc_dir_entry *led;
 
 static int __init led_init(void)
 {
-	init_timer(&led_blink_timer);
-	led_blink_timer.function = led_blink;
+	timer_setup(&led_blink_timer, led_blink, 0);
 
 	led = proc_create("led", 0, NULL, &led_proc_fops);
 	if (!led)
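
With timer_setup() the callback receives only the timer pointer, so any
per-timer state has to be reachable some other way; led.c keeps it in a
file-scope variable because there is exactly one LED timer. A minimal sketch
of that pattern (hypothetical names, not the led.c code):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;
static unsigned long my_timeout;	/* replaces the old .data cookie */

static void my_timer_fn(struct timer_list *unused)
{
	mod_timer(&my_timer, jiffies + my_timeout * HZ);
}

static void my_start(unsigned long secs)
{
	my_timeout = secs;
	timer_setup(&my_timer, my_timer_fn, 0);
	mod_timer(&my_timer, jiffies + my_timeout * HZ);
}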

From cdf5976fcb7328875a0216a94ae7dafd4ee3abfb Mon Sep 17 00:00:00 2001
From: Elena Reshetova <elena.reshetova@intel.com>
Date: Fri, 20 Oct 2017 10:57:57 +0300
Subject: [PATCH 12/14] sparc64: convert mdesc_handle.refcnt from atomic_t to
 refcount_t

atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situation and be exploitable.

The variable mdesc_handle.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/kernel/mdesc.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index fa466ce45bc9..821a7247a037 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -12,6 +12,7 @@
 #include <linux/miscdevice.h>
 #include <linux/bootmem.h>
 #include <linux/export.h>
+#include <linux/refcount.h>
 
 #include <asm/cpudata.h>
 #include <asm/hypervisor.h>
@@ -70,7 +71,7 @@ struct mdesc_handle {
 	struct list_head	list;
 	struct mdesc_mem_ops	*mops;
 	void			*self_base;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	unsigned int		handle_size;
 	struct mdesc_hdr	mdesc;
 };
@@ -152,7 +153,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
 	memset(hp, 0, handle_size);
 	INIT_LIST_HEAD(&hp->list);
 	hp->self_base = base;
-	atomic_set(&hp->refcnt, 1);
+	refcount_set(&hp->refcnt, 1);
 	hp->handle_size = handle_size;
 }
 
@@ -182,7 +183,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
 	unsigned int alloc_size;
 	unsigned long start;
 
-	BUG_ON(atomic_read(&hp->refcnt) != 0);
+	BUG_ON(refcount_read(&hp->refcnt) != 0);
 	BUG_ON(!list_empty(&hp->list));
 
 	alloc_size = PAGE_ALIGN(hp->handle_size);
@@ -220,7 +221,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
 
 static void mdesc_kfree(struct mdesc_handle *hp)
 {
-	BUG_ON(atomic_read(&hp->refcnt) != 0);
+	BUG_ON(refcount_read(&hp->refcnt) != 0);
 	BUG_ON(!list_empty(&hp->list));
 
 	kfree(hp->self_base);
@@ -259,7 +260,7 @@ struct mdesc_handle *mdesc_grab(void)
 	spin_lock_irqsave(&mdesc_lock, flags);
 	hp = cur_mdesc;
 	if (hp)
-		atomic_inc(&hp->refcnt);
+		refcount_inc(&hp->refcnt);
 	spin_unlock_irqrestore(&mdesc_lock, flags);
 
 	return hp;
@@ -271,7 +272,7 @@ void mdesc_release(struct mdesc_handle *hp)
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdesc_lock, flags);
-	if (atomic_dec_and_test(&hp->refcnt)) {
+	if (refcount_dec_and_test(&hp->refcnt)) {
 		list_del_init(&hp->list);
 		hp->mops->free(hp);
 	}
@@ -513,7 +514,7 @@ void mdesc_update(void)
 	if (status != HV_EOK || real_len > len) {
 		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
 		       status);
-		atomic_dec(&hp->refcnt);
+		refcount_dec(&hp->refcnt);
 		mdesc_free(hp);
 		goto out;
 	}
@@ -526,7 +527,7 @@ void mdesc_update(void)
 	mdesc_notify_clients(orig_hp, hp);
 
 	spin_lock_irqsave(&mdesc_lock, flags);
-	if (atomic_dec_and_test(&orig_hp->refcnt))
+	if (refcount_dec_and_test(&orig_hp->refcnt))
 		mdesc_free(orig_hp);
 	else
 		list_add(&orig_hp->list, &mdesc_zombie_list);
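
The lifecycle itself is unchanged; only the counter type and its operations
move to the refcount_t API. A condensed sketch of the pattern (not the mdesc
code, hypothetical struct):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcnt;
};

static struct obj *obj_alloc(void)
{
	struct obj *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->refcnt, 1);	/* counter starts at 1 */
	return p;
}

static void obj_get(struct obj *p)
{
	refcount_inc(&p->refcnt);		/* saturates and warns on misuse */
}

static void obj_put(struct obj *p)
{
	if (refcount_dec_and_test(&p->refcnt))	/* free on the last put */
		kfree(p);
}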

From ff0296877a8d04db27997ec216376d7bbdf75a0b Mon Sep 17 00:00:00 2001
From: Allen Pais <allen.pais@oracle.com>
Date: Tue, 24 Oct 2017 15:08:13 +0530
Subject: [PATCH 13/14] sparc64: Convert timers to use timer_setup()

Switch to using the new timer_setup() and from_timer()
in the LDOM Virtual I/O handshake.

Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Allen Pais <allen.pais@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/kernel/viohs.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index dcd278f29573..7cb27c2fb16f 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -797,9 +797,9 @@ void vio_port_up(struct vio_driver_state *vio)
 }
 EXPORT_SYMBOL(vio_port_up);
 
-static void vio_port_timer(unsigned long _arg)
+static void vio_port_timer(struct timer_list *t)
 {
-	struct vio_driver_state *vio = (struct vio_driver_state *) _arg;
+	struct vio_driver_state *vio = from_timer(vio, t, timer);
 
 	vio_port_up(vio);
 }
@@ -848,7 +848,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
 
 	vio->ops = ops;
 
-	setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);
+	timer_setup(&vio->timer, vio_port_timer, 0);
 
 	return 0;
 }
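
Here the timer is embedded in struct vio_driver_state, so the callback
recovers its owner with from_timer(), which is essentially a type-checked
container_of(). A sketch of the idiom (hypothetical struct and field names):

#include <linux/timer.h>

struct my_state {
	struct timer_list timer;
	int retries;
};

static void my_timer_fn(struct timer_list *t)
{
	/* from_timer(var, t, field) expands to container_of(t, ..., field) */
	struct my_state *s = from_timer(s, t, timer);

	s->retries++;
}

static void my_init(struct my_state *s)
{
	timer_setup(&s->timer, my_timer_fn, 0);
}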

From 70f3c8b7c2e7ebcdde8354da004872e7c9184e97 Mon Sep 17 00:00:00 2001
From: Nitin Gupta <nitin.m.gupta@oracle.com>
Date: Fri, 3 Nov 2017 12:26:06 -0700
Subject: [PATCH 14/14] sparc64: Fix page table walk for PUD hugepages

For a PUD hugepage entry, we need to propagate bits [32:22]
from virtual address to resolve at 4M granularity. However,
the current code was incorrectly propagating bits [29:19].
This bug can cause incorrect data to be returned for pages
backed with 16G hugepages.

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/tsb.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index acf55063aa3d..ca0de1646f1e 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -216,7 +216,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	sllx		REG2, 32, REG2;			\
 	andcc		REG1, REG2, %g0;		\
 	be,pt		%xcc, 700f;			\
-	 sethi		%hi(0x1ffc0000), REG2;		\
+	 sethi		%hi(0xffe00000), REG2;		\
 	sllx		REG2, 1, REG2;			\
 	brgez,pn	REG1, FAIL_LABEL;		\
 	 andn		REG1, REG2, REG1;		\
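
The bit ranges named in the commit message can be checked from the constants:
the sethi value is shifted left once before use, so the new 0xffe00000 << 1 =
0x1ffc00000 covers VA bits [32:22] (4M granularity under a 16G PUD hugepage),
while the old 0x1ffc0000 << 1 = 0x3ff80000 only covered bits [29:19]. A quick
stand-alone check of those values:

#include <stdio.h>

int main(void)
{
	unsigned long old_mask = 0x1ffc0000UL << 1;	/* bits [29:19] */
	unsigned long new_mask = 0xffe00000UL << 1;	/* bits [32:22] */

	/* prints old 0x3ff80000 new 0x1ffc00000 */
	printf("old %#lx new %#lx\n", old_mask, new_mask);
	return 0;
}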