Merge tag 'v6.6.25' into v6.6-rt
This is the 6.6.25 stable release
@@ -3269,9 +3269,7 @@

	mem_encrypt=	[X86-64] AMD Secure Memory Encryption (SME) control
			Valid arguments: on, off
			Default (depends on kernel configuration option):
			  on  (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
			  off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
			Default: off
			mem_encrypt=on:		Activate SME
			mem_encrypt=off:	Do not activate SME

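For context, options like this are consumed very early in boot. A minimal hedged sketch of how such an option is typically parsed, assuming the usual early_param() mechanism (the handler name and flag below are illustrative, not the exact mem_encrypt implementation):

	#include <linux/init.h>
	#include <linux/string.h>

	static bool mem_encrypt_requested;	/* illustrative flag */

	static int __init mem_encrypt_setup(char *arg)
	{
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "on"))
			mem_encrypt_requested = true;
		else if (!strcmp(arg, "off"))
			mem_encrypt_requested = false;

		return 0;
	}
	early_param("mem_encrypt", mem_encrypt_setup);
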
@@ -87,14 +87,14 @@ The state of SME in the Linux kernel can be documented as follows:
kernel is non-zero).

SME can also be enabled and activated in the BIOS. If SME is enabled and
activated in the BIOS, then all memory accesses will be encrypted and it will
not be necessary to activate the Linux memory encryption support. If the BIOS
merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
not enable SME, then Linux will not be able to activate memory encryption, even
if configured to do so by default or the mem_encrypt=on command line parameter
is specified.
activated in the BIOS, then all memory accesses will be encrypted and it
will not be necessary to activate the Linux memory encryption support.

If the BIOS merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG),
then memory encryption can be enabled by supplying mem_encrypt=on on the
kernel command line. However, if BIOS does not enable SME, then Linux
will not be able to activate memory encryption, even if configured to do
so by default or the mem_encrypt=on command line parameter is specified.

Secure Nested Paging (SNP)
==========================

@@ -345,9 +345,9 @@ sys.stderr.write("Using %s theme\n" % html_theme)
html_static_path = ['sphinx-static']

# If true, Docutils "smart quotes" will be used to convert quotes and dashes
# to typographically correct entities. This will convert "--" to "—",
# which is not always what we want, so disable it.
smartquotes = False
# to typographically correct entities. However, conversion of "--" to "—"
# is not always what we want, so enable only quotes.
smartquotes_action = 'q'

# Custom sidebar templates, maps document names to template names.
# Note that the RTD theme ignores this

@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements
      are origins of links.

    * - ``MEDIA_PAD_FL_MUST_CONNECT``
      - If this flag is set and the pad is linked to any other pad, then
        at least one of those links must be enabled for the entity to be
        able to stream. There could be temporary reasons (e.g. device
        configuration dependent) for the pad to need enabled links even
        when this flag isn't set; the absence of the flag doesn't imply
        there is none.
      - If this flag is set, then for this pad to be able to stream, it must
        be connected by at least one enabled link. There could be temporary
        reasons (e.g. device configuration dependent) for the pad to need
        enabled links even when this flag isn't set; the absence of the flag
        doesn't imply there is none.

One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 23
SUBLEVEL = 25
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

+2 -2
@@ -591,8 +591,8 @@ source "arch/arm/mm/Kconfig"

config IWMMXT
	bool "Enable iWMMXt support"
	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
	default y if PXA27x || PXA3xx || ARCH_MMP
	help
	  Enable support for iWMMXt context switching at run time if
	  running on a CPU that supports it.

@@ -28,7 +28,7 @@
&twsi1 {
	status = "okay";
	pmic: max8925@3c {
		compatible = "maxium,max8925";
		compatible = "maxim,max8925";
		reg = <0x3c>;
		interrupts = <1>;
		interrupt-parent = <&intcmux4>;

@@ -297,6 +297,7 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
CONFIG_LCD_PLATFORM=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_GPIO=y
CONFIG_FRAMEBUFFER_CONSOLE=y

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#include <asm/system_info.h>
#include <uapi/asm/mman.h>

static inline bool arch_memory_deny_write_exec_supported(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported

#endif /* __ASM_MMAN_H__ */
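This per-arch hook feeds the generic PR_SET_MDWE prctl path. A hedged sketch of how the generic side consults it, modeled on include/linux/mman.h and kernel/sys.c of this era (treat it as an approximation, not the exact 6.6.25 code):

	/* Generic fallback when an architecture does not override the hook: */
	#ifndef arch_memory_deny_write_exec_supported
	static inline bool arch_memory_deny_write_exec_supported(void)
	{
		return true;
	}
	#endif

	/* prctl(PR_SET_MDWE, ...) then refuses the feature where unsupported: */
	if (bits & PR_MDWE_REFUSE_EXEC_GAIN &&
	    !arch_memory_deny_write_exec_supported())
		return -EINVAL;
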
@@ -75,8 +75,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \

+13 -38
@@ -18,18 +18,6 @@
#include <asm/assembler.h>
#include "iwmmxt.h"

#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#define PJ4(code...) code
#define XSC(code...)
#elif defined(CONFIG_CPU_MOHAWK) || \
	defined(CONFIG_CPU_XSC3) || \
	defined(CONFIG_CPU_XSCALE)
#define PJ4(code...)
#define XSC(code...) code
#else
#error "Unsupported iWMMXt architecture"
#endif

#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)

@@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
ENTRY(iwmmxt_task_enable)
	inc_preempt_count r10, r3

	XSC(mrc p15, 0, r2, c15, c1, 0)
	PJ4(mrc p15, 0, r2, c1, c0, 2)
	mrc p15, 0, r2, c15, c1, 0
	@ CP0 and CP1 accessible?
	XSC(tst r2, #0x3)
	PJ4(tst r2, #0xf)
	tst r2, #0x3
	bne 4f				@ if so no business here
	@ enable access to CP0 and CP1
	XSC(orr r2, r2, #0x3)
	XSC(mcr p15, 0, r2, c15, c1, 0)
	PJ4(orr r2, r2, #0xf)
	PJ4(mcr p15, 0, r2, c1, c0, 2)
	orr r2, r2, #0x3
	mcr p15, 0, r2, c15, c1, 0

	ldr r3, =concan_owner
	ldr r2, [r0, #S_PC]		@ current task pc value

@@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
	bne 1f				@ no: quit

	@ enable access to CP0 and CP1
	XSC(mrc p15, 0, r4, c15, c1, 0)
	XSC(orr r4, r4, #0x3)
	XSC(mcr p15, 0, r4, c15, c1, 0)
	PJ4(mrc p15, 0, r4, c1, c0, 2)
	PJ4(orr r4, r4, #0xf)
	PJ4(mcr p15, 0, r4, c1, c0, 2)
	mrc p15, 0, r4, c15, c1, 0
	orr r4, r4, #0x3
	mcr p15, 0, r4, c15, c1, 0

	mov r0, #0			@ nothing to load
	str r0, [r3]			@ no more current owner

@@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
	bl concan_save

	@ disable access to CP0 and CP1
	XSC(bic r4, r4, #0x3)
	XSC(mcr p15, 0, r4, c15, c1, 0)
	PJ4(bic r4, r4, #0xf)
	PJ4(mcr p15, 0, r4, c1, c0, 2)
	bic r4, r4, #0x3
	mcr p15, 0, r4, c15, c1, 0

	mrc p15, 0, r2, c2, c0, 0
	mov r2, r2			@ cpwait

@@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
 */
ENTRY(iwmmxt_task_switch)

	XSC(mrc p15, 0, r1, c15, c1, 0)
	PJ4(mrc p15, 0, r1, c1, c0, 2)
	mrc p15, 0, r1, c15, c1, 0
	@ CP0 and CP1 accessible?
	XSC(tst r1, #0x3)
	PJ4(tst r1, #0xf)
	tst r1, #0x3
	bne 1f				@ yes: block them for next task

	ldr r2, =concan_owner

@@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
	retne lr			@ no: leave Concan disabled

1:	@ flip Concan access
	XSC(eor r1, r1, #0x3)
	XSC(mcr p15, 0, r1, c15, c1, 0)
	PJ4(eor r1, r1, #0xf)
	PJ4(mcr p15, 0, r1, c1, c0, 2)
	eor r1, r1, #0x3
	mcr p15, 0, r1, c15, c1, 0

	mrc p15, 0, r1, c2, c0, 0
	sub pc, lr, r1, lsr #32		@ cpwait and return

@@ -1,135 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/pj4-cp0.c
 *
 * PJ4 iWMMXt coprocessor context switching and handling
 *
 * Copyright (c) 2010 Marvell International Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
#include <asm/cputype.h>

static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */

	case THREAD_NOTIFY_EXIT:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
	.notifier_call = iwmmxt_do,
};


static u32 __init pj4_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc p15, 0, %0, c1, c0, 2\n\t"
		: "=r" (value));
	return value;
}

static void __init pj4_cp_access_write(u32 value)
{
	u32 temp;

	__asm__ __volatile__ (
		"mcr p15, 0, %1, c1, c0, 2\n\t"
#ifdef CONFIG_THUMB2_KERNEL
		"isb\n\t"
#else
		"mrc p15, 0, %0, c1, c0, 2\n\t"
		"mov %0, %0\n\t"
		"sub pc, pc, #4\n\t"
#endif
		: "=r" (temp) : "r" (value));
}

static int __init pj4_get_iwmmxt_version(void)
{
	u32 cp_access, wcid;

	cp_access = pj4_cp_access_read();
	pj4_cp_access_write(cp_access | 0xf);

	/* check if coprocessor 0 and 1 are available */
	if ((pj4_cp_access_read() & 0xf) != 0xf) {
		pj4_cp_access_write(cp_access);
		return -ENODEV;
	}

	/* read iWMMXt coprocessor id register p1, c0 */
	__asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));

	pj4_cp_access_write(cp_access);

	/* iWMMXt v1 */
	if ((wcid & 0xffffff00) == 0x56051000)
		return 1;
	/* iWMMXt v2 */
	if ((wcid & 0xffffff00) == 0x56052000)
		return 2;

	return -EINVAL;
}

/*
 * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
 * switch code handle iWMMXt context switching.
 */
static int __init pj4_cp0_init(void)
{
	u32 __maybe_unused cp_access;
	int vers;

	if (!cpu_is_pj4())
		return 0;

	vers = pj4_get_iwmmxt_version();
	if (vers < 0)
		return 0;

#ifndef CONFIG_IWMMXT
	pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
#else
	cp_access = pj4_cp_access_read() & ~0xf;
	pj4_cp_access_write(cp_access);

	pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
	elf_hwcap |= HWCAP_IWMMXT;
	thread_register_notifier(&iwmmxt_notifier_block);
	register_iwmmxt_undef_handler();
#endif

	return 0;
}

late_initcall(pj4_cp0_init);
@@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (folio_test_reserved(folio))
		return;

	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else

@@ -2098,8 +2098,16 @@
	ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
		 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;

	interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "msi";
	interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "msi0", "msi1", "msi2", "msi3",
			  "msi4", "msi5", "msi6", "msi7";
	#interrupt-cells = <1>;
	interrupt-map-mask = <0 0 0 0x7>;
	interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,

@@ -721,6 +721,8 @@
};

&pcie4 {
	max-link-speed = <2>;

	perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
	wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;

@@ -743,7 +743,7 @@
	wcd_tx: codec@0,3 {
		compatible = "sdw20217010d00";
		reg = <0 3>;
		qcom,tx-port-mapping = <1 1 2 3>;
		qcom,tx-port-mapping = <2 2 3 4>;
	};
};

@@ -835,7 +835,7 @@
	wcd_tx: codec@0,3 {
		compatible = "sdw20217010d00";
		reg = <0 3>;
		qcom,tx-port-mapping = <1 1 2 3>;
		qcom,tx-port-mapping = <2 2 3 4>;
	};
};

@@ -63,6 +63,7 @@ SECTIONS
	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS
	.hexagon.attributes 0 : { *(.hexagon.attributes) }

	DISCARDS
}

@@ -44,7 +44,6 @@ static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)

		CRC32(crc, value, w);
		p += sizeof(u32);
		len -= sizeof(u32);
	}

	if (len & sizeof(u16)) {

@@ -80,7 +79,6 @@ static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)

		CRC32C(crc, value, w);
		p += sizeof(u32);
		len -= sizeof(u32);
	}

	if (len & sizeof(u16)) {

@@ -4,6 +4,7 @@ generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h

@@ -71,6 +71,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))

#define __io_aw() mmiowb()

#include <asm-generic/io.h>

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE

@@ -29,7 +29,12 @@ static inline void set_my_cpu_offset(unsigned long off)
	__my_cpu_offset = off;
	csr_write64(off, PERCPU_BASE_KS);
}
#define __my_cpu_offset __my_cpu_offset

#define __my_cpu_offset					\
({							\
	__asm__ __volatile__("":"+r"(__my_cpu_offset));	\
	__my_cpu_offset;				\
})

#define PERCPU_OP(op, asm_op, c_op)				\
static __always_inline unsigned long __percpu_##op(void *ptr,	\
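The empty asm with a "+r" constraint is the key detail here: it launders __my_cpu_offset through a register the compiler must treat as both read and written, so the per-CPU base cannot be cached from before a preemption point. A hedged standalone illustration of the same idiom, which the kernel spells OPTIMIZER_HIDE_VAR() in linux/compiler.h (the demo program itself is ours):

	#include <stdio.h>

	/* The empty asm claims to read and write `var`, so the optimizer
	 * may not assume it still holds a previously observed value. */
	#define HIDE_VAR(var) __asm__ __volatile__("" : "+r"(var))

	int main(void)
	{
		unsigned long off = 0x1000;

		HIDE_VAR(off);		/* compiler must re-materialize `off` */
		printf("%#lx\n", off);
		return 0;
	}
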
@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_QSPINLOCK_H
#define _ASM_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

#define queued_spin_unlock queued_spin_unlock

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	compiletime_assert_atomic_type(lock->locked);
	c_sync();
	WRITE_ONCE(lock->locked, 0);
}

#include <asm-generic/qspinlock.h>

#endif /* _ASM_QSPINLOCK_H */
@@ -97,26 +97,28 @@
 * version takes two arguments: a src and destination register.
 * However, the source and destination registers can not be
 * the same register.
 *
 * We use add,l to avoid clobbering the C/B bits in the PSW.
 */

	.macro tophys grvirt, grphys
	ldil L%(__PAGE_OFFSET), \grphys
	sub \grvirt, \grphys, \grphys
	ldil L%(-__PAGE_OFFSET), \grphys
	addl \grvirt, \grphys, \grphys
	.endm

	.macro tovirt grphys, grvirt
	ldil L%(__PAGE_OFFSET), \grvirt
	add \grphys, \grvirt, \grvirt
	addl \grphys, \grvirt, \grvirt
	.endm

	.macro tophys_r1 gr
	ldil L%(__PAGE_OFFSET), %r1
	sub \gr, %r1, \gr
	ldil L%(-__PAGE_OFFSET), %r1
	addl \gr, %r1, \gr
	.endm

	.macro tovirt_r1 gr
	ldil L%(__PAGE_OFFSET), %r1
	add \gr, %r1, \gr
	addl \gr, %r1, \gr
	.endm

	.macro delay value
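The substitution above is pure arithmetic: subtracting __PAGE_OFFSET equals adding its two's-complement negation, and on PA-RISC the add,l form leaves the PSW carry/borrow bits alone where sub would clobber them. A small C sanity check of the equivalence (the offset value is illustrative, not the real one):

	#include <assert.h>

	#define PAGE_OFFSET 0x10000000UL	/* illustrative value */

	int main(void)
	{
		unsigned long virt = 0x10203000UL;

		/* old: virt - PAGE_OFFSET; new: virt + (-PAGE_OFFSET) */
		assert(virt - PAGE_OFFSET == virt + (0UL - PAGE_OFFSET));
		return 0;
	}
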
@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
"	addc %0, %5, %0\n"
"	addc %0, %3, %0\n"
"1:	ldws,ma 4(%1), %3\n"
"	addib,< 0, %2, 1b\n"
"	addib,> -1, %2, 1b\n"
"	addc %0, %3, %0\n"
"\n"
"	extru %0, 31, 16, %4\n"

@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Try to keep 4 registers with "live" values ahead of the ALU.
*/

"	depdi 0, 31, 32, %0\n"	/* clear upper half of incoming checksum */
"	ldd,ma 8(%1), %4\n"	/* get 1st saddr word */
"	ldd,ma 8(%2), %5\n"	/* get 1st daddr word */
"	add %4, %0, %0\n"

@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
"	add,dc %3, %0, %0\n"	/* fold in proto+len | carry bit */
"	extrd,u %0, 31, 32, %4\n"	/* copy upper half down */
"	depdi 0, 31, 32, %0\n"	/* clear upper half */
"	add %4, %0, %0\n"	/* fold into 32-bits */
"	addc 0, %0, %0\n"	/* add carry */
"	add,dc %4, %0, %0\n"	/* fold into 32-bits, plus carry */
"	addc 0, %0, %0\n"	/* add final carry */

#else

@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
"	ldw,ma 4(%2), %7\n"	/* 4th daddr */
"	addc %6, %0, %0\n"
"	addc %7, %0, %0\n"
"	addc %3, %0, %0\n"	/* fold in proto+len, catch carry */
"	addc %3, %0, %0\n"	/* fold in proto+len */
"	addc 0, %0, %0\n"	/* add carry */

#endif
	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#include <uapi/asm/mman.h>

/* PARISC cannot allow mdwe as it needs writable stacks */
static inline bool arch_memory_deny_write_exec_supported(void)
{
	return false;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported

#endif /* __ASM_MMAN_H__ */

@@ -169,6 +169,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
	unsigned long saddr = regs->ior;
	unsigned long shift, temp1;
	__u64 val = 0;
	ASM_EXCEPTIONTABLE_VAR(ret);

@@ -180,25 +181,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)

#ifdef CONFIG_64BIT
	__asm__ __volatile__ (
"	depd,z %3,60,3,%%r19\n"		/* r19=(ofs&7)*8 */
"	mtsp %4, %%sr1\n"
"	depd %%r0,63,3,%3\n"
"1:	ldd 0(%%sr1,%3),%0\n"
"2:	ldd 8(%%sr1,%3),%%r20\n"
"	subi 64,%%r19,%%r19\n"
"	mtsar %%r19\n"
"	shrpd %0,%%r20,%%sar,%0\n"
"	depd,z %2,60,3,%3\n"		/* shift=(ofs&7)*8 */
"	mtsp %5, %%sr1\n"
"	depd %%r0,63,3,%2\n"
"1:	ldd 0(%%sr1,%2),%0\n"
"2:	ldd 8(%%sr1,%2),%4\n"
"	subi 64,%3,%3\n"
"	mtsar %3\n"
"	shrpd %0,%4,%%sar,%0\n"
"3:	\n"
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
	: "=r" (val), "+r" (ret)
	: "0" (val), "r" (saddr), "r" (regs->isr)
	: "r19", "r20" );
	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
	: "r" (regs->isr) );
#else
	{
	unsigned long shift, temp1;
	__asm__ __volatile__ (
"	zdep %2,29,2,%3\n"		/* r19=(ofs&3)*8 */
"	zdep %2,29,2,%3\n"		/* shift=(ofs&3)*8 */
"	mtsp %5, %%sr1\n"
"	dep %%r0,31,2,%2\n"
"1:	ldw 0(%%sr1,%2),%0\n"

@@ -214,7 +212,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
	: "r" (regs->isr) );
	}
#endif

	DPRINTF("val = 0x%llx\n", val);

@@ -12,9 +12,16 @@
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
#define mfpmr(rn)	({unsigned int rval; \
			asm volatile("mfpmr %0," __stringify(rn) \
			asm volatile(".machine push; " \
				     ".machine e300; " \
				     "mfpmr %0," __stringify(rn) ";" \
				     ".machine pop; " \
				     : "=r" (rval)); rval;})
#define mtpmr(rn, v)	asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
#define mtpmr(rn, v)	asm volatile(".machine push; " \
				     ".machine e300; " \
				     "mtpmr " __stringify(rn) ",%0; " \
				     ".machine pop; " \
				     : : "r" (v))
#endif /* __ASSEMBLY__ */

/* Freescale Book E Performance Monitor APU Registers */
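mfpmr/mtpmr are e300-family instructions, and newer binutils reject them unless the assembler is told which machine they belong to; .machine push/pop scopes that override to the one statement instead of changing the target for the whole translation unit. A hedged generic sketch of the pattern (the mnemonic and register number are placeholders):

	/* Scope an ISA-extension mnemonic to a single inline-asm statement.
	 * Without the push/pop, a bare ".machine e300" would leak into every
	 * later asm in the same file. */
	#define read_ext_reg(out)			\
		asm volatile(".machine push; "		\
			     ".machine e300; "		\
			     "mfpmr %0, 0; "		\
			     ".machine pop; "		\
			     : "=r" (out))
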
@@ -375,6 +375,18 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
	if (IS_ENABLED(CONFIG_PPC64))
		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);

	if (nr_cpu_ids % nthreads != 0) {
		set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
		pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
			nr_cpu_ids);
	}

	if (boot_cpuid >= nr_cpu_ids) {
		set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
		pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
			boot_cpuid, nr_cpu_ids);
	}

	/*
	 * PAPR defines "logical" PVR values for cpus that
	 * meet various levels of the architecture:

@@ -76,7 +76,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o

obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
# Enable <altivec.h>
CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)

@@ -1,256 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* parport.h: sparc64 specific parport initialization and dma.
 *
 * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
 */
#ifndef ___ASM_SPARC_PARPORT_H
#define ___ASM_SPARC_PARPORT_H

#ifndef _ASM_SPARC64_PARPORT_H
#define _ASM_SPARC64_PARPORT_H 1

#include <linux/of.h>
#include <linux/platform_device.h>

#include <asm/ebus_dma.h>
#include <asm/ns87303.h>
#include <asm/prom.h>

#define PARPORT_PC_MAX_PORTS PARPORT_MAX

/*
 * While sparc64 doesn't have an ISA DMA API, we provide something that looks
 * close enough to make parport_pc happy
 */
#define HAS_DMA

#ifdef CONFIG_PARPORT_PC_FIFO
static DEFINE_SPINLOCK(dma_spin_lock);

#define claim_dma_lock() \
({	unsigned long flags; \
	spin_lock_irqsave(&dma_spin_lock, flags); \
	flags; \
})

#define release_dma_lock(__flags) \
	spin_unlock_irqrestore(&dma_spin_lock, __flags);
#if defined(__sparc__) && defined(__arch64__)
#include <asm/parport_64.h>
#else
#include <asm-generic/parport.h>
#endif
#endif

static struct sparc_ebus_info {
	struct ebus_dma_info info;
	unsigned int addr;
	unsigned int count;
	int lock;

	struct parport *port;
} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];

static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);

static inline int request_dma(unsigned int dmanr, const char *device_id)
{
	if (dmanr >= PARPORT_PC_MAX_PORTS)
		return -EINVAL;
	if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
		return -EBUSY;
	return 0;
}

static inline void free_dma(unsigned int dmanr)
{
	if (dmanr >= PARPORT_PC_MAX_PORTS) {
		printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
		return;
	}
	if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
		printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
		return;
	}
}

static inline void enable_dma(unsigned int dmanr)
{
	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);

	if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
			     sparc_ebus_dmas[dmanr].addr,
			     sparc_ebus_dmas[dmanr].count))
		BUG();
}

static inline void disable_dma(unsigned int dmanr)
{
	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
}

static inline void clear_dma_ff(unsigned int dmanr)
{
	/* nothing */
}

static inline void set_dma_mode(unsigned int dmanr, char mode)
{
	ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
}

static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
{
	sparc_ebus_dmas[dmanr].addr = addr;
}

static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
	sparc_ebus_dmas[dmanr].count = count;
}

static inline unsigned int get_dma_residue(unsigned int dmanr)
{
	return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
}

static int ecpp_probe(struct platform_device *op)
{
	unsigned long base = op->resource[0].start;
	unsigned long config = op->resource[1].start;
	unsigned long d_base = op->resource[2].start;
	unsigned long d_len;
	struct device_node *parent;
	struct parport *p;
	int slot, err;

	parent = op->dev.of_node->parent;
	if (of_node_name_eq(parent, "dma")) {
		p = parport_pc_probe_port(base, base + 0x400,
					  op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
					  op->dev.parent->parent, 0);
		if (!p)
			return -ENOMEM;
		dev_set_drvdata(&op->dev, p);
		return 0;
	}

	for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
		if (!test_and_set_bit(slot, dma_slot_map))
			break;
	}
	err = -ENODEV;
	if (slot >= PARPORT_PC_MAX_PORTS)
		goto out_err;

	spin_lock_init(&sparc_ebus_dmas[slot].info.lock);

	d_len = (op->resource[2].end - d_base) + 1UL;
	sparc_ebus_dmas[slot].info.regs =
		of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");

	if (!sparc_ebus_dmas[slot].info.regs)
		goto out_clear_map;

	sparc_ebus_dmas[slot].info.flags = 0;
	sparc_ebus_dmas[slot].info.callback = NULL;
	sparc_ebus_dmas[slot].info.client_cookie = NULL;
	sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
	strcpy(sparc_ebus_dmas[slot].info.name, "parport");
	if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
		goto out_unmap_regs;

	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);

	/* Configure IRQ to Push Pull, Level Low */
	/* Enable ECP, set bit 2 of the CTR first */
	outb(0x04, base + 0x02);
	ns87303_modify(config, PCR,
		       PCR_EPP_ENABLE |
		       PCR_IRQ_ODRAIN,
		       PCR_ECP_ENABLE |
		       PCR_ECP_CLK_ENA |
		       PCR_IRQ_POLAR);

	/* CTR bit 5 controls direction of port */
	ns87303_modify(config, PTR,
		       0, PTR_LPT_REG_DIR);

	p = parport_pc_probe_port(base, base + 0x400,
				  op->archdata.irqs[0],
				  slot,
				  op->dev.parent,
				  0);
	err = -ENOMEM;
	if (!p)
		goto out_disable_irq;

	dev_set_drvdata(&op->dev, p);

	return 0;

out_disable_irq:
	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
	ebus_dma_unregister(&sparc_ebus_dmas[slot].info);

out_unmap_regs:
	of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);

out_clear_map:
	clear_bit(slot, dma_slot_map);

out_err:
	return err;
}

static int ecpp_remove(struct platform_device *op)
{
	struct parport *p = dev_get_drvdata(&op->dev);
	int slot = p->dma;

	parport_pc_unregister_port(p);

	if (slot != PARPORT_DMA_NOFIFO) {
		unsigned long d_base = op->resource[2].start;
		unsigned long d_len;

		d_len = (op->resource[2].end - d_base) + 1UL;

		ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
		ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
		of_iounmap(&op->resource[2],
			   sparc_ebus_dmas[slot].info.regs,
			   d_len);
		clear_bit(slot, dma_slot_map);
	}

	return 0;
}

static const struct of_device_id ecpp_match[] = {
	{
		.name = "ecpp",
	},
	{
		.name = "parallel",
		.compatible = "ecpp",
	},
	{
		.name = "parallel",
		.compatible = "ns87317-ecpp",
	},
	{
		.name = "parallel",
		.compatible = "pnpALI,1533,3",
	},
	{},
};

static struct platform_driver ecpp_driver = {
	.driver = {
		.name = "ecpp",
		.of_match_table = ecpp_match,
	},
	.probe = ecpp_probe,
	.remove = ecpp_remove,
};

static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
	return platform_driver_register(&ecpp_driver);
}

#endif /* !(_ASM_SPARC64_PARPORT_H */

@@ -0,0 +1,256 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* parport.h: sparc64 specific parport initialization and dma.
 *
 * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
 */

#ifndef _ASM_SPARC64_PARPORT_H
#define _ASM_SPARC64_PARPORT_H 1

#include <linux/of.h>
#include <linux/platform_device.h>

#include <asm/ebus_dma.h>
#include <asm/ns87303.h>
#include <asm/prom.h>

#define PARPORT_PC_MAX_PORTS PARPORT_MAX

/*
 * While sparc64 doesn't have an ISA DMA API, we provide something that looks
 * close enough to make parport_pc happy
 */
#define HAS_DMA

#ifdef CONFIG_PARPORT_PC_FIFO
static DEFINE_SPINLOCK(dma_spin_lock);

#define claim_dma_lock() \
({	unsigned long flags; \
	spin_lock_irqsave(&dma_spin_lock, flags); \
	flags; \
})

#define release_dma_lock(__flags) \
	spin_unlock_irqrestore(&dma_spin_lock, __flags);
#endif

static struct sparc_ebus_info {
	struct ebus_dma_info info;
	unsigned int addr;
	unsigned int count;
	int lock;

	struct parport *port;
} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];

static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);

static inline int request_dma(unsigned int dmanr, const char *device_id)
{
	if (dmanr >= PARPORT_PC_MAX_PORTS)
		return -EINVAL;
	if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
		return -EBUSY;
	return 0;
}

static inline void free_dma(unsigned int dmanr)
{
	if (dmanr >= PARPORT_PC_MAX_PORTS) {
		printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
		return;
	}
	if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
		printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
		return;
	}
}

static inline void enable_dma(unsigned int dmanr)
{
	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);

	if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
			     sparc_ebus_dmas[dmanr].addr,
			     sparc_ebus_dmas[dmanr].count))
		BUG();
}

static inline void disable_dma(unsigned int dmanr)
{
	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
}

static inline void clear_dma_ff(unsigned int dmanr)
{
	/* nothing */
}

static inline void set_dma_mode(unsigned int dmanr, char mode)
{
	ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
}

static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
{
	sparc_ebus_dmas[dmanr].addr = addr;
}

static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
	sparc_ebus_dmas[dmanr].count = count;
}

static inline unsigned int get_dma_residue(unsigned int dmanr)
{
	return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
}

static int ecpp_probe(struct platform_device *op)
{
	unsigned long base = op->resource[0].start;
	unsigned long config = op->resource[1].start;
	unsigned long d_base = op->resource[2].start;
	unsigned long d_len;
	struct device_node *parent;
	struct parport *p;
	int slot, err;

	parent = op->dev.of_node->parent;
	if (of_node_name_eq(parent, "dma")) {
		p = parport_pc_probe_port(base, base + 0x400,
					  op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
					  op->dev.parent->parent, 0);
		if (!p)
			return -ENOMEM;
		dev_set_drvdata(&op->dev, p);
		return 0;
	}

	for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
		if (!test_and_set_bit(slot, dma_slot_map))
			break;
	}
	err = -ENODEV;
	if (slot >= PARPORT_PC_MAX_PORTS)
		goto out_err;

	spin_lock_init(&sparc_ebus_dmas[slot].info.lock);

	d_len = (op->resource[2].end - d_base) + 1UL;
	sparc_ebus_dmas[slot].info.regs =
		of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");

	if (!sparc_ebus_dmas[slot].info.regs)
		goto out_clear_map;

	sparc_ebus_dmas[slot].info.flags = 0;
	sparc_ebus_dmas[slot].info.callback = NULL;
	sparc_ebus_dmas[slot].info.client_cookie = NULL;
	sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
	strcpy(sparc_ebus_dmas[slot].info.name, "parport");
	if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
		goto out_unmap_regs;

	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);

	/* Configure IRQ to Push Pull, Level Low */
	/* Enable ECP, set bit 2 of the CTR first */
	outb(0x04, base + 0x02);
	ns87303_modify(config, PCR,
		       PCR_EPP_ENABLE |
		       PCR_IRQ_ODRAIN,
		       PCR_ECP_ENABLE |
		       PCR_ECP_CLK_ENA |
		       PCR_IRQ_POLAR);

	/* CTR bit 5 controls direction of port */
	ns87303_modify(config, PTR,
		       0, PTR_LPT_REG_DIR);

	p = parport_pc_probe_port(base, base + 0x400,
				  op->archdata.irqs[0],
				  slot,
				  op->dev.parent,
				  0);
	err = -ENOMEM;
	if (!p)
		goto out_disable_irq;

	dev_set_drvdata(&op->dev, p);

	return 0;

out_disable_irq:
	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
	ebus_dma_unregister(&sparc_ebus_dmas[slot].info);

out_unmap_regs:
	of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);

out_clear_map:
	clear_bit(slot, dma_slot_map);

out_err:
	return err;
}

static int ecpp_remove(struct platform_device *op)
{
	struct parport *p = dev_get_drvdata(&op->dev);
	int slot = p->dma;

	parport_pc_unregister_port(p);

	if (slot != PARPORT_DMA_NOFIFO) {
		unsigned long d_base = op->resource[2].start;
		unsigned long d_len;

		d_len = (op->resource[2].end - d_base) + 1UL;

		ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
		ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
		of_iounmap(&op->resource[2],
			   sparc_ebus_dmas[slot].info.regs,
			   d_len);
		clear_bit(slot, dma_slot_map);
	}

	return 0;
}

static const struct of_device_id ecpp_match[] = {
	{
		.name = "ecpp",
	},
	{
		.name = "parallel",
		.compatible = "ecpp",
	},
	{
		.name = "parallel",
		.compatible = "ns87317-ecpp",
	},
	{
		.name = "parallel",
		.compatible = "pnpALI,1533,3",
	},
	{},
};

static struct platform_driver ecpp_driver = {
	.driver = {
		.name = "ecpp",
		.of_match_table = ecpp_match,
	},
	.probe = ecpp_probe,
	.remove = ecpp_remove,
};

static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
	return platform_driver_register(&ecpp_driver);
}

#endif /* !(_ASM_SPARC64_PARPORT_H */

@@ -279,7 +279,7 @@ static int __init setup_nmi_watchdog(char *str)
	if (!strncmp(str, "panic", 5))
		panic_on_timeout = 1;

	return 0;
	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);


@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (err)
		return err;
	vdso_enabled = val;
	return 0;
	if (!err)
		vdso_enabled = val;
	return 1;
}
__setup("vdso=", vdso_setup);
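Both fixes hinge on __setup() return-value semantics, which differ from early_param(): returning 1 tells the kernel the option was consumed, while returning 0 (or an errno) causes "nmi_watchdog=..." or "vdso=..." to be handed on to init as an unrecognized argument. A minimal hedged sketch of the convention (the option itself is illustrative):

	#include <linux/init.h>

	static int example_enabled;	/* illustrative option */

	static int __init example_setup(char *str)
	{
		if (str && *str == '1')
			example_enabled = 1;
		return 1;	/* 1 == "option handled", even on bad input */
	}
	__setup("example=", example_setup);
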
@@ -1516,19 +1516,6 @@ config AMD_MEM_ENCRYPT
	  This requires an AMD processor that supports Secure Memory
	  Encryption (SME).

config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
	bool "Activate AMD Secure Memory Encryption (SME) by default"
	depends on AMD_MEM_ENCRYPT
	help
	  Say yes to have system memory encrypted by default if running on
	  an AMD processor that supports Secure Memory Encryption (SME).

	  If set to Y, then the encryption of system memory can be
	  deactivated with the mem_encrypt=off command line option.

	  If set to N, then the encryption of system memory can be
	  activated with the mem_encrypt=on command line option.

# Common NUMA Features
config NUMA
	bool "NUMA Memory Allocation and Scheduler Support"

@@ -15,10 +15,12 @@
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/setup.h>

	.code64
	.text
@@ -49,6 +51,11 @@ SYM_FUNC_START(startup_64_mixed_mode)
	lea efi32_boot_args(%rip), %rdx
	mov 0(%rdx), %edi
	mov 4(%rdx), %esi

	/* Switch to the firmware's stack */
	movl efi32_boot_sp(%rip), %esp
	andl $~7, %esp

#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
	mov 8(%rdx), %edx		// saved bootparams pointer
	test %edx, %edx
@@ -144,6 +151,7 @@ SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START(efi32_stub_entry)
	call 1f
1:	popl %ecx
	leal (efi32_boot_args - 1b)(%ecx), %ebx

	/* Clear BSS */
	xorl %eax, %eax
@@ -158,6 +166,7 @@ SYM_FUNC_START(efi32_stub_entry)
	popl %ecx
	popl %edx
	popl %esi
	movl %esi, 8(%ebx)
	jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
@@ -234,8 +243,6 @@ SYM_FUNC_END(efi_enter32)
 *
 * Arguments:	%ecx	image handle
 *		%edx	EFI system table pointer
 *		%esi	struct bootparams pointer (or NULL when not using
 *			the EFI handover protocol)
 *
 * Since this is the point of no return for ordinary execution, no registers
 * are considered live except for the function parameters. [Note that the EFI
@@ -254,13 +261,25 @@ SYM_FUNC_START_LOCAL(efi32_entry)
	/* Store firmware IDT descriptor */
	sidtl (efi32_boot_idt - 1b)(%ebx)

	/* Store firmware stack pointer */
	movl %esp, (efi32_boot_sp - 1b)(%ebx)

	/* Store boot arguments */
	leal (efi32_boot_args - 1b)(%ebx), %ebx
	movl %ecx, 0(%ebx)
	movl %edx, 4(%ebx)
	movl %esi, 8(%ebx)
	movb $0x0, 12(%ebx)		// efi_is64

	/*
	 * Allocate some memory for a temporary struct boot_params, which only
	 * needs the minimal pieces that startup_32() relies on.
	 */
	subl $PARAM_SIZE, %esp
	movl %esp, %esi
	movl $PAGE_SIZE, BP_kernel_alignment(%esi)
	movl $_end - 1b, BP_init_size(%esi)
	subl $startup_32 - 1b, BP_init_size(%esi)

	/* Disable paging */
	movl %cr0, %eax
	btrl $X86_CR0_PG_BIT, %eax
@@ -286,8 +305,7 @@ SYM_FUNC_START(efi32_pe_entry)

	movl 8(%ebp), %ecx		// image_handle
	movl 12(%ebp), %edx		// sys_table
	xorl %esi, %esi
	jmp efi32_entry			// pass %ecx, %edx, %esi
	jmp efi32_entry			// pass %ecx, %edx
					// no other registers remain live

2:	popl %edi			// restore callee-save registers
@@ -318,5 +336,6 @@ SYM_DATA_END(efi32_boot_idt)

SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)

@@ -14,7 +14,7 @@
#include <asm/processor.h>

enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
static u64 cc_mask __ro_after_init;
u64 cc_mask __ro_after_init;

static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
@@ -148,8 +148,3 @@ u64 cc_mkdec(u64 val)
	}
}
EXPORT_SYMBOL_GPL(cc_mkdec);

__init void cc_set_mask(u64 mask)
{
	cc_mask = mask;
}

@@ -113,6 +113,20 @@

#endif

#ifndef __ASSEMBLY__
#ifndef __pic__
static __always_inline __pure void *rip_rel_ptr(void *p)
{
	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));

	return p;
}
#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))
#else
#define RIP_REL_REF(var)	(var)
#endif
#endif

/*
 * Macros to generate condition code outputs from inline assembly,
 * The output operand must be type "bool".

@@ -2,6 +2,7 @@
#ifndef _ASM_X86_COCO_H
#define _ASM_X86_COCO_H

#include <asm/asm.h>
#include <asm/types.h>

enum cc_vendor {
@@ -11,9 +12,14 @@ enum cc_vendor {
};

extern enum cc_vendor cc_vendor;
extern u64 cc_mask;

#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
void cc_set_mask(u64 mask);
static inline void cc_set_mask(u64 mask)
{
	RIP_REL_REF(cc_mask) = mask;
}

u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
#else
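RIP_REL_REF() exists because this code can run from the identity mapping before relocations have been applied: a plain global access would compile to an absolute address that is not valid yet, while forcing the access through a RIP-relative lea always resolves to wherever the image actually sits. A hedged usage sketch (the variable and function here are illustrative, not kernel symbols):

	/* Illustrative only: early boot code writing a global safely. */
	static u64 example_mask;		/* hypothetical global */

	static void __init example_set_mask(u64 mask)
	{
		/* compiles to a RIP-relative address, valid pre-relocation */
		RIP_REL_REF(example_mask) = mask;
	}
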
@@ -15,7 +15,8 @@
#include <linux/init.h>
#include <linux/cc_platform.h>

#include <asm/bootparam.h>
#include <asm/asm.h>
struct boot_params;

#ifdef CONFIG_X86_MEM_ENCRYPT
void __init mem_encrypt_init(void);
@@ -57,6 +58,11 @@ void __init mem_encrypt_free_decrypted_mem(void);

void __init sev_es_init_vc_handling(void);

static inline u64 sme_get_me_mask(void)
{
	return RIP_REL_REF(sme_me_mask);
}

#define __bss_decrypted __section(".bss..decrypted")

#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -89,6 +95,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en

static inline void mem_encrypt_free_decrypted_mem(void) { }

static inline u64 sme_get_me_mask(void) { return 0; }

#define __bss_decrypted

#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -106,11 +114,6 @@ void add_encrypt_protection_map(void);

extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];

static inline u64 sme_get_me_mask(void)
{
	return sme_me_mask;
}

#endif /* __ASSEMBLY__ */

#endif /* __X86_MEM_ENCRYPT_H__ */

@@ -203,12 +203,12 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
					 unsigned long npages);
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages);
void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
@@ -227,12 +227,12 @@ static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
static inline void snp_dmi_setup(void) { }
static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
	return -ENOTTY;

@@ -12,11 +12,6 @@

/* image of the saved processor state */
struct saved_context {
	/*
	 * On x86_32, all segment registers except gs are saved at kernel
	 * entry in pt_regs.
	 */
	u16 gs;
	unsigned long cr0, cr2, cr3, cr4;
	u64 misc_enable;
	struct saved_msrs saved_msrs;
@@ -27,6 +22,11 @@ struct saved_context {
	unsigned long tr;
	unsigned long safety;
	unsigned long return_address;
	/*
	 * On x86_32, all segment registers except gs are saved at kernel
	 * entry in pt_regs.
	 */
	u16 gs;
	bool misc_enable_saved;
} __attribute__((packed));

@@ -30,12 +30,13 @@ struct x86_init_mpparse {
 * @reserve_resources:	reserve the standard resources for the
 *			platform
 * @memory_setup:	platform specific memory setup
 *
 * @dmi_setup:		platform specific DMI setup
 */
struct x86_init_resources {
	void (*probe_roms)(void);
	void (*reserve_resources)(void);
	char *(*memory_setup)(void);
	void (*dmi_setup)(void);
};

/**

@@ -1006,11 +1006,11 @@ static bool cpu_has_zenbleed_microcode(void)
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
	case 0x60 ... 0x67: good_rev = 0x0860010b; break;
	case 0x68 ... 0x6f: good_rev = 0x08608105; break;
	case 0x70 ... 0x7f: good_rev = 0x08701032; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

	default:
		return false;

@@ -2,6 +2,7 @@
/*
 * EISA specific code
 */
#include <linux/cc_platform.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/io.h>
@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
{
	void __iomem *p;

	if (xen_pv_domain() && !xen_initial_domain())
	if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return 0;

	p = ioremap(0x0FFFD9, 4);

@@ -178,10 +178,11 @@ void fpu__init_cpu_xstate(void)
	 * Must happen after CR4 setup and before xsetbv() to allow KVM
	 * lazy passthrough. Write independent of the dynamic state static
	 * key as that does not work on the boot CPU. This also ensures
	 * that any stale state is wiped out from XFD.
	 * that any stale state is wiped out from XFD. Reset the per CPU
	 * xfd cache too.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XFD))
		wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);
		xfd_set_state(init_fpstate.xfd);

	/*
	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features

@@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
#endif

#ifdef CONFIG_X86_64
static inline void xfd_set_state(u64 xfd)
{
	wrmsrl(MSR_IA32_XFD, xfd);
	__this_cpu_write(xfd_state, xfd);
}

static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd) {
			wrmsrl(MSR_IA32_XFD, xfd);
			__this_cpu_write(xfd_state, xfd);
		}
		if (__this_cpu_read(xfd_state) != xfd)
			xfd_set_state(xfd);
	}
}

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_set_state(u64 xfd) { }

static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {

@@ -335,7 +335,16 @@ out:
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	if (is_endbr(*(u32 *)addr)) {
	u32 insn;

	/*
	 * Since 'addr' is not guaranteed to be safe to access, use
	 * copy_from_kernel_nofault() to read the instruction:
	 */
	if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
		return NULL;

	if (is_endbr(insn)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;

@@ -196,12 +196,12 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
	if (early) {
		/* Initialize the lapic mapping */
		if (!acpi_lapic)
			register_lapic_address(mpc->lapic);
		return 1;
	}

	/* Now process the configuration blocks. */
	while (count < mpc->length) {

@@ -629,7 +629,7 @@ void nmi_backtrace_stall_check(const struct cpumask *btp)
		msgp = nmi_check_stall_msg[idx];
		if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
			modp = ", but OK because ignore_nmis was set";
		if (nmi_seq & ~0x1)
		if (nmi_seq & 0x1)
			msghp = " (CPU currently in NMI handler function)";
		else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
			msghp = " (CPU exited one NMI handler function)";

@@ -203,16 +203,6 @@ void __init probe_roms(void)
	unsigned char c;
	int i;

	/*
	 * The ROM memory range is not part of the e820 table and is therefore not
	 * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
	 * memory, and SNP requires encrypted memory to be validated before access.
	 * Do that here.
	 */
	snp_prep_memory(video_rom_resource.start,
			((system_rom_resource.end + 1) - video_rom_resource.start),
			SNP_PAGE_STATE_PRIVATE);

	/* video rom */
	upper = adapter_rom_resources[0].start;
	for (start = video_rom_resource.start; start < upper; start += 2048) {

@@ -9,7 +9,6 @@
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
@@ -1029,7 +1028,7 @@ void __init setup_arch(char **cmdline_p)
	efi_init();

	reserve_ibft_region();
	dmi_setup();
	x86_init.resources.dmi_setup();

	/*
	 * VMware detection requires dmi to be available, so this

@@ -556,9 +556,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

		/* Skip post-processing for out-of-range zero leafs. */
		if (!(leaf->fn <= cpuid_std_range_max ||
		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
		if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
			return 0;
	}

@@ -1063,11 +1063,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x0)
			cpuid_std_range_max = fn->eax;
			RIP_REL_REF(cpuid_std_range_max) = fn->eax;
		else if (fn->eax_in == 0x40000000)
			cpuid_hyp_range_max = fn->eax;
			RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
		else if (fn->eax_in == 0x80000000)
			cpuid_ext_range_max = fn->eax;
			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
	}
}

+14 -17
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>

#include <asm/cpu_entry_area.h>
@@ -748,7 +749,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
@@ -767,28 +768,13 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}

void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
{
	unsigned long vaddr, npages;

	vaddr = (unsigned long)__va(paddr);
	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (op == SNP_PAGE_STATE_PRIVATE)
		early_snp_set_memory_private(vaddr, paddr, npages);
	else if (op == SNP_PAGE_STATE_SHARED)
		early_snp_set_memory_shared(vaddr, paddr, npages);
	else
		WARN(1, "invalid memory op %d\n", op);
}

static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
				       unsigned long vaddr_end, int op)
{
@@ -2112,6 +2098,17 @@ void __init __noreturn snp_abort(void)
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}

/*
 * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
 * enabled, as the alternative (fallback) logic for DMI probing in the legacy
 * ROM region can cause a crash since this region is not pre-validated.
 */
void __init snp_dmi_setup(void)
{
	if (efi_enabled(EFI_CONFIG_TABLES))
		dmi_setup();
}

static void dump_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

@@ -3,6 +3,7 @@
|
||||
*
|
||||
* For licencing details see kernel-base/COPYING
|
||||
*/
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/export.h>
|
||||
@@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = {
|
||||
.probe_roms = probe_roms,
|
||||
.reserve_resources = reserve_standard_io_resources,
|
||||
.memory_setup = e820__memory_setup_default,
|
||||
.dmi_setup = dmi_setup,
|
||||
},
|
||||
|
||||
.mpparse = {
|
||||
@@ -677,6 +677,11 @@ void kvm_set_cpu_caps(void)
		F(AMX_COMPLEX)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
		F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
		F(BHI_CTRL) | F(MCDT_NO)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
	);

@@ -956,13 +961,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		max_idx = entry->eax = min(entry->eax, 2u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
		/* KVM only supports up to 0x7.2, capped above via min(). */
		if (max_idx >= 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;
@@ -972,6 +977,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
			entry->ebx = 0;
			entry->ecx = 0;
		}
		if (max_idx >= 2) {
			entry = do_host_cpuid(array, function, 2);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_2_EDX);
			entry->ecx = 0;
			entry->ebx = 0;
			entry->eax = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		union cpuid10_eax eax;
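The hunk above raises KVM's cap on CPUID leaf 7 from subleaf 1 to subleaf 2 and drives the enumeration off max_idx, the subleaf count that leaf 7 itself reports in EAX of subleaf 0. A small user-space probe of the same enumeration, assuming an x86 host and GCC's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, sub, max_idx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;
	max_idx = eax;	/* leaf 7 reports its own max subleaf here */
	printf("leaf 7 max subleaf: %u\n", max_idx);
	for (sub = 1; sub <= max_idx; sub++) {
		__get_cpuid_count(7, sub, &eax, &ebx, &ecx, &edx);
		printf("7.%u: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
		       sub, eax, ebx, ecx, edx);
	}
	return 0;
}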
@@ -41,6 +41,7 @@
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"
@@ -499,8 +500,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
	}

	/* Check if there are APF page ready requests pending */
	if (enabled)
	if (enabled) {
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
		kvm_xen_sw_enable_lapic(apic->vcpu);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)

@@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs {
	CPUID_7_1_EDX,
	CPUID_8000_0007_EDX,
	CPUID_8000_0022_EAX,
	CPUID_7_2_EDX,
	NR_KVM_CPU_CAPS,

	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)

/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
#define X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)

/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)

@@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
	[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
	[CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
};

/*
@@ -106,18 +116,19 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
 */
static __always_inline u32 __feature_translate(int x86_feature)
{
	if (x86_feature == X86_FEATURE_SGX1)
		return KVM_X86_FEATURE_SGX1;
	else if (x86_feature == X86_FEATURE_SGX2)
		return KVM_X86_FEATURE_SGX2;
	else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
		return KVM_X86_FEATURE_SGX_EDECCSSA;
	else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
		return KVM_X86_FEATURE_CONSTANT_TSC;
	else if (x86_feature == X86_FEATURE_PERFMON_V2)
		return KVM_X86_FEATURE_PERFMON_V2;
#define KVM_X86_TRANSLATE_FEATURE(f) \
	case X86_FEATURE_##f: return KVM_X86_FEATURE_##f

	return x86_feature;
	switch (x86_feature) {
	KVM_X86_TRANSLATE_FEATURE(SGX1);
	KVM_X86_TRANSLATE_FEATURE(SGX2);
	KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
	KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
	KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
	KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
	default:
		return x86_feature;
	}
}

static __always_inline u32 __feature_leaf(int x86_feature)
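The rewrite above replaces the if/else-if chain with a switch whose cases are generated by a token-pasting macro, so adding a translated feature (here RRSBA_CTRL) is a single line. A toy version of the pattern, with enum values invented for the demo:

#include <stdio.h>

enum { F_SGX1 = 10, F_SGX2, F_RRSBA_CTRL };
enum { KVM_F_SGX1 = 100, KVM_F_SGX2, KVM_F_RRSBA_CTRL };

/* One case per feature, generated by token pasting. */
#define TRANSLATE_FEATURE(f) case F_##f: return KVM_F_##f

static int feature_translate(int f)
{
	switch (f) {
	TRANSLATE_FEATURE(SGX1);
	TRANSLATE_FEATURE(SGX2);
	TRANSLATE_FEATURE(RRSBA_CTRL);
	default:
		return f;	/* no KVM-only alias: pass through */
	}
}

int main(void)
{
	printf("%d %d\n", feature_translate(F_SGX2), feature_translate(42));	/* 101 42 */
	return 0;
}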
@@ -57,7 +57,7 @@ static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);

/* enable/disable SEV-ES DebugSwap support */
static bool sev_es_debug_swap_enabled = true;
static bool sev_es_debug_swap_enabled = false;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
#else
#define sev_enabled false
@@ -612,8 +612,11 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
	save->xss = svm->vcpu.arch.ia32_xss;
	save->dr6 = svm->vcpu.arch.dr6;

	if (sev_es_debug_swap_enabled)
	if (sev_es_debug_swap_enabled) {
		save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
		pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
			     "This will not work starting with Linux 6.10\n");
	}

	pr_debug("Virtual Machine Save Area (VMSA):\n");
	print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
@@ -1975,20 +1978,22 @@ int sev_mem_enc_register_region(struct kvm *kvm,
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Lets make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * correct C-bit. Note, this must be done before dropping kvm->lock,
	 * as region and its array of pages can be freed by a different task
	 * once kvm->lock is released.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Lets make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	return ret;

e_free:
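The new comment in the hunk above pins down a lifetime rule: once kvm->lock is dropped, another task may free the freshly registered region, so the cache flush must happen while the lock is still held. A toy pthread model of the same finish-work-before-publish ordering (all names ours); link with -lpthread:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct region { int flushed; struct region *next; };
static struct region *regions_list;

static void register_region(struct region *r)
{
	pthread_mutex_lock(&lock);
	r->flushed = 1;			/* "flush caches" while still the owner */
	r->next = regions_list;		/* publish only afterwards */
	regions_list = r;
	pthread_mutex_unlock(&lock);	/* from here on, r may be freed by others */
}

int main(void)
{
	struct region *r = calloc(1, sizeof(*r));

	register_region(r);
	printf("flushed=%d\n", regions_list->flushed);
	free(r);
	return 0;
}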
@@ -7840,6 +7840,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,

	if (r < 0)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * Mark the page dirty _before_ checking whether or not the CMPXCHG was
	 * successful, as the old value is written back on failure. Note, for
	 * live migration, this is unnecessarily conservative as CMPXCHG writes
	 * back the original value and the access is atomic, but KVM's ABI is
	 * that all writes are dirty logged, regardless of the value written.
	 */
	kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));

	if (r)
		return X86EMUL_CMPXCHG_FAILED;
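The hunk above moves the dirty-log call so it runs whether or not the CMPXCHG succeeded, because a failed compare still writes the old value back. A toy user-space model of the same ordering with C11 atomics (names ours):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long guest_word = 5;
static bool page_dirty;

static bool emulate_cmpxchg(unsigned long old, unsigned long new)
{
	bool ok = atomic_compare_exchange_strong(&guest_word, &old, new);

	page_dirty = true;	/* mark dirty before checking for success */
	return ok;
}

int main(void)
{
	/* compare fails (5 != 7), yet the page is still logged as dirty */
	printf("ok=%d dirty=%d\n", emulate_cmpxchg(7, 9), page_dirty);
	return 0;
}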
@@ -471,7 +471,7 @@ void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
	kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}

static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;

@@ -18,6 +18,7 @@ extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
@@ -36,6 +37,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
			 const struct kvm_irq_routing_entry *ue);
void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
	/*
	 * The local APIC is being enabled. If the per-vCPU upcall vector is
	 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
	 * interrupt.
	 */
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
		kvm_xen_inject_vcpu_vector(vcpu);
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
@@ -101,6 +115,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
@@ -528,6 +528,24 @@ void __init sme_early_init(void)
	 */
	if (sev_status & MSR_AMD64_SEV_ENABLED)
		ia32_disable();

	/*
	 * Override init functions that scan the ROM region in SEV-SNP guests,
	 * as this memory is not pre-validated and would thus cause a crash.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.pci.init_irq = x86_init_noop;
		x86_init.resources.probe_roms = x86_init_noop;

		/*
		 * DMI setup behavior for SEV-SNP guests depends on
		 * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
		 * parsed yet. snp_dmi_setup() will run after that
		 * parsing has happened.
		 */
		x86_init.resources.dmi_setup = snp_dmi_setup;
	}
}
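Together with the snp_dmi_setup() and x86_init hunks earlier, this completes the wiring: a default op sits in a function-pointer table, a platform quirk swaps it out early in boot, and the caller keeps invoking through the table. A toy model of that pattern (all names ours):

#include <stdio.h>

struct resources_ops { void (*dmi_setup)(void); };

static void default_dmi_setup(void)  { puts("probe EFI tables, then legacy ROM"); }
static void snp_safe_dmi_setup(void) { puts("probe EFI tables only"); }

static struct resources_ops res_ops = { .dmi_setup = default_dmi_setup };

int main(void)
{
	int snp_guest = 1;	/* pretend the SEV-SNP MSR bit is set */

	if (snp_guest)
		res_ops.dmi_setup = snp_safe_dmi_setup;	/* early-boot override */

	res_ops.dmi_setup();	/* the setup_arch() side is unchanged */
	return 0;
}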

void __init mem_encrypt_free_decrypted_mem(void)
@@ -97,7 +97,6 @@ static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[]  __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";

static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
@@ -305,7 +304,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
	if (!sme_get_me_mask() ||
	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
		return;

	/*
@@ -504,7 +504,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)

void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	unsigned long me_mask;
@@ -542,11 +542,11 @@ void __init sme_enable(struct boot_params *bp)
	me_mask = 1UL << (ebx & 0x3f);

	/* Check the SEV MSR whether SEV or SME is enabled */
	sev_status = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
@@ -572,7 +572,6 @@ void __init sme_enable(struct boot_params *bp)
		return;
	} else {
		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		goto out;
	}

@@ -587,28 +586,17 @@ void __init sme_enable(struct boot_params *bp)
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		sme_me_mask = me_mask;

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
		goto out;

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
	    strncmp(buffer, cmdline_on, sizeof(buffer)))
		return;

out:
	if (sme_me_mask) {
		physical_mask &= ~sme_me_mask;
		cc_vendor = CC_VENDOR_AMD;
		cc_set_mask(sme_me_mask);
	}
	RIP_REL_REF(sme_me_mask) = me_mask;
	physical_mask &= ~me_mask;
	cc_vendor = CC_VENDOR_AMD;
	cc_set_mask(me_mask);
}
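After this hunk, SME only turns on for an explicit mem_encrypt=on: the "off" string and the CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT path are gone, matching the command-line documentation change earlier in this merge. A toy stand-in for the cmdline_find_option() plus strncmp check (the parsing code is ours, not the kernel's):

#include <stdio.h>
#include <string.h>

static int sme_requested(const char *cmdline)
{
	const char *p = strstr(cmdline, "mem_encrypt=");
	char buf[16];

	if (!p)
		return 0;
	sscanf(p + strlen("mem_encrypt="), "%15s", buf);
	return strncmp(buf, "on", sizeof(buf)) == 0;	/* only "on" enables */
}

int main(void)
{
	printf("%d %d %d\n",
	       sme_requested("root=/dev/sda mem_encrypt=on"),
	       sme_requested("root=/dev/sda mem_encrypt=off"),
	       sme_requested("root=/dev/sda"));	/* 1 0 0: default is off */
	return 0;
}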
@@ -1149,7 +1149,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)

	bio_for_each_folio_all(fi, bio) {
		struct page *page;
		size_t done = 0;
		size_t nr_pages;

		if (mark_dirty) {
			folio_lock(fi.folio);
@@ -1157,10 +1157,11 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
			folio_unlock(fi.folio);
		}
		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
			   fi.offset / PAGE_SIZE + 1;
		do {
			bio_release_page(bio, page++);
			done += PAGE_SIZE;
		} while (done < fi.length);
		} while (--nr_pages != 0);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);
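The fix above releases one reference per page actually spanned by the folio fragment rather than one per PAGE_SIZE of its length, which under-counted whenever the fragment started mid-page. A worked example of the two counts (numbers ours):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* fragment starts 512 bytes into page 1 of the folio and spans
	 * 8192 bytes, so it touches pages 1, 2 and 3 */
	unsigned long offset = PAGE_SIZE + 512, length = 8192;
	unsigned long nr_pages = (offset + length - 1) / PAGE_SIZE
				 - offset / PAGE_SIZE + 1;
	unsigned long old_iters = (length + PAGE_SIZE - 1) / PAGE_SIZE;

	/* prints "new: 3 pages, old loop: 2" -- the old loop leaked page 3 */
	printf("new: %lu pages, old loop: %lu\n", nr_pages, old_iters);
	return 0;
}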
@@ -767,16 +767,11 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 * For such case, force the completed nbytes to be equal to
		 * the BIO size so that bio_advance() sets the BIO remaining
		 * size to 0 and we end up calling bio_endio() before returning.
		 */
		if (bio->bi_iter.bi_size != nbytes) {
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
			nbytes = bio->bi_iter.bi_size;
		} else {
		else
			bio->bi_iter.bi_sector = rq->__sector;
		}
	}

	bio_advance(bio, nbytes);

@@ -686,6 +686,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
@@ -646,9 +646,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags.sb.shift;

	dd->async_depth = max(1U, 3 * (1U << shift) / 4);
	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
}
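The async_depth change above derives the throttling depth from the queue's request count instead of the sbitmap word shift, which capped far below the real depth. A quick comparison of the two formulas with illustrative values (numbers ours):

#include <stdio.h>

int main(void)
{
	/* shift = 6 is a plausible sbitmap word shift for a 256-tag queue */
	unsigned int shift = 6, nr_requests = 256;
	unsigned int old_depth = 3 * (1U << shift) / 4;	/* 48 */
	unsigned int new_depth = 3 * nr_requests / 4;	/* 192 */

	printf("old=%u new=%u\n", old_depth, new_depth);
	return 0;
}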
@@ -208,8 +208,10 @@ void spk_do_flush(void)
	wake_up_process(speakup_task);
}

void synth_write(const char *buf, size_t count)
void synth_write(const char *_buf, size_t count)
{
	const unsigned char *buf = (const unsigned char *) _buf;

	while (count--)
		synth_buffer_add(*buf++);
	synth_start();

@@ -670,11 +670,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
					 struct ahci_host_priv *hpriv)
{
	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
		dev_info(&pdev->dev, "ASM1166 has only six ports\n");
		hpriv->saved_port_map = 0x3f;
	}

	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
		dev_info(&pdev->dev, "JMB361 has only one port\n");
		hpriv->saved_port_map = 1;

@@ -700,8 +700,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
				ehc->saved_ncq_enabled |= 1 << devno;

			/* If we are resuming, wake up the device */
			if (ap->pflags & ATA_PFLAG_RESUMING)
			if (ap->pflags & ATA_PFLAG_RESUMING) {
				dev->flags |= ATA_DFLAG_RESUMING;
				ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
			}
		}
	}

@@ -3170,6 +3172,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
	return 0;

 err:
	dev->flags &= ~ATA_DFLAG_RESUMING;
	*r_failed_dev = dev;
	return rc;
}

@@ -4765,6 +4765,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	bool do_resume;
	int ret = 0;

	mutex_lock(&ap->scsi_scan_mutex);
@@ -4786,7 +4787,15 @@ void ata_scsi_dev_rescan(struct work_struct *work)
			if (scsi_device_get(sdev))
				continue;

			do_resume = dev->flags & ATA_DFLAG_RESUMING;

			spin_unlock_irqrestore(ap->lock, flags);
			if (do_resume) {
				ret = scsi_resume_device(sdev);
				if (ret == -EWOULDBLOCK)
					goto unlock;
				dev->flags &= ~ATA_DFLAG_RESUMING;
			}
			ret = scsi_rescan_device(sdev);
			scsi_device_put(sdev);
			spin_lock_irqsave(ap->lock, flags);

@@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
		enable_irq(wirq->irq);
		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
	}
}

/**

@@ -1234,6 +1234,9 @@ static int btnxpuart_close(struct hci_dev *hdev)

	ps_wakeup(nxpdev);
	serdev_device_close(nxpdev->serdev);
	skb_queue_purge(&nxpdev->txq);
	kfree_skb(nxpdev->rx_skb);
	nxpdev->rx_skb = NULL;
	clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
	return 0;
}

@@ -919,8 +919,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
	int rc;
	u32 int_status;

	INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);

	rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
				       tis_int_handler, IRQF_ONESHOT | flags,
				       dev_name(&chip->dev), chip);
@@ -1132,6 +1130,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
	priv->phy_ops = phy_ops;
	priv->locality_count = 0;
	mutex_init(&priv->locality_count_mutex);
	INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);

	dev_set_drvdata(&chip->dev, priv);

@@ -856,6 +856,7 @@ static struct clk_rcg2 lpass_sway_clk_src = {

static const struct freq_tbl ftbl_pcie0_aux_clk_src[] = {
	F(2000000, P_XO, 12, 0, 0),
	{ }
};

static struct clk_rcg2 pcie0_aux_clk_src = {
@@ -1098,6 +1099,7 @@ static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = {
	F(100000000, P_GPLL0, 8, 0, 0),
	F(200000000, P_GPLL0, 4, 0, 0),
	F(320000000, P_GPLL0, 2.5, 0, 0),
	{ }
};

static struct clk_rcg2 qpic_io_macro_clk_src = {
@@ -1193,6 +1195,7 @@ static struct clk_rcg2 ubi0_axi_clk_src = {
static const struct freq_tbl ftbl_ubi0_core_clk_src[] = {
	F(850000000, P_UBI32_PLL, 1, 0, 0),
	F(1000000000, P_UBI32_PLL, 1, 0, 0),
	{ }
};

static struct clk_rcg2 ubi0_core_clk_src = {

@@ -1554,6 +1554,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {

static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
	F(24000000, P_XO, 1, 0, 0),
	{ }
};

static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
@@ -1734,6 +1735,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
	F(160000000, P_GPLL0, 5, 0, 0),
	F(216000000, P_GPLL6, 5, 0, 0),
	F(308570000, P_GPLL6, 3.5, 0, 0),
	{ }
};

static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {

@@ -644,6 +644,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {

static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	{ }
};

static const struct clk_parent_data gcc_xo_gpll0_sleep_clk[] = {
@@ -795,6 +796,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
	F(19200000, P_XO, 1, 0, 0),
	F(160000000, P_GPLL0, 5, 0, 0),
	F(308570000, P_GPLL6, 3.5, 0, 0),
	{ }
};

static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {

@@ -2082,6 +2082,7 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
	F(150000000, P_GPLL4, 8, 0, 0),
	F(300000000, P_GPLL4, 4, 0, 0),
	{ }
};

static struct clk_rcg2 sdcc1_ice_core_clk_src = {

@@ -4037,3 +4037,4 @@ module_exit(gcc_sdm845_exit);
MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gcc-sdm845");
MODULE_SOFTDEP("pre: rpmhpd");

@@ -348,6 +348,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
	F(333430000, P_MMPLL1, 3.5, 0, 0),
	F(400000000, P_MMPLL0, 2, 0, 0),
	F(466800000, P_MMPLL1, 2.5, 0, 0),
	{ }
};

static struct clk_rcg2 mmss_axi_clk_src = {
@@ -372,6 +373,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
	F(150000000, P_GPLL0, 4, 0, 0),
	F(228570000, P_MMPLL0, 3.5, 0, 0),
	F(320000000, P_MMPLL0, 2.5, 0, 0),
	{ }
};

static struct clk_rcg2 ocmemnoc_clk_src = {

@@ -290,6 +290,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
	F(291750000, P_MMPLL1, 4, 0, 0),
	F(400000000, P_MMPLL0, 2, 0, 0),
	F(466800000, P_MMPLL1, 2.5, 0, 0),
	{ }
};

static struct clk_rcg2 mmss_axi_clk_src = {
@@ -314,6 +315,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
	F(150000000, P_GPLL0, 4, 0, 0),
	F(291750000, P_MMPLL1, 4, 0, 0),
	F(400000000, P_MMPLL0, 2, 0, 0),
	{ }
};

static struct clk_rcg2 ocmemnoc_clk_src = {

@@ -32,7 +32,7 @@
#define GT_CONTROL_IRQ_ENABLE		BIT(2)	/* banked */
#define GT_CONTROL_AUTO_INC		BIT(3)	/* banked */
#define GT_CONTROL_PRESCALER_SHIFT	8
#define GT_CONTROL_PRESCALER_MAX	0xF
#define GT_CONTROL_PRESCALER_MAX	0xFF
#define GT_CONTROL_PRESCALER_MASK	(GT_CONTROL_PRESCALER_MAX << \
					 GT_CONTROL_PRESCALER_SHIFT)
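The global timer fix above widens the prescaler maximum because the field occupies a full byte (bits 15:8) of the control register; with 0xF the derived mask dropped the upper nibble. A quick check of the two masks (values follow from the defines above):

#include <stdio.h>

int main(void)
{
	unsigned int shift = 8;
	unsigned int old_mask = 0xFU << shift;	/* 0x00000F00 */
	unsigned int new_mask = 0xFFU << shift;	/* 0x0000FF00 */

	printf("old=0x%08X new=0x%08X\n", old_mask, new_mask);
	return 0;
}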
@@ -570,7 +570,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
	if (!priv)
		return -ENOMEM;

	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
	if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, priv->cpus);
@@ -299,22 +299,6 @@ theend:
	return err;
}

static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
}

static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
				      void *async_req)
{
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}

static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	sun8i_ce_cipher_unprepare(engine, areq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
}

int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
	int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
		return err;

	sun8i_ce_cipher_run(engine, areq);
	sun8i_ce_cipher_unprepare(engine, areq);
	return 0;
}
@@ -92,7 +92,8 @@ static void adf_device_reset_worker(struct work_struct *work)
	if (adf_dev_restart(accel_dev)) {
		/* The device hanged and we can't restart it so stop here */
		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
		if (reset_data->mode == ADF_DEV_RESET_ASYNC)
		if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
		    completion_done(&reset_data->compl))
			kfree(reset_data);
		WARN(1, "QAT: device restart failed. Device is unusable\n");
		return;
@@ -100,11 +101,19 @@ static void adf_device_reset_worker(struct work_struct *work)
	adf_dev_restarted_notify(accel_dev);
	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);

	/* The dev is back alive. Notify the caller if in sync mode */
	if (reset_data->mode == ADF_DEV_RESET_SYNC)
		complete(&reset_data->compl);
	else
	/*
	 * The dev is back alive. Notify the caller if in sync mode
	 *
	 * If device restart will take a more time than expected,
	 * the schedule_reset() function can timeout and exit. This can be
	 * detected by calling the completion_done() function. In this case
	 * the reset_data structure needs to be freed here.
	 */
	if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
	    completion_done(&reset_data->compl))
		kfree(reset_data);
	else
		complete(&reset_data->compl);
}

static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
@@ -137,8 +146,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
			dev_err(&GET_DEV(accel_dev),
				"Reset device timeout expired\n");
			ret = -EFAULT;
		} else {
			kfree(reset_data);
		}
		kfree(reset_data);
		return ret;
	}
	return 0;
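The QAT hunks above establish single-owner freeing: whichever side loses the race (the waiter timed out, or the worker finished first) must not free reset_data, and completion_done() is how the worker detects an abandoned waiter. A toy single-threaded model of the handover rule, using an atomic flag as a stand-in for completion_done() (all names ours):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct reset_data { atomic_bool abandoned; };

static void worker_done(struct reset_data *rd)
{
	/* if the waiter already gave up, the worker owns the memory */
	if (atomic_exchange(&rd->abandoned, true))
		free(rd);
	else
		puts("worker: completing, waiter will free");
}

static void waiter_timeout(struct reset_data *rd)
{
	/* if the worker already finished, the waiter owns the memory */
	if (atomic_exchange(&rd->abandoned, true))
		free(rd);
	else
		puts("waiter: timed out, worker will free");
}

int main(void)
{
	struct reset_data *rd = calloc(1, sizeof(*rd));

	waiter_timeout(rd);	/* simulate schedule_reset() timing out */
	worker_done(rd);	/* the worker then frees exactly once */
	return 0;
}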
@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
theend:
	pm_runtime_put_autosuspend(rkc->dev);

	rk_hash_unprepare(engine, breq);

	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();

	rk_hash_unprepare(engine, breq);

	return 0;
}
@@ -642,18 +642,18 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);

TRACE_EVENT(cxl_poison,

	TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
	TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
		 const struct cxl_poison_record *record, u8 flags,
		 __le64 overflow_ts, enum cxl_poison_trace_type trace_type),

	TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),
	TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type),

	TP_STRUCT__entry(
		__string(memdev, dev_name(&cxlmd->dev))
		__string(host, dev_name(cxlmd->dev.parent))
		__field(u64, serial)
		__field(u8, trace_type)
		__string(region, region)
		__string(region, cxlr ? dev_name(&cxlr->dev) : "")
		__field(u64, overflow_ts)
		__field(u64, hpa)
		__field(u64, dpa)
@@ -673,10 +673,10 @@ TRACE_EVENT(cxl_poison,
		__entry->source = cxl_poison_record_source(record);
		__entry->trace_type = trace_type;
		__entry->flags = flags;
		if (region) {
			__assign_str(region, dev_name(&region->dev));
			memcpy(__entry->uuid, &region->params.uuid, 16);
			__entry->hpa = cxl_trace_hpa(region, cxlmd,
		if (cxlr) {
			__assign_str(region, dev_name(&cxlr->dev));
			memcpy(__entry->uuid, &cxlr->params.uuid, 16);
			__entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
						     __entry->dpa);
		} else {
			__assign_str(region, "");
@@ -3773,6 +3773,7 @@ static int pci_probe(struct pci_dev *dev,
	return 0;

 fail_msi:
	devm_free_irq(&dev->dev, dev->irq, ohci);
	pci_disable_msi(dev);

	return err;
@@ -3800,6 +3801,7 @@ static void pci_remove(struct pci_dev *dev)

	software_reset(ohci);

	devm_free_irq(&dev->dev, dev->irq, ohci);
	pci_disable_msi(dev);

	dev_notice(&dev->dev, "removing fw-ohci device\n");
@@ -199,6 +199,8 @@ static bool generic_ops_supported(void)

	name_size = sizeof(name);

	if (!efi.get_next_variable)
		return false;
	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
			continue;
		}

		target = round_up(md->phys_addr, align) + target_slot * align;
		target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
		pages = size / EFI_PAGE_SIZE;

		status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
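The one-liner above clamps the candidate base to the loader's new alloc_min floor before rounding, so randomized allocations can no longer land below it. A worked example with illustrative numbers (macro and values ours; align must be a power of two):

#include <stdio.h>

#define round_up(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* a memory descriptor that starts below the minimum address */
	unsigned long phys_addr = 0x80000, alloc_min = 0x100000, align = 0x1000;
	unsigned long old_base = round_up(phys_addr, align);
	unsigned long new_base = round_up(phys_addr > alloc_min ?
					  phys_addr : alloc_min, align);

	printf("old=0x%lx new=0x%lx\n", old_base, new_base);	/* 0x80000 vs 0x100000 */
	return 0;
}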
@@ -487,6 +487,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
	hdr->vid_mode = 0xffff;

	hdr->type_of_loader = 0x21;
	hdr->initrd_addr_max = INT_MAX;

	/* Convert unicode cmdline to ascii */
	cmdline_ptr = efi_convert_cmdline(image, &options_size);
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
 */
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
	int r;

	if (bo->kfd_bo)
		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
		r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
						    addr, amdgpu_bo_size(bo),
						    &amdgpu_hmm_hsa_ops);
	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
					    amdgpu_bo_size(bo),
					    &amdgpu_hmm_gfx_ops);
	else
		r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
						 amdgpu_bo_size(bo),
						 &amdgpu_hmm_gfx_ops);
	if (r)
		/*
		 * Make sure amdgpu_hmm_unregister() doesn't call
		 * mmu_interval_notifier_remove() when the notifier isn't properly
		 * initialized.
		 */
		bo->notifier.mm = NULL;

	return r;
}

/**
@@ -520,46 +520,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	volatile u32 *mqd;
	int r;
	u32 *kbuf;
	int r, i;
	uint32_t value, result;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;
	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;
		goto err_free;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
	if (r) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}
	if (r)
		goto err_unreserve;

	/*
	 * Copy to local buffer to avoid put_user(), which might fault
	 * and acquire mmap_sem, under reservation_ww_class_mutex.
	 */
	for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
		kbuf[i] = mqd[i];

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);

	result = 0;
	while (size) {
		if (*pos >= ring->mqd_size)
			goto done;
			break;

		value = mqd[*pos/4];
		value = kbuf[*pos/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			goto done;
			goto err_free;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

done:
	amdgpu_bo_kunmap(ring->mqd_obj);
	mqd = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r)
		return r;

	kfree(kbuf);
	return result;

err_unreserve:
	amdgpu_bo_unreserve(ring->mqd_obj);
err_free:
	kfree(kbuf);
	return r;
}

static const struct file_operations amdgpu_debugfs_mqd_fops = {
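The rework above snapshots the MQD into a plain kernel buffer while the reservation is held, then performs the fault-prone put_user() copy-out only after dropping it. A toy model of the bounce-buffer ordering, with memcpy standing in for the device mapping and for put_user() (all names ours):

#include <stdio.h>
#include <string.h>

int main(void)
{
	enum { MQD_WORDS = 16 };
	unsigned int mqd[MQD_WORDS], kbuf[MQD_WORDS], user[MQD_WORDS];

	for (int i = 0; i < MQD_WORDS; i++)
		mqd[i] = i;

	/* "reserve + kmap" window: only the snapshot happens here */
	memcpy(kbuf, mqd, sizeof(mqd));
	/* "unreserve": the copy to user space may now fault safely */
	memcpy(user, kbuf, sizeof(kbuf));

	printf("user[5]=%u\n", user[5]);
	return 0;
}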
@@ -869,6 +869,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
	gtt->bound = true;
}

/*

@@ -1466,7 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
@@ -6121,9 +6121,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
	else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
		 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
		 stream->signal == SIGNAL_TYPE_EDP) {

	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
@@ -6139,9 +6138,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

		if (stream->link->psr_settings.psr_feature_enabled)
			aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
finish:
	dc_sink_release(sink);
@@ -10746,18 +10744,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		if (is_dp_capable_without_timing_msa(adev->dm.dc,
						     amdgpu_dm_connector)) {
			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
				freesync_capable = true;
				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
			} else {
				edid_check_required = edid->version > 1 ||
						      (edid->version == 1 &&
						       edid->revision > 1);
			}
		}

		if (edid_check_required == true && (edid->version > 1 ||
			(edid->version == 1 && edid->revision > 1))) {
		if (edid_check_required) {
			for (i = 0; i < 4; i++) {

				timing = &edid->detailed_timings[i];
@@ -10777,14 +10781,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				if (edid->revision >= 4) {
					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
						connector->display_info.monitor_range.min_vfreq += 255;
					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
						connector->display_info.monitor_range.max_vfreq += 255;
				}

				amdgpu_dm_connector->min_vfreq =
					connector->display_info.monitor_range.min_vfreq;
				amdgpu_dm_connector->max_vfreq =
					connector->display_info.monitor_range.max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				break;
			}

@@ -619,10 +619,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
	if (pipe_ctx == NULL)
		return;

	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
				pipe_ctx->stream_res.stream_enc,
				enable);

		/* Wait for two frame to make sure AV mute is sent out */
		if (enable) {
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
		}
	}
}

void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
@@ -142,6 +142,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
		     OPTC_SEG0_SRC_SEL, 0xf,
		     OPTC_SEG1_SRC_SEL, 0xf,
		     OPTC_SEG2_SRC_SEL, 0xf,
		     OPTC_SEG3_SRC_SEL, 0xf,
		     OPTC_NUM_OF_INPUT_SEGMENT, 0);

	REG_UPDATE(OPTC_MEMORY_CONFIG,
		   OPTC_MEM_SEL, 0);

	/* disable otg request until end of the first line
	 * in the vertical blank region
	 */
@@ -174,6 +184,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
		     OPTC_SEG0_SRC_SEL, 0xf,
		     OPTC_SEG1_SRC_SEL, 0xf,
		     OPTC_SEG2_SRC_SEL, 0xf,
		     OPTC_SEG3_SRC_SEL, 0xf,
		     OPTC_NUM_OF_INPUT_SEGMENT, 0);

	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}
Some files were not shown because too many files have changed in this diff.