commit 041c3466f3

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

net/mac80211/key.c
  02e0e426a2 ("wifi: mac80211: fix error path key leak")
  2a8b665e6b ("wifi: mac80211: remove key_mtx")
  7d6904bf26 ("Merge wireless into wireless-next")
https://lore.kernel.org/all/20231012113648.46eea5ec@canb.auug.org.au/

Adjacent changes:

drivers/net/ethernet/ti/Kconfig
  a602ee3176 ("net: ethernet: ti: Fix mixed module-builtin object")
  98bdeae950 ("net: cpmac: remove driver to prepare for platform removal")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -1,7 +1,7 @@
 What: /sys/class/firmware/.../data
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: The data sysfs file is used for firmware-fallback and for
  firmware uploads. Cat a firmware image to this sysfs file
  after you echo 1 to the loading sysfs file. When the firmware
@@ -13,7 +13,7 @@ Description: The data sysfs file is used for firmware-fallback and for
 What: /sys/class/firmware/.../cancel
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: Write-only. For firmware uploads, write a "1" to this file to
  request that the transfer of firmware data to the lower-level
  device be canceled. This request will be rejected (EBUSY) if
@@ -23,7 +23,7 @@ Description: Write-only. For firmware uploads, write a "1" to this file to
 What: /sys/class/firmware/.../error
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: Read-only. Returns a string describing a failed firmware
  upload. This string will be in the form of <STATUS>:<ERROR>,
  where <STATUS> will be one of the status strings described
@@ -37,7 +37,7 @@ Description: Read-only. Returns a string describing a failed firmware
 What: /sys/class/firmware/.../loading
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: The loading sysfs file is used for both firmware-fallback and
  for firmware uploads. Echo 1 onto the loading file to indicate
  you are writing a firmware file to the data sysfs node. Echo
@@ -49,7 +49,7 @@ Description: The loading sysfs file is used for both firmware-fallback and
 What: /sys/class/firmware/.../remaining_size
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: Read-only. For firmware upload, this file contains the size
  of the firmware data that remains to be transferred to the
  lower-level device driver. The size value is initialized to
@@ -62,7 +62,7 @@ Description: Read-only. For firmware upload, this file contains the size
 What: /sys/class/firmware/.../status
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: Read-only. Returns a string describing the current status of
  a firmware upload. The string will be one of the following:
  idle, "receiving", "preparing", "transferring", "programming".
@@ -70,7 +70,7 @@ Description: Read-only. Returns a string describing the current status of
 What: /sys/class/firmware/.../timeout
 Date: July 2022
 KernelVersion: 5.19
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Russ Weight <russ.weight@linux.dev>
 Description: This file supports the timeout mechanism for firmware
  fallback. This file has no affect on firmware uploads. For
  more information on timeouts please see the documentation
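As a side note, the upload flow these attributes describe (write 1 to loading, stream the image into data, then write 0) can be driven from user space. The sketch below is illustrative only; the device name "fw-upload0" and the image path are hypothetical and not part of the ABI text above.

/* Illustrative sketch of the sysfs upload sequence described above. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, s, strlen(s)) < 0) {
		perror(path);
		exit(1);
	}
	close(fd);
}

int main(void)
{
	/* "fw-upload0" and "firmware.img" are made-up example names. */
	const char *loading = "/sys/class/firmware/fw-upload0/loading";
	const char *data = "/sys/class/firmware/fw-upload0/data";
	char buf[4096];
	ssize_t n;
	int img, out;

	write_str(loading, "1");		/* announce the transfer */

	img = open("firmware.img", O_RDONLY);
	out = open(data, O_WRONLY);
	if (img < 0 || out < 0) {
		perror("open");
		exit(1);
	}
	while ((n = read(img, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {	/* "cat" the image into data */
			perror("write");
			exit(1);
		}
	close(img);
	close(out);

	write_str(loading, "0");		/* signal that the image is complete */
	return 0;
}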
@@ -244,7 +244,7 @@ unbound worker-pools and only one work item could be active at any given
 time thus achieving the same ordering property as ST wq.
 
 In the current implementation the above configuration only guarantees
-ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should
+ST behavior within a given NUMA node. Instead ``alloc_ordered_workqueue()`` should
 be used to achieve system-wide ST behavior.
 
 
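For reference, a minimal sketch of the call the corrected sentence points to; the queue name, flag and work item are arbitrary examples, not taken from this patch:

	struct workqueue_struct *wq;

	/* one work item at a time, ordered system-wide */
	wq = alloc_ordered_workqueue("example_ordered_wq", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &example_work);	/* example_work is a hypothetical work_struct */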
@@ -390,7 +390,7 @@ The default affinity scope can be changed with the module parameter
 scope can be changed using ``apply_workqueue_attrs()``.
 
 If ``WQ_SYSFS`` is set, the workqueue will have the following affinity scope
-related interface files under its ``/sys/devices/virtual/WQ_NAME/``
+related interface files under its ``/sys/devices/virtual/workqueue/WQ_NAME/``
 directory.
 
 ``affinity_scope``
@@ -13,6 +13,8 @@ description: |
 
 maintainers:
   - Michael Tretter <m.tretter@pengutronix.de>
+  - Harini Katakam <harini.katakam@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 allOf:
   - $ref: ../dma-controller.yaml#
@@ -65,6 +67,7 @@ required:
   - interrupts
   - clocks
   - clock-names
+  - xlnx,bus-width
 
 additionalProperties: false
 
@@ -61,7 +61,7 @@ patternProperties:
 required:
   - reg
 
-additionalProperties: true
+additionalProperties: false
 
 allOf:
   - $ref: /schemas/spi/spi-peripheral-props.yaml#
@@ -45,5 +45,6 @@ examples:
       light-sensor@38 {
           compatible = "rohm,bu27010";
           reg = <0x38>;
+          vdd-supply = <&vdd>;
       };
     };
@@ -339,6 +339,18 @@ The specified lower directories will be stacked beginning from the
 rightmost one and going left. In the above example lower1 will be the
 top, lower2 the middle and lower3 the bottom layer.
 
+Note: directory names containing colons can be provided as lower layer by
+escaping the colons with a single backslash. For example:
+
+  mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
+
+Since kernel version v6.5, directory names containing colons can also
+be provided as lower layer using the fsconfig syscall from new mount api:
+
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
+
+In the latter case, colons in lower layer directory names will be escaped
+as an octal characters (\072) when displayed in /proc/self/mountinfo.
 
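A hedged user-space sketch of the fsconfig() route mentioned above; the upperdir, workdir and /merged paths are placeholders, error handling is omitted, and raw syscall() wrappers are used since libc may not provide these functions:

#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fs_fd, mnt_fd;

	fs_fd = syscall(SYS_fsopen, "overlay", FSOPEN_CLOEXEC);
	/* lower layer name containing colons, exactly as in the text above */
	syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
	/* hypothetical upper/work dirs so the overlay can be created */
	syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "upperdir", "/upper", 0);
	syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "workdir", "/work", 0);
	syscall(SYS_fsconfig, fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	mnt_fd = syscall(SYS_fsmount, fs_fd, FSMOUNT_CLOEXEC, 0);
	syscall(SYS_move_mount, mnt_fd, "", AT_FDCWD, "/merged",
		MOVE_MOUNT_F_EMPTY_PATH);
	return 0;
}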
 Metadata only copy up
 ---------------------
@@ -313,7 +313,7 @@ operations:
         - dev-name
         - sb-index
       reply: &sb-get-reply
-        value: 11
+        value: 13
         attributes: *sb-id-attrs
       dump:
         request:
@@ -340,7 +340,7 @@ operations:
         - sb-index
         - sb-pool-index
       reply: &sb-pool-get-reply
-        value: 15
+        value: 17
         attributes: *sb-pool-id-attrs
       dump:
         request:
@@ -368,7 +368,7 @@ operations:
         - sb-index
         - sb-pool-index
       reply: &sb-port-pool-get-reply
-        value: 19
+        value: 21
         attributes: *sb-port-pool-id-attrs
       dump:
         request:
@@ -397,7 +397,7 @@ operations:
         - sb-pool-type
         - sb-tc-index
       reply: &sb-tc-pool-bind-get-reply
-        value: 23
+        value: 25
         attributes: *sb-tc-pool-bind-id-attrs
       dump:
         request:
@@ -528,7 +528,7 @@ operations:
         - dev-name
         - trap-name
       reply: &trap-get-reply
-        value: 61
+        value: 63
         attributes: *trap-id-attrs
       dump:
         request:
@@ -554,7 +554,7 @@ operations:
         - dev-name
         - trap-group-name
       reply: &trap-group-get-reply
-        value: 65
+        value: 67
         attributes: *trap-group-id-attrs
       dump:
         request:
@@ -580,7 +580,7 @@ operations:
         - dev-name
         - trap-policer-id
       reply: &trap-policer-get-reply
-        value: 69
+        value: 71
         attributes: *trap-policer-id-attrs
       dump:
         request:
@@ -607,7 +607,7 @@ operations:
         - port-index
         - rate-node-name
       reply: &rate-get-reply
-        value: 74
+        value: 76
         attributes: *rate-id-attrs
       dump:
         request:
@@ -633,7 +633,7 @@ operations:
         - dev-name
         - linecard-index
       reply: &linecard-get-reply
-        value: 78
+        value: 80
         attributes: *linecard-id-attrs
       dump:
         request:
@@ -162,9 +162,11 @@ How are representors identified?
 The representor netdevice should *not* directly refer to a PCIe device (e.g.
 through ``net_dev->dev.parent`` / ``SET_NETDEV_DEV()``), either of the
 representee or of the switchdev function.
-Instead, it should implement the ``ndo_get_devlink_port()`` netdevice op, which
-the kernel uses to provide the ``phys_switch_id`` and ``phys_port_name`` sysfs
-nodes. (Some legacy drivers implement ``ndo_get_port_parent_id()`` and
+Instead, the driver should use the ``SET_NETDEV_DEVLINK_PORT`` macro to
+assign a devlink port instance to the netdevice before registering the
+netdevice; the kernel uses the devlink port to provide the ``phys_switch_id``
+and ``phys_port_name`` sysfs nodes.
+(Some legacy drivers implement ``ndo_get_port_parent_id()`` and
 ``ndo_get_phys_port_name()`` directly, but this is deprecated.) See
 :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>` for the
 details of this API.
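A rough, driver-side sketch of the sequence the updated paragraph describes; the rep structure, devlink instance, vf_idx and port_index names are illustrative and not from this patch:

	/* Attach a devlink port to the representor before register_netdev(). */
	struct devlink_port_attrs attrs = {
		.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF,
		.pci_vf = { .pf = 0, .vf = vf_idx },
	};
	int err;

	devlink_port_attrs_set(&rep->dl_port, &attrs);
	err = devlink_port_register(devlink, &rep->dl_port, port_index);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(rep->netdev, &rep->dl_port);
	err = register_netdev(rep->netdev);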
@@ -25,15 +25,15 @@ Contact
 The Linux kernel hardware security team is separate from the regular Linux
 kernel security team.
 
-The team only handles the coordination of embargoed hardware security
-issues. Reports of pure software security bugs in the Linux kernel are not
+The team only handles developing fixes for embargoed hardware security
+issues. Reports of pure software security bugs in the Linux kernel are not
 handled by this team and the reporter will be guided to contact the regular
 Linux kernel security team (:ref:`Documentation/admin-guide/
 <securitybugs>`) instead.
 
 The team can be contacted by email at <hardware-security@kernel.org>. This
-is a private list of security officers who will help you to coordinate an
-issue according to our documented process.
+is a private list of security officers who will help you to coordinate a
+fix according to our documented process.
 
 The list is encrypted and email to the list can be sent by either PGP or
 S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
@@ -132,11 +132,11 @@ other hardware could be affected.
 
 The hardware security team will provide an incident-specific encrypted
 mailing-list which will be used for initial discussion with the reporter,
-further disclosure and coordination.
+further disclosure, and coordination of fixes.
 
 The hardware security team will provide the disclosing party a list of
 developers (domain experts) who should be informed initially about the
-issue after confirming with the developers that they will adhere to this
+issue after confirming with the developers that they will adhere to this
 Memorandum of Understanding and the documented process. These developers
 form the initial response team and will be responsible for handling the
 issue after initial contact. The hardware security team is supporting the
@@ -209,13 +209,18 @@ five work days this is taken as silent acknowledgement.
 After acknowledgement or resolution of an objection the expert is disclosed
 by the incident team and brought into the development process.
 
+List participants may not communicate about the issue outside of the
+private mailing list. List participants may not use any shared resources
+(e.g. employer build farms, CI systems, etc) when working on patches.
+
 
 Coordinated release
 """""""""""""""""""
 
 The involved parties will negotiate the date and time where the embargo
 ends. At that point the prepared mitigations are integrated into the
-relevant kernel trees and published.
+relevant kernel trees and published. There is no pre-notification process:
+fixes are published in public and available to everyone at the same time.
 
 While we understand that hardware security issues need coordinated embargo
 time, the embargo time should be constrained to the minimum time which is
@@ -91,9 +91,9 @@ The prototype of the entry/exit callback function are as follows:
 
 .. code-block:: c
 
- int entry_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ int entry_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
 
- void exit_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ void exit_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
 
 Note that the @entry_ip is saved at function entry and passed to exit handler.
 If the entry callback function returns !0, the corresponding exit callback will be cancelled.
@@ -108,6 +108,10 @@ If the entry callback function returns !0, the corresponding exit callback will
 Note that this may not be the actual entry address of the function but
 the address where the ftrace is instrumented.
 
+@ret_ip
+ This is the return address that the traced function will return to,
+ somewhere in the caller. This can be used at both entry and exit.
+
 @regs
  This is the `pt_regs` data structure at the entry and exit. Note that
  the instruction pointer of @regs may be different from the @entry_ip
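A minimal module sketch wired to the updated prototypes shown above; the filter pattern "vfs_read" and the module boilerplate are illustrative only:

#include <linux/fprobe.h>
#include <linux/module.h>

static int my_entry(struct fprobe *fp, unsigned long entry_ip,
		    unsigned long ret_ip, struct pt_regs *regs,
		    void *entry_data)
{
	pr_info("enter %pS, will return to %pS\n",
		(void *)entry_ip, (void *)ret_ip);
	return 0;	/* a non-zero return would cancel the exit callback */
}

static void my_exit(struct fprobe *fp, unsigned long entry_ip,
		    unsigned long ret_ip, struct pt_regs *regs,
		    void *entry_data)
{
	pr_info("exit  %pS\n", (void *)entry_ip);
}

static struct fprobe sample_probe = {
	.entry_handler	= my_entry,
	.exit_handler	= my_exit,
};

static int __init fprobe_sample_init(void)
{
	return register_fprobe(&sample_probe, "vfs_read", NULL);
}

static void __exit fprobe_sample_exit(void)
{
	unregister_fprobe(&sample_probe);
}

module_init(fprobe_sample_init);
module_exit(fprobe_sample_exit);
MODULE_LICENSE("GPL");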
@@ -202,7 +202,7 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发
 同的排序属性。
 
 在目前的实现中,上述配置只保证了特定NUMA节点内的ST行为。相反,
-``alloc_ordered_queue()`` 应该被用来实现全系统的ST行为。
+``alloc_ordered_workqueue()`` 应该被用来实现全系统的ST行为。
 
 
 执行场景示例
@@ -8114,7 +8114,7 @@ F: include/linux/arm_ffa.h
 
 FIRMWARE LOADER (request_firmware)
 M: Luis Chamberlain <mcgrof@kernel.org>
-M: Russ Weight <russell.h.weight@intel.com>
+M: Russ Weight <russ.weight@linux.dev>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 F: Documentation/firmware_class/
Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@ -344,14 +344,14 @@
|
||||
*/
|
||||
#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51))
|
||||
#define __HFGRTR_EL2_MASK GENMASK(49, 0)
|
||||
#define __HFGRTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
|
||||
#define __HFGRTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
|
||||
|
||||
#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \
|
||||
BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
|
||||
GENMASK(26, 25) | BIT(21) | BIT(18) | \
|
||||
GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
|
||||
#define __HFGWTR_EL2_MASK GENMASK(49, 0)
|
||||
#define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
|
||||
#define __HFGWTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
|
||||
|
||||
#define __HFGITR_EL2_RES0 GENMASK(63, 57)
|
||||
#define __HFGITR_EL2_MASK GENMASK(54, 0)
|
||||
|
@ -55,11 +55,6 @@ static struct irq_ops arch_timer_irq_ops = {
|
||||
.get_input_level = kvm_arch_timer_get_input_level,
|
||||
};
|
||||
|
||||
static bool has_cntpoff(void)
|
||||
{
|
||||
return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
|
||||
}
|
||||
|
||||
static int nr_timers(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!vcpu_has_nv(vcpu))
|
||||
@ -180,7 +175,7 @@ u64 kvm_phys_timer_read(void)
|
||||
return timecounter->cc->read(timecounter->cc);
|
||||
}
|
||||
|
||||
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
|
||||
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
|
||||
{
|
||||
if (vcpu_has_nv(vcpu)) {
|
||||
if (is_hyp_ctxt(vcpu)) {
|
||||
@ -548,8 +543,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
|
||||
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
|
||||
cval = read_sysreg_el0(SYS_CNTP_CVAL);
|
||||
|
||||
if (!has_cntpoff())
|
||||
cval -= timer_get_offset(ctx);
|
||||
cval -= timer_get_offset(ctx);
|
||||
|
||||
timer_set_cval(ctx, cval);
|
||||
|
||||
@ -636,8 +630,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
|
||||
cval = timer_get_cval(ctx);
|
||||
offset = timer_get_offset(ctx);
|
||||
set_cntpoff(offset);
|
||||
if (!has_cntpoff())
|
||||
cval += offset;
|
||||
cval += offset;
|
||||
write_sysreg_el0(cval, SYS_CNTP_CVAL);
|
||||
isb();
|
||||
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
|
||||
|
@ -977,6 +977,8 @@ enum fg_filter_id {
|
||||
|
||||
static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
|
||||
/* HFGRTR_EL2, HFGWTR_EL2 */
|
||||
SR_FGT(SYS_PIR_EL1, HFGxTR, nPIR_EL1, 0),
|
||||
SR_FGT(SYS_PIRE0_EL1, HFGxTR, nPIRE0_EL1, 0),
|
||||
SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
|
||||
SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
|
||||
SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
|
||||
|
@ -39,6 +39,26 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
|
||||
___activate_traps(vcpu);
|
||||
|
||||
if (has_cntpoff()) {
|
||||
struct timer_map map;
|
||||
|
||||
get_timer_map(vcpu, &map);
|
||||
|
||||
/*
|
||||
* We're entrering the guest. Reload the correct
|
||||
* values from memory now that TGE is clear.
|
||||
*/
|
||||
if (map.direct_ptimer == vcpu_ptimer(vcpu))
|
||||
val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
|
||||
if (map.direct_ptimer == vcpu_hptimer(vcpu))
|
||||
val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
|
||||
|
||||
if (map.direct_ptimer) {
|
||||
write_sysreg_el0(val, SYS_CNTP_CVAL);
|
||||
isb();
|
||||
}
|
||||
}
|
||||
|
||||
val = read_sysreg(cpacr_el1);
|
||||
val |= CPACR_ELx_TTA;
|
||||
val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
|
||||
@ -77,6 +97,30 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
|
||||
|
||||
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
|
||||
|
||||
if (has_cntpoff()) {
|
||||
struct timer_map map;
|
||||
u64 val, offset;
|
||||
|
||||
get_timer_map(vcpu, &map);
|
||||
|
||||
/*
|
||||
* We're exiting the guest. Save the latest CVAL value
|
||||
* to memory and apply the offset now that TGE is set.
|
||||
*/
|
||||
val = read_sysreg_el0(SYS_CNTP_CVAL);
|
||||
if (map.direct_ptimer == vcpu_ptimer(vcpu))
|
||||
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
|
||||
if (map.direct_ptimer == vcpu_hptimer(vcpu))
|
||||
__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
|
||||
|
||||
offset = read_sysreg_s(SYS_CNTPOFF_EL2);
|
||||
|
||||
if (map.direct_ptimer && offset) {
|
||||
write_sysreg_el0(val + offset, SYS_CNTP_CVAL);
|
||||
isb();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* ARM errata 1165522 and 1530923 require the actual execution of the
|
||||
* above before we can switch to the EL2/EL0 translation regime used by
|
||||
|
@ -39,7 +39,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
|
||||
{
|
||||
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
|
||||
|
||||
if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
|
||||
if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
|
||||
return;
|
||||
|
||||
if (!attr->exclude_host)
|
||||
@ -55,7 +55,7 @@ void kvm_clr_pmu_events(u32 clr)
|
||||
{
|
||||
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
|
||||
|
||||
if (!kvm_arm_support_pmu_v3() || !pmu)
|
||||
if (!kvm_arm_support_pmu_v3())
|
||||
return;
|
||||
|
||||
pmu->events_host &= ~clr;
|
||||
|
@ -2122,8 +2122,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
|
||||
|
||||
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
|
||||
{ SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
|
||||
{ SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
|
||||
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
|
||||
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
|
||||
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
|
||||
|
||||
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
|
||||
|
@ -15,9 +15,4 @@ DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
|
||||
|
||||
DECLARE_PER_CPU(int, cpu_state);
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
extern int arch_register_cpu(int num);
|
||||
extern void arch_unregister_cpu(int);
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_IA64_CPU_H_ */
|
||||
|
@ -59,7 +59,7 @@ void __ref arch_unregister_cpu(int num)
|
||||
}
|
||||
EXPORT_SYMBOL(arch_unregister_cpu);
|
||||
#else
|
||||
static int __init arch_register_cpu(int num)
|
||||
int __init arch_register_cpu(int num)
|
||||
{
|
||||
return register_cpu(&sysfs_cpus[num].cpu, num);
|
||||
}
|
||||
|
@ -52,10 +52,9 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
|
||||
* @offset: bus address of the memory
|
||||
* @size: size of the resource to map
|
||||
*/
|
||||
extern pgprot_t pgprot_wc;
|
||||
|
||||
#define ioremap_wc(offset, size) \
|
||||
ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
|
||||
ioremap_prot((offset), (size), \
|
||||
pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
|
||||
|
||||
#define ioremap_cache(offset, size) \
|
||||
ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
|
||||
|
@ -33,4 +33,12 @@
|
||||
.cfi_endproc; \
|
||||
SYM_END(name, SYM_T_FUNC)
|
||||
|
||||
#define SYM_CODE_START(name) \
|
||||
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
|
||||
.cfi_startproc;
|
||||
|
||||
#define SYM_CODE_END(name) \
|
||||
.cfi_endproc; \
|
||||
SYM_END(name, SYM_T_NONE)
|
||||
|
||||
#endif
|
||||
|
@ -105,13 +105,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
|
||||
return __pgprot(prot);
|
||||
}
|
||||
|
||||
extern bool wc_enabled;
|
||||
|
||||
#define pgprot_writecombine pgprot_writecombine
|
||||
|
||||
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
|
||||
{
|
||||
unsigned long prot = pgprot_val(_prot);
|
||||
|
||||
prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
|
||||
prot = (prot & ~_CACHE_MASK) | (wc_enabled ? _CACHE_WUC : _CACHE_SUC);
|
||||
|
||||
return __pgprot(prot);
|
||||
}
|
||||
|
@ -18,7 +18,7 @@
|
||||
.text
|
||||
.cfi_sections .debug_frame
|
||||
.align 5
|
||||
SYM_FUNC_START(handle_syscall)
|
||||
SYM_CODE_START(handle_syscall)
|
||||
csrrd t0, PERCPU_BASE_KS
|
||||
la.pcrel t1, kernelsp
|
||||
add.d t1, t1, t0
|
||||
@ -71,7 +71,7 @@ SYM_FUNC_START(handle_syscall)
|
||||
bl do_syscall
|
||||
|
||||
RESTORE_ALL_AND_RET
|
||||
SYM_FUNC_END(handle_syscall)
|
||||
SYM_CODE_END(handle_syscall)
|
||||
_ASM_NOKPROBE(handle_syscall)
|
||||
|
||||
SYM_CODE_START(ret_from_fork)
|
||||
|
@ -31,7 +31,7 @@ SYM_FUNC_START(__arch_cpu_idle)
|
||||
1: jr ra
|
||||
SYM_FUNC_END(__arch_cpu_idle)
|
||||
|
||||
SYM_FUNC_START(handle_vint)
|
||||
SYM_CODE_START(handle_vint)
|
||||
BACKUP_T0T1
|
||||
SAVE_ALL
|
||||
la_abs t1, __arch_cpu_idle
|
||||
@ -46,11 +46,11 @@ SYM_FUNC_START(handle_vint)
|
||||
la_abs t0, do_vint
|
||||
jirl ra, t0, 0
|
||||
RESTORE_ALL_AND_RET
|
||||
SYM_FUNC_END(handle_vint)
|
||||
SYM_CODE_END(handle_vint)
|
||||
|
||||
SYM_FUNC_START(except_vec_cex)
|
||||
SYM_CODE_START(except_vec_cex)
|
||||
b cache_parity_error
|
||||
SYM_FUNC_END(except_vec_cex)
|
||||
SYM_CODE_END(except_vec_cex)
|
||||
|
||||
.macro build_prep_badv
|
||||
csrrd t0, LOONGARCH_CSR_BADV
|
||||
@ -66,7 +66,7 @@ SYM_FUNC_END(except_vec_cex)
|
||||
|
||||
.macro BUILD_HANDLER exception handler prep
|
||||
.align 5
|
||||
SYM_FUNC_START(handle_\exception)
|
||||
SYM_CODE_START(handle_\exception)
|
||||
666:
|
||||
BACKUP_T0T1
|
||||
SAVE_ALL
|
||||
@ -76,7 +76,7 @@ SYM_FUNC_END(except_vec_cex)
|
||||
jirl ra, t0, 0
|
||||
668:
|
||||
RESTORE_ALL_AND_RET
|
||||
SYM_FUNC_END(handle_\exception)
|
||||
SYM_CODE_END(handle_\exception)
|
||||
SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
|
||||
.endm
|
||||
|
||||
@ -93,7 +93,7 @@ SYM_FUNC_END(except_vec_cex)
|
||||
BUILD_HANDLER watch watch none
|
||||
BUILD_HANDLER reserved reserved none /* others */
|
||||
|
||||
SYM_FUNC_START(handle_sys)
|
||||
SYM_CODE_START(handle_sys)
|
||||
la_abs t0, handle_syscall
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_sys)
|
||||
SYM_CODE_END(handle_sys)
|
||||
|
@ -161,19 +161,19 @@ static void __init smbios_parse(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_WRITECOMBINE
|
||||
pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
|
||||
bool wc_enabled = true;
|
||||
#else
|
||||
pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
|
||||
bool wc_enabled = false;
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(pgprot_wc);
|
||||
EXPORT_SYMBOL(wc_enabled);
|
||||
|
||||
static int __init setup_writecombine(char *p)
|
||||
{
|
||||
if (!strcmp(p, "on"))
|
||||
pgprot_wc = PAGE_KERNEL_WUC;
|
||||
wc_enabled = true;
|
||||
else if (!strcmp(p, "off"))
|
||||
pgprot_wc = PAGE_KERNEL_SUC;
|
||||
wc_enabled = false;
|
||||
else
|
||||
pr_warn("Unknown writecombine setting \"%s\".\n", p);
|
||||
|
||||
|
@ -43,11 +43,11 @@ void copy_user_highpage(struct page *to, struct page *from,
|
||||
{
|
||||
void *vfrom, *vto;
|
||||
|
||||
vto = kmap_atomic(to);
|
||||
vfrom = kmap_atomic(from);
|
||||
vfrom = kmap_local_page(from);
|
||||
vto = kmap_local_page(to);
|
||||
copy_page(vto, vfrom);
|
||||
kunmap_atomic(vfrom);
|
||||
kunmap_atomic(vto);
|
||||
kunmap_local(vfrom);
|
||||
kunmap_local(vto);
|
||||
/* Make sure this page is cleared on other CPU's too before using it */
|
||||
smp_wmb();
|
||||
}
|
||||
@ -240,6 +240,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
|
||||
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
|
||||
#ifndef __PAGETABLE_PUD_FOLDED
|
||||
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
|
||||
EXPORT_SYMBOL(invalid_pud_table);
|
||||
#endif
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
|
||||
|
@ -17,7 +17,7 @@
|
||||
#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
|
||||
|
||||
.macro tlb_do_page_fault, write
|
||||
SYM_FUNC_START(tlb_do_page_fault_\write)
|
||||
SYM_CODE_START(tlb_do_page_fault_\write)
|
||||
SAVE_ALL
|
||||
csrrd a2, LOONGARCH_CSR_BADV
|
||||
move a0, sp
|
||||
@ -25,13 +25,13 @@
|
||||
li.w a1, \write
|
||||
bl do_page_fault
|
||||
RESTORE_ALL_AND_RET
|
||||
SYM_FUNC_END(tlb_do_page_fault_\write)
|
||||
SYM_CODE_END(tlb_do_page_fault_\write)
|
||||
.endm
|
||||
|
||||
tlb_do_page_fault 0
|
||||
tlb_do_page_fault 1
|
||||
|
||||
SYM_FUNC_START(handle_tlb_protect)
|
||||
SYM_CODE_START(handle_tlb_protect)
|
||||
BACKUP_T0T1
|
||||
SAVE_ALL
|
||||
move a0, sp
|
||||
@ -41,9 +41,9 @@ SYM_FUNC_START(handle_tlb_protect)
|
||||
la_abs t0, do_page_fault
|
||||
jirl ra, t0, 0
|
||||
RESTORE_ALL_AND_RET
|
||||
SYM_FUNC_END(handle_tlb_protect)
|
||||
SYM_CODE_END(handle_tlb_protect)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_load)
|
||||
SYM_CODE_START(handle_tlb_load)
|
||||
csrwr t0, EXCEPTION_KS0
|
||||
csrwr t1, EXCEPTION_KS1
|
||||
csrwr ra, EXCEPTION_KS2
|
||||
@ -187,16 +187,16 @@ nopage_tlb_load:
|
||||
csrrd ra, EXCEPTION_KS2
|
||||
la_abs t0, tlb_do_page_fault_0
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_tlb_load)
|
||||
SYM_CODE_END(handle_tlb_load)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_load_ptw)
|
||||
SYM_CODE_START(handle_tlb_load_ptw)
|
||||
csrwr t0, LOONGARCH_CSR_KS0
|
||||
csrwr t1, LOONGARCH_CSR_KS1
|
||||
la_abs t0, tlb_do_page_fault_0
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_tlb_load_ptw)
|
||||
SYM_CODE_END(handle_tlb_load_ptw)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_store)
|
||||
SYM_CODE_START(handle_tlb_store)
|
||||
csrwr t0, EXCEPTION_KS0
|
||||
csrwr t1, EXCEPTION_KS1
|
||||
csrwr ra, EXCEPTION_KS2
|
||||
@ -343,16 +343,16 @@ nopage_tlb_store:
|
||||
csrrd ra, EXCEPTION_KS2
|
||||
la_abs t0, tlb_do_page_fault_1
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_tlb_store)
|
||||
SYM_CODE_END(handle_tlb_store)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_store_ptw)
|
||||
SYM_CODE_START(handle_tlb_store_ptw)
|
||||
csrwr t0, LOONGARCH_CSR_KS0
|
||||
csrwr t1, LOONGARCH_CSR_KS1
|
||||
la_abs t0, tlb_do_page_fault_1
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_tlb_store_ptw)
|
||||
SYM_CODE_END(handle_tlb_store_ptw)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_modify)
|
||||
SYM_CODE_START(handle_tlb_modify)
|
||||
csrwr t0, EXCEPTION_KS0
|
||||
csrwr t1, EXCEPTION_KS1
|
||||
csrwr ra, EXCEPTION_KS2
|
||||
@ -497,16 +497,16 @@ nopage_tlb_modify:
|
||||
csrrd ra, EXCEPTION_KS2
|
||||
la_abs t0, tlb_do_page_fault_1
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_tlb_modify)
|
||||
SYM_CODE_END(handle_tlb_modify)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_modify_ptw)
|
||||
SYM_CODE_START(handle_tlb_modify_ptw)
|
||||
csrwr t0, LOONGARCH_CSR_KS0
|
||||
csrwr t1, LOONGARCH_CSR_KS1
|
||||
la_abs t0, tlb_do_page_fault_1
|
||||
jr t0
|
||||
SYM_FUNC_END(handle_tlb_modify_ptw)
|
||||
SYM_CODE_END(handle_tlb_modify_ptw)
|
||||
|
||||
SYM_FUNC_START(handle_tlb_refill)
|
||||
SYM_CODE_START(handle_tlb_refill)
|
||||
csrwr t0, LOONGARCH_CSR_TLBRSAVE
|
||||
csrrd t0, LOONGARCH_CSR_PGD
|
||||
lddir t0, t0, 3
|
||||
@ -521,4 +521,4 @@ SYM_FUNC_START(handle_tlb_refill)
|
||||
tlbfill
|
||||
csrrd t0, LOONGARCH_CSR_TLBRSAVE
|
||||
ertn
|
||||
SYM_FUNC_END(handle_tlb_refill)
|
||||
SYM_CODE_END(handle_tlb_refill)
|
||||
|
@ -592,7 +592,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
|
||||
gfn_t gfn = gpa >> PAGE_SHIFT;
|
||||
int srcu_idx, err;
|
||||
kvm_pfn_t pfn;
|
||||
pte_t *ptep, entry, old_pte;
|
||||
pte_t *ptep, entry;
|
||||
bool writeable;
|
||||
unsigned long prot_bits;
|
||||
unsigned long mmu_seq;
|
||||
@ -664,7 +664,6 @@ retry:
|
||||
entry = pfn_pte(pfn, __pgprot(prot_bits));
|
||||
|
||||
/* Write the PTE */
|
||||
old_pte = *ptep;
|
||||
set_pte(ptep, entry);
|
||||
|
||||
err = 0;
|
||||
|
@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
|
||||
|
||||
#define pte_wrprotect pte_wrprotect
|
||||
|
||||
static inline int pte_read(pte_t pte)
|
||||
{
|
||||
return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
|
||||
}
|
||||
|
||||
#define pte_read pte_read
|
||||
|
||||
static inline int pte_write(pte_t pte)
|
||||
{
|
||||
return !(pte_val(pte) & _PAGE_RO);
|
||||
|
@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
|
||||
{
|
||||
unsigned long old;
|
||||
|
||||
if (pte_young(*ptep))
|
||||
if (!pte_young(*ptep))
|
||||
return 0;
|
||||
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
|
||||
return (old & _PAGE_ACCESSED) != 0;
|
||||
|
@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
|
||||
return pte_val(pte) & _PAGE_RW;
|
||||
}
|
||||
#endif
|
||||
#ifndef pte_read
|
||||
static inline int pte_read(pte_t pte) { return 1; }
|
||||
#endif
|
||||
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
|
||||
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
|
||||
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
|
||||
|
@ -137,8 +137,9 @@ ret_from_syscall:
|
||||
lis r4,icache_44x_need_flush@ha
|
||||
lwz r5,icache_44x_need_flush@l(r4)
|
||||
cmplwi cr0,r5,0
|
||||
bne- 2f
|
||||
bne- .L44x_icache_flush
|
||||
#endif /* CONFIG_PPC_47x */
|
||||
.L44x_icache_flush_return:
|
||||
kuep_unlock
|
||||
lwz r4,_LINK(r1)
|
||||
lwz r5,_CCR(r1)
|
||||
@ -172,10 +173,11 @@ syscall_exit_finish:
|
||||
b 1b
|
||||
|
||||
#ifdef CONFIG_44x
|
||||
2: li r7,0
|
||||
.L44x_icache_flush:
|
||||
li r7,0
|
||||
iccci r0,r0
|
||||
stw r7,icache_44x_need_flush@l(r4)
|
||||
b 1b
|
||||
b .L44x_icache_flush_return
|
||||
#endif /* CONFIG_44x */
|
||||
|
||||
.globl ret_from_fork
|
||||
|
@ -395,7 +395,7 @@ interrupt_base:
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
FP_UNAVAILABLE_EXCEPTION
|
||||
#else
|
||||
EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
|
||||
EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
|
||||
#endif
|
||||
|
||||
/* System Call Interrupt */
|
||||
|
@ -184,9 +184,6 @@ _GLOBAL_TOC(plpar_hcall)
|
||||
plpar_hcall_trace:
|
||||
HCALL_INST_PRECALL(R5)
|
||||
|
||||
std r4,STK_PARAM(R4)(r1)
|
||||
mr r0,r4
|
||||
|
||||
mr r4,r5
|
||||
mr r5,r6
|
||||
mr r6,r7
|
||||
@ -196,7 +193,7 @@ plpar_hcall_trace:
|
||||
|
||||
HVSC
|
||||
|
||||
ld r12,STK_PARAM(R4)(r1)
|
||||
ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
|
||||
std r4,0(r12)
|
||||
std r5,8(r12)
|
||||
std r6,16(r12)
|
||||
@ -296,9 +293,6 @@ _GLOBAL_TOC(plpar_hcall9)
|
||||
plpar_hcall9_trace:
|
||||
HCALL_INST_PRECALL(R5)
|
||||
|
||||
std r4,STK_PARAM(R4)(r1)
|
||||
mr r0,r4
|
||||
|
||||
mr r4,r5
|
||||
mr r5,r6
|
||||
mr r6,r7
|
||||
|
@ -6,7 +6,6 @@
|
||||
# for more details.
|
||||
#
|
||||
|
||||
OBJCOPYFLAGS := -O binary
|
||||
LDFLAGS_vmlinux := -z norelro
|
||||
ifeq ($(CONFIG_RELOCATABLE),y)
|
||||
LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --emit-relocs
|
||||
|
@ -1 +1,5 @@
|
||||
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
|
||||
CFLAGS_errata.o := -mcmodel=medany
|
||||
endif
|
||||
|
||||
obj-y += errata.o
|
||||
|
@ -31,6 +31,27 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
|
||||
return addr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Let's do like x86/arm64 and ignore the compat syscalls.
|
||||
*/
|
||||
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
|
||||
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
|
||||
{
|
||||
return is_compat_task();
|
||||
}
|
||||
|
||||
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
|
||||
static inline bool arch_syscall_match_sym_name(const char *sym,
|
||||
const char *name)
|
||||
{
|
||||
/*
|
||||
* Since all syscall functions have __riscv_ prefix, we must skip it.
|
||||
* However, as we described above, we decided to ignore compat
|
||||
* syscalls, so we don't care about __riscv_compat_ prefix here.
|
||||
*/
|
||||
return !strcmp(sym + 8, name);
|
||||
}
|
||||
|
||||
struct dyn_arch_ftrace {
|
||||
};
|
||||
#endif
|
||||
|
@ -40,6 +40,15 @@ void arch_remove_kprobe(struct kprobe *p);
|
||||
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
|
||||
bool kprobe_breakpoint_handler(struct pt_regs *regs);
|
||||
bool kprobe_single_step_handler(struct pt_regs *regs);
|
||||
#else
|
||||
static inline bool kprobe_breakpoint_handler(struct pt_regs *regs)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool kprobe_single_step_handler(struct pt_regs *regs)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_KPROBES */
|
||||
#endif /* _ASM_RISCV_KPROBES_H */
|
||||
|
@ -34,7 +34,18 @@ struct arch_uprobe {
|
||||
bool simulate;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_UPROBES
|
||||
bool uprobe_breakpoint_handler(struct pt_regs *regs);
|
||||
bool uprobe_single_step_handler(struct pt_regs *regs);
|
||||
#else
|
||||
static inline bool uprobe_breakpoint_handler(struct pt_regs *regs)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool uprobe_single_step_handler(struct pt_regs *regs)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_UPROBES */
|
||||
#endif /* _ASM_RISCV_UPROBES_H */
|
||||
|
@ -60,7 +60,7 @@ static void init_irq_stacks(void)
|
||||
}
|
||||
#endif /* CONFIG_VMAP_STACK */
|
||||
|
||||
#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
|
||||
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
|
||||
void do_softirq_own_stack(void)
|
||||
{
|
||||
#ifdef CONFIG_IRQ_STACKS
|
||||
@ -92,7 +92,7 @@ void do_softirq_own_stack(void)
|
||||
#endif
|
||||
__do_softirq();
|
||||
}
|
||||
#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
|
||||
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
|
||||
|
||||
#else
|
||||
static void init_irq_stacks(void) {}
|
||||
|
@ -173,19 +173,6 @@ static void __init init_resources(void)
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
if (crashk_res.start != crashk_res.end) {
|
||||
ret = add_resource(&iomem_resource, &crashk_res);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
}
|
||||
if (crashk_low_res.start != crashk_low_res.end) {
|
||||
ret = add_resource(&iomem_resource, &crashk_low_res);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
if (elfcorehdr_size > 0) {
|
||||
elfcorehdr_res.start = elfcorehdr_addr;
|
||||
|
@ -311,13 +311,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
|
||||
/* Align the stack frame. */
|
||||
sp &= ~0xfUL;
|
||||
|
||||
/*
|
||||
* Fail if the size of the altstack is not large enough for the
|
||||
* sigframe construction.
|
||||
*/
|
||||
if (current->sas_ss_size && sp < current->sas_ss_sp)
|
||||
return (void __user __force *)-1UL;
|
||||
|
||||
return (void __user *)sp;
|
||||
}
|
||||
|
||||
|
@ -13,6 +13,8 @@
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uprobes.h>
|
||||
#include <asm/uprobes.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/irq.h>
|
||||
@ -247,22 +249,28 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
|
||||
return GET_INSN_LENGTH(insn);
|
||||
}
|
||||
|
||||
static bool probe_single_step_handler(struct pt_regs *regs)
|
||||
{
|
||||
bool user = user_mode(regs);
|
||||
|
||||
return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
|
||||
}
|
||||
|
||||
static bool probe_breakpoint_handler(struct pt_regs *regs)
|
||||
{
|
||||
bool user = user_mode(regs);
|
||||
|
||||
return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
|
||||
}
|
||||
|
||||
void handle_break(struct pt_regs *regs)
|
||||
{
|
||||
#ifdef CONFIG_KPROBES
|
||||
if (kprobe_single_step_handler(regs))
|
||||
if (probe_single_step_handler(regs))
|
||||
return;
|
||||
|
||||
if (kprobe_breakpoint_handler(regs))
|
||||
return;
|
||||
#endif
|
||||
#ifdef CONFIG_UPROBES
|
||||
if (uprobe_single_step_handler(regs))
|
||||
if (probe_breakpoint_handler(regs))
|
||||
return;
|
||||
|
||||
if (uprobe_breakpoint_handler(regs))
|
||||
return;
|
||||
#endif
|
||||
current->thread.bad_cause = regs->cause;
|
||||
|
||||
if (user_mode(regs))
|
||||
|
@ -303,11 +303,6 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
|
||||
{
|
||||
return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
|
||||
}
|
||||
|
||||
static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
|
||||
{
|
||||
set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
|
||||
@ -3216,11 +3211,12 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
|
||||
|
||||
if (!gi->origin)
|
||||
return;
|
||||
if (gi->alert.mask)
|
||||
KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
|
||||
kvm, gi->alert.mask);
|
||||
while (gisa_in_alert_list(gi->origin))
|
||||
cpu_relax();
|
||||
WARN(gi->alert.mask != 0x00,
|
||||
"unexpected non zero alert.mask 0x%02x",
|
||||
gi->alert.mask);
|
||||
gi->alert.mask = 0x00;
|
||||
if (gisa_set_iam(gi->origin, gi->alert.mask))
|
||||
process_gib_alert_list();
|
||||
hrtimer_cancel(&gi->timer);
|
||||
gi->origin = NULL;
|
||||
VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
|
||||
|
@ -1,5 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <asm/insn.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include "perf_event.h"
|
||||
|
||||
@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort,
|
||||
* The LBR logs any address in the IP, even if the IP just
|
||||
* faulted. This means userspace can control the from address.
|
||||
* Ensure we don't blindly read any address by validating it is
|
||||
* a known text address.
|
||||
* a known text address and not a vsyscall address.
|
||||
*/
|
||||
if (kernel_text_address(from)) {
|
||||
if (kernel_text_address(from) && !in_gate_area_no_mm(from)) {
|
||||
addr = (void *)from;
|
||||
/*
|
||||
* Assume we can get the maximum possible size
|
||||
|
@ -28,8 +28,6 @@ struct x86_cpu {
|
||||
};
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
extern int arch_register_cpu(int num);
|
||||
extern void arch_unregister_cpu(int);
|
||||
extern void soft_restart_cpu(void);
|
||||
#endif
|
||||
|
||||
|
@ -157,7 +157,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) {
|
||||
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
|
||||
#endif
|
||||
|
||||
extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
|
||||
extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
|
||||
unsigned int size, u64 xfeatures, u32 pkru);
|
||||
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
|
||||
|
||||
static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
|
||||
|
@ -528,7 +528,6 @@ struct kvm_pmu {
|
||||
u64 raw_event_mask;
|
||||
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
|
||||
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
|
||||
struct irq_work irq_work;
|
||||
|
||||
/*
|
||||
* Overlay the bitmap with a 64-bit atomic so that all bits can be
|
||||
|
@ -637,12 +637,17 @@
|
||||
/* AMD Last Branch Record MSRs */
|
||||
#define MSR_AMD64_LBR_SELECT 0xc000010e
|
||||
|
||||
/* Fam 17h MSRs */
|
||||
#define MSR_F17H_IRPERF 0xc00000e9
|
||||
/* Zen4 */
|
||||
#define MSR_ZEN4_BP_CFG 0xc001102e
|
||||
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
|
||||
|
||||
/* Zen 2 */
|
||||
#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
|
||||
#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
|
||||
|
||||
/* Fam 17h MSRs */
|
||||
#define MSR_F17H_IRPERF 0xc00000e9
|
||||
|
||||
/* Fam 16h MSRs */
|
||||
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
|
||||
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
|
||||
|
@ -129,7 +129,6 @@ void native_smp_send_reschedule(int cpu);
|
||||
void native_send_call_func_ipi(const struct cpumask *mask);
|
||||
void native_send_call_func_single_ipi(int cpu);
|
||||
|
||||
bool smp_park_other_cpus_in_init(void);
|
||||
void smp_store_cpu_info(int id);
|
||||
|
||||
asmlinkage __visible void smp_reboot_interrupt(void);
|
||||
|
@ -268,6 +268,7 @@ enum avic_ipi_failure_cause {
|
||||
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
|
||||
AVIC_IPI_FAILURE_INVALID_TARGET,
|
||||
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
|
||||
AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
|
||||
};
|
||||
|
||||
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
|
||||
|
@ -403,6 +403,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
|
||||
u8 insn_buff[MAX_PATCH_LEN];
|
||||
|
||||
DPRINTK(ALT, "alt table %px, -> %px", start, end);
|
||||
|
||||
/*
|
||||
* In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
|
||||
* cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
|
||||
* During the process, KASAN becomes confused seeing partial LA57
|
||||
* conversion and triggers a false-positive out-of-bound report.
|
||||
*
|
||||
* Disable KASAN until the patching is complete.
|
||||
*/
|
||||
kasan_disable_current();
|
||||
|
||||
/*
|
||||
* The scan order should be from start to end. A later scanned
|
||||
* alternative code can overwrite previously scanned alternative code.
|
||||
@ -452,6 +463,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
|
||||
|
||||
text_poke_early(instr, insn_buff, insn_buff_sz);
|
||||
}
|
||||
|
||||
kasan_enable_current();
|
||||
}
|
||||
|
||||
static inline bool is_jcc32(struct insn *insn)
|
||||
|
@ -80,6 +80,10 @@ static const int amd_div0[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
|
||||
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
|
||||
|
||||
static const int amd_erratum_1485[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
|
||||
AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
|
||||
|
||||
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||
{
|
||||
int osvw_id = *erratum++;
|
||||
@ -1149,6 +1153,10 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
|
||||
setup_force_cpu_bug(X86_BUG_DIV0);
|
||||
}
|
||||
|
||||
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
|
||||
cpu_has_amd_erratum(c, amd_erratum_1485))
|
||||
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
@ -30,15 +30,15 @@ struct rmid_entry {
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/**
|
||||
* @rmid_free_lru A least recently used list of free RMIDs
|
||||
/*
|
||||
* @rmid_free_lru - A least recently used list of free RMIDs
|
||||
* These RMIDs are guaranteed to have an occupancy less than the
|
||||
* threshold occupancy
|
||||
*/
|
||||
static LIST_HEAD(rmid_free_lru);
|
||||
|
||||
/**
|
||||
* @rmid_limbo_count count of currently unused but (potentially)
|
||||
/*
|
||||
* @rmid_limbo_count - count of currently unused but (potentially)
|
||||
* dirty RMIDs.
|
||||
* This counts RMIDs that no one is currently using but that
|
||||
* may have a occupancy value > resctrl_rmid_realloc_threshold. User can
|
||||
@ -46,7 +46,7 @@ static LIST_HEAD(rmid_free_lru);
|
||||
*/
|
||||
static unsigned int rmid_limbo_count;
|
||||
|
||||
/**
|
||||
/*
|
||||
* @rmid_entry - The entry in the limbo and free lists.
|
||||
*/
|
||||
static struct rmid_entry *rmid_ptrs;
|
||||
|
@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
|
||||
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
|
||||
|
||||
void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
|
||||
unsigned int size, u32 pkru)
|
||||
unsigned int size, u64 xfeatures, u32 pkru)
|
||||
{
|
||||
struct fpstate *kstate = gfpu->fpstate;
|
||||
union fpregs_state *ustate = buf;
|
||||
struct membuf mb = { .p = buf, .left = size };
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
|
||||
__copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
|
||||
__copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
|
||||
XSTATE_COPY_XSAVE);
|
||||
} else {
|
||||
memcpy(&ustate->fxsave, &kstate->regs.fxsave,
|
||||
sizeof(ustate->fxsave));
|
||||
|
@ -1049,6 +1049,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
|
||||
* __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
|
||||
* @to: membuf descriptor
|
||||
* @fpstate: The fpstate buffer from which to copy
|
||||
* @xfeatures: The mask of xfeatures to save (XSAVE mode only)
|
||||
* @pkru_val: The PKRU value to store in the PKRU component
|
||||
* @copy_mode: The requested copy mode
|
||||
*
|
||||
@ -1059,7 +1060,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
|
||||
* It supports partial copy but @to.pos always starts from zero.
|
||||
*/
|
||||
void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
|
||||
u32 pkru_val, enum xstate_copy_mode copy_mode)
|
||||
u64 xfeatures, u32 pkru_val,
|
||||
enum xstate_copy_mode copy_mode)
|
||||
{
|
||||
const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
|
||||
struct xregs_state *xinit = &init_fpstate.regs.xsave;
|
||||
@ -1083,7 +1085,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
|
||||
break;
|
||||
|
||||
case XSTATE_COPY_XSAVE:
|
||||
header.xfeatures &= fpstate->user_xfeatures;
|
||||
header.xfeatures &= fpstate->user_xfeatures & xfeatures;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1185,6 +1187,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
|
||||
enum xstate_copy_mode copy_mode)
|
||||
{
|
||||
__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
|
||||
tsk->thread.fpu.fpstate->user_xfeatures,
|
||||
tsk->thread.pkru, copy_mode);
|
||||
}
|
||||
|
||||
@ -1536,10 +1539,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
|
||||
fpregs_restore_userregs();
|
||||
|
||||
newfps->xfeatures = curfps->xfeatures | xfeatures;
|
||||
|
||||
if (!guest_fpu)
|
||||
newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
|
||||
|
||||
newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
|
||||
newfps->xfd = curfps->xfd & ~xfeatures;
|
||||
|
||||
/* Do the final updates within the locked region */
|
||||
|
@ -43,7 +43,8 @@ enum xstate_copy_mode {
|
||||
|
||||
struct membuf;
|
||||
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
|
||||
u32 pkru_val, enum xstate_copy_mode copy_mode);
|
||||
u64 xfeatures, u32 pkru_val,
|
||||
enum xstate_copy_mode copy_mode);
|
||||
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
|
||||
enum xstate_copy_mode mode);
|
||||
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
|
||||
|
@ -131,7 +131,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable virtualization, APIC etc. and park the CPU in a HLT loop
|
||||
* this function calls the 'stop' function on all other CPUs in the system.
|
||||
*/
|
||||
DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
|
||||
{
|
||||
@ -172,17 +172,13 @@ static void native_stop_other_cpus(int wait)
|
||||
* 2) Wait for all other CPUs to report that they reached the
|
||||
* HLT loop in stop_this_cpu()
|
||||
*
|
||||
* 3) If the system uses INIT/STARTUP for CPU bringup, then
|
||||
* send all present CPUs an INIT vector, which brings them
|
||||
* completely out of the way.
|
||||
* 3) If #2 timed out send an NMI to the CPUs which did not
|
||||
* yet report
|
||||
*
|
||||
* 4) If #3 is not possible and #2 timed out send an NMI to the
|
||||
* CPUs which did not yet report
|
||||
*
|
||||
* 5) Wait for all other CPUs to report that they reached the
|
||||
* 4) Wait for all other CPUs to report that they reached the
|
||||
* HLT loop in stop_this_cpu()
|
||||
*
|
||||
* #4 can obviously race against a CPU reaching the HLT loop late.
|
||||
* #3 can obviously race against a CPU reaching the HLT loop late.
|
||||
* That CPU will have reported already and the "have all CPUs
|
||||
* reached HLT" condition will be true despite the fact that the
|
||||
* other CPU is still handling the NMI. Again, there is no
|
||||
@ -198,7 +194,7 @@ static void native_stop_other_cpus(int wait)
|
||||
/*
|
||||
* Don't wait longer than a second for IPI completion. The
|
||||
* wait request is not checked here because that would
|
||||
* prevent an NMI/INIT shutdown in case that not all
|
||||
* prevent an NMI shutdown attempt in case that not all
|
||||
* CPUs reach shutdown state.
|
||||
*/
|
||||
timeout = USEC_PER_SEC;
|
||||
@ -206,27 +202,7 @@ static void native_stop_other_cpus(int wait)
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Park all other CPUs in INIT including "offline" CPUs, if
|
||||
* possible. That's a safe place where they can't resume execution
|
||||
* of HLT and then execute the HLT loop from overwritten text or
|
||||
* page tables.
|
||||
*
|
||||
* The only downside is a broadcast MCE, but up to the point where
|
||||
* the kexec() kernel brought all APs online again an MCE will just
|
||||
* make HLT resume and handle the MCE. The machine crashes and burns
|
||||
* due to overwritten text, page tables and data. So there is a
|
||||
* choice between fire and frying pan. The result is pretty much
|
||||
* the same. Chose frying pan until x86 provides a sane mechanism
|
||||
* to park a CPU.
|
||||
*/
|
||||
if (smp_park_other_cpus_in_init())
|
||||
goto done;
|
||||
|
||||
/*
|
||||
* If park with INIT was not possible and the REBOOT_VECTOR didn't
|
||||
* take all secondary CPUs offline, try with the NMI.
|
||||
*/
|
||||
/* if the REBOOT_VECTOR didn't work, try with the NMI */
|
||||
if (!cpumask_empty(&cpus_stop_mask)) {
|
||||
/*
|
||||
* If NMI IPI is enabled, try to register the stop handler
|
||||
@ -249,7 +225,6 @@ static void native_stop_other_cpus(int wait)
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
done:
|
||||
local_irq_save(flags);
|
||||
disable_local_APIC();
|
||||
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
|
||||
|
@ -1240,33 +1240,6 @@ void arch_thaw_secondary_cpus_end(void)
|
||||
cache_aps_init();
|
||||
}
|
||||
|
||||
bool smp_park_other_cpus_in_init(void)
|
||||
{
|
||||
unsigned int cpu, this_cpu = smp_processor_id();
|
||||
unsigned int apicid;
|
||||
|
||||
if (apic->wakeup_secondary_cpu_64 || apic->wakeup_secondary_cpu)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* If this is a crash stop which does not execute on the boot CPU,
|
||||
* then this cannot use the INIT mechanism because INIT to the boot
|
||||
* CPU will reset the machine.
|
||||
*/
|
||||
if (this_cpu)
|
||||
return false;
|
||||
|
||||
for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
|
||||
if (cpu == this_cpu)
|
||||
continue;
|
||||
apicid = apic->cpu_present_to_apicid(cpu);
|
||||
if (apicid == BAD_APICID)
|
||||
continue;
|
||||
send_init_sequence(apicid);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Early setup to make printk work.
|
||||
*/
|
||||
|
@ -54,7 +54,7 @@ void arch_unregister_cpu(int num)
|
||||
EXPORT_SYMBOL(arch_unregister_cpu);
|
||||
#else /* CONFIG_HOTPLUG_CPU */
|
||||
|
||||
static int __init arch_register_cpu(int num)
|
||||
int __init arch_register_cpu(int num)
|
||||
{
|
||||
return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
|
||||
}
|
||||
|
@ -360,14 +360,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.guest_supported_xcr0 =
|
||||
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
|
||||
|
||||
/*
|
||||
* FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
|
||||
* XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
|
||||
* supported by the host.
|
||||
*/
|
||||
vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
|
||||
XFEATURE_MASK_FPSSE;
|
||||
|
||||
kvm_update_pv_runtime(vcpu);
|
||||
|
||||
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
|
||||
|
@ -2759,13 +2759,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
|
||||
{
|
||||
u32 reg = kvm_lapic_get_reg(apic, lvt_type);
|
||||
int vector, mode, trig_mode;
|
||||
int r;
|
||||
|
||||
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
|
||||
vector = reg & APIC_VECTOR_MASK;
|
||||
mode = reg & APIC_MODE_MASK;
|
||||
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
|
||||
return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
|
||||
NULL);
|
||||
|
||||
r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
|
||||
if (r && lvt_type == APIC_LVTPC)
|
||||
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -93,14 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

kvm_pmu_deliver_pmi(vcpu);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@ -124,20 +116,7 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}

if (!pmc->intr || skip_pmi)
return;

/*
* Inject PMI. If vcpu was in a guest mode during NMI PMI
* can be ejected on a guest mode re-entry. Otherwise we can't
* be sure that vcpu wasn't executing hlt instruction at the
* time of vmexit and is not going to re-enter guest mode until
* woken up. So we should wake it, but this is impossible from
* NMI context. Do it from irq work instead.
*/
if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
else
if (pmc->intr && !skip_pmi)
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
@ -675,9 +654,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

irq_work_sync(&pmu->irq_work);
static_call(kvm_x86_pmu_reset)(vcpu);
}
@ -687,7 +663,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)

memset(pmu, 0, sizeof(*pmu));
static_call(kvm_x86_pmu_init)(vcpu);
init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
pmu->event_count = 0;
pmu->need_cleanup = false;
kvm_pmu_refresh(vcpu);
@ -74,6 +74,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
return counter & pmc_bitmask(pmc);
}

static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{
pmc->counter += val - pmc_read_counter(pmc);
pmc->counter &= pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
@ -529,8 +529,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
break;
case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
/* Invalid IPI with vector < 16 */
break;
default:
pr_err("Unknown IPI interception\n");
vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
}

return 1;
@ -1253,6 +1253,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)

nested_svm_uninit_mmu_context(vcpu);
vmcb_mark_all_dirty(svm->vmcb);

if (kvm_apicv_activated(vcpu->kvm))
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}

kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
@ -160,7 +160,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* MSR_PERFCTRn */
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
if (pmc) {
pmc->counter += data - pmc_read_counter(pmc);
pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
return 0;
}
@ -691,7 +691,7 @@ static int svm_hardware_enable(void)
*/
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
struct sev_es_save_area *hostsa;
u32 msr_hi;
u32 __maybe_unused msr_hi;

hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@ -913,8 +913,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
if (intercept == svm->x2avic_msrs_intercepted)
return;

if (!x2avic_enabled ||
!apic_x2apic_mode(svm->vcpu.arch.apic))
if (!x2avic_enabled)
return;

for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
@ -436,11 +436,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
break;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc->counter += data - pmc_read_counter(pmc);
pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
break;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
@ -5382,26 +5382,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}

static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return;

fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
guest_xsave->region,
sizeof(guest_xsave->region),
vcpu->arch.pkru);
}

static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
u8 *state, unsigned int size)
{
/*
* Only copy state for features that are enabled for the guest. The
* state itself isn't problematic, but setting bits in the header for
* features that are supported in *this* host but not exposed to the
* guest can result in KVM_SET_XSAVE failing when live migrating to a
* compatible host without the features that are NOT exposed to the
* guest.
*
* FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
* XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
* supported by the host.
*/
u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
XFEATURE_MASK_FPSSE;

if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return;

fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
state, size, vcpu->arch.pkru);
fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
supported_xcr0, vcpu->arch.pkru);
}

static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
sizeof(guest_xsave->region));
}

static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@ -12843,6 +12854,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
#endif

if (kvm_test_request(KVM_REQ_PMI, vcpu))
return true;

if (kvm_arch_interrupt_allowed(vcpu) &&
(kvm_cpu_has_interrupt(vcpu) ||
kvm_guest_apic_has_interrupt(vcpu)))
block/fops.c
@ -772,24 +772,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,

filemap_invalidate_lock(inode->i_mapping);

/* Invalidate the page cache, including dirty pages. */
error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
if (error)
goto fail;

/*
* Invalidate the page cache, including dirty pages, for valid
* de-allocate mode calls to fallocate().
*/
switch (mode) {
case FALLOC_FL_ZERO_RANGE:
case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
if (error)
goto fail;

error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOUNMAP);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
if (error)
goto fail;

error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOFALLBACK);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
if (error)
goto fail;

error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL);
break;
@ -12,6 +12,7 @@
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
@ -1913,6 +1913,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
},
},
{
/*
* HP Pavilion Gaming Laptop 15-dk1xxx
* https://github.com/systemd/systemd/issues/28942
*/
.callback = ec_honor_dsdt_gpe,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
},
},
{
/*
* Samsung hardware
@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{
.ident = "Asus ExpertBook B1402CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
},
},
{
.ident = "Asus ExpertBook B1502CBA",
.matches = {
@ -500,16 +507,23 @@ static const struct dmi_system_id maingear_laptop[] = {

static const struct dmi_system_id pcspecialist_laptop[] = {
{
.ident = "PCSpecialist Elimina Pro 16 M",
/*
* Some models have product-name "Elimina Pro 16 M",
* others "GM6BGEQ". Match on board-name to match both.
*/
/* TongFang GM6BGEQ / PCSpecialist Elimina Pro 16 M, RTX 3050 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"),
},
},
{
/* TongFang GM6BG5Q, RTX 4050 */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6BG5Q"),
},
},
{
/* TongFang GM6BG0Q / PCSpecialist Elimina Pro 16 M, RTX 4060 */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
},
},
{ }
};
@ -4812,6 +4812,8 @@ static void binder_release_work(struct binder_proc *proc,
"undelivered TRANSACTION_ERROR: %u\n",
e->cmd);
} break;
case BINDER_WORK_TRANSACTION_PENDING:
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@ -1478,7 +1478,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)

/* If the user didn't specify a name match any */
if (data)
return !strcmp((*r)->name, data);
return (*r)->name && !strcmp((*r)->name, data);
else
return 1;
}
@ -962,13 +962,10 @@ static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
skb_put_data(skb, buf, strlen(buf));
}

static int btrtl_register_devcoredump_support(struct hci_dev *hdev)
static void btrtl_register_devcoredump_support(struct hci_dev *hdev)
{
int err;
hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);

err = hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);

return err;
}

void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name)
@ -1255,8 +1252,7 @@ int btrtl_download_firmware(struct hci_dev *hdev,
}

done:
if (!err)
err = btrtl_register_devcoredump_support(hdev);
btrtl_register_devcoredump_support(hdev);

return err;
}
@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
struct vhci_data *data = hci_get_drvdata(hdev);

memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
mutex_unlock(&data->open_mutex);

wake_up_interruptible(&data->read_wait);
return 0;
@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext,
if (*id == component_id)
return 0;

if (ext->type == COUNTER_COMP_ARRAY) {
element = ext->priv;
if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
element = ext[*ext_idx].priv;

if (component_id - *id < element->length)
return 0;
@ -97,7 +97,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
priv->qdec_mode = 0;
/* Set highest rate based on whether soc has gclk or not */
bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
if (priv->tc_cfg->has_gclk)
if (!priv->tc_cfg->has_gclk)
cmr |= ATMEL_TC_TIMER_CLOCK2;
else
cmr |= ATMEL_TC_TIMER_CLOCK1;
@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
++count;
} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
&tmp->flags)) {
if (ktime_after(tmp->timestamp, timestamp))
timestamp = tmp->timestamp;
} else {
/*
* Use the current time if the fence is
* currently signaling.
*/
timestamp = ktime_get();
ktime_t t = dma_fence_timestamp(tmp);

if (ktime_after(t, timestamp))
timestamp = t;
}
}
}
@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->driver_name));

info->status = dma_fence_get_status(fence);
while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
cpu_relax();
info->timestamp_ns =
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) :
ktime_set(0, 0);
dma_fence_is_signaled(fence) ?
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);

return info->status;
}
@ -92,8 +92,14 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)

edma_writel_chreg(fsl_chan, val, ch_sbr);

if (flags & FSL_EDMA_DRV_HAS_CHMUX)
edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
/*
* ch_mux: With the exception of 0, attempts to write a value
* already in use will be forced to 0.
*/
if (!edma_readl_chreg(fsl_chan, ch_mux))
edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
}

val = edma_readl_chreg(fsl_chan, ch_csr);
val |= EDMA_V3_CH_CSR_ERQ;
@ -448,12 +454,25 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,

edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);

csr = le16_to_cpu(tcd->csr);

if (fsl_chan->is_sw) {
csr = le16_to_cpu(tcd->csr);
csr |= EDMA_TCD_CSR_START;
tcd->csr = cpu_to_le16(csr);
}

/*
* Must clear CHn_CSR[DONE] bit before enable TCDn_CSR[ESG] at EDMAv3
* eDMAv4 have not such requirement.
* Change MLINK need clear CHn_CSR[DONE] for both eDMAv3 and eDMAv4.
*/
if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
(csr & EDMA_TCD_CSR_E_SG)) ||
((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
(csr & EDMA_TCD_CSR_E_LINK)))
edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
@ -183,11 +183,23 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
/* Need clean CHn_CSR DONE before enable TCD's ESG */
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)

#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
FSL_EDMA_DRV_DEV_TO_DEV | \
FSL_EDMA_DRV_ALIGN_64BYTE)
FSL_EDMA_DRV_ALIGN_64BYTE | \
FSL_EDMA_DRV_CLEAR_DONE_E_SG | \
FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

#define FSL_EDMA_DRV_EDMA4 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
FSL_EDMA_DRV_DEV_TO_DEV | \
FSL_EDMA_DRV_ALIGN_64BYTE | \
FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

struct fsl_edma_drvdata {
u32 dmamuxs; /* only used before v3 */
@ -154,18 +154,20 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
fsl_chan = to_fsl_edma_chan(chan);
i = fsl_chan - fsl_edma->chans;

chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
fsl_chan->priority = dma_spec->args[1];
fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;

if (!b_chmux && i == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if controller support channel mux, choose a free channel */
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
fsl_chan->srcid = dma_spec->args[0];
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
@ -355,7 +357,7 @@ static struct fsl_edma_drvdata imx93_data3 = {
};

static struct fsl_edma_drvdata imx93_data4 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@ -477,6 +477,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
u32 stat;
unsigned long flags;

if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@ -490,7 +491,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;

spin_lock(&idxd->cmd_lock);
spin_lock_irqsave(&idxd->cmd_lock, flags);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@ -507,7 +508,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
spin_unlock(&idxd->cmd_lock);
spin_unlock_irqrestore(&idxd->cmd_lock, flags);
wait_for_completion(&done);
stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
spin_lock(&idxd->cmd_lock);
@ -450,9 +450,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

synchronize_irq(c->irq);

spin_unlock_irqrestore(&c->vc.lock, flags);
synchronize_irq(c->irq);

return 0;
}
@ -3668,6 +3668,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
pm_runtime_disable(base->dev);

report_failure:
d40_err(dev, "probe failed\n");
@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
if (chan->trig_mdma && sg_len > 1)
if (chan->trig_mdma && sg_len > 1) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
}

for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,

residue = stm32_dma_get_remaining_bytes(chan);

if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
n_sg++;
if (n_sg == chan->desc->num_sgs)
n_sg = 0;
residue = sg_req->len;
if (!chan->trig_mdma)
residue = sg_req->len;
}

/*
@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
* residue = remaining bytes from NDTR + remaining
* periods/sg to be transferred
*/
if (!chan->desc->cyclic || n_sg != 0)
if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
for (i = n_sg; i < desc->num_sgs; i++)
residue += desc->sg_req[i].len;
@ -777,8 +777,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
/* Enable interrupts */
ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
if (sg_len > 1)
ccr |= STM32_MDMA_CCR_BTIE;
desc->ccr = ccr;

return 0;
@ -1236,6 +1234,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;

/* Transfer can be terminated */
if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
return -EPERM;

hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;

spin_lock_irqsave(&chan->vchan.lock, flags);
@ -1316,21 +1318,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,

static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
struct stm32_mdma_desc *desc,
u32 curr_hwdesc)
u32 curr_hwdesc,
struct dma_tx_state *state)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct stm32_mdma_hwdesc *hwdesc;
u32 cbndtr, residue, modulo, burst_size;
u32 cisr, clar, cbndtr, residue, modulo, burst_size;
int i;

cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));

residue = 0;
for (i = curr_hwdesc + 1; i < desc->count; i++) {
/* Get the next hw descriptor to process from current transfer */
clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
for (i = desc->count - 1; i >= 0; i--) {
hwdesc = desc->node[i].hwdesc;

if (hwdesc->clar == clar)
break;/* Current transfer found, stop cumulating */

/* Cumulate residue of unprocessed hw descriptors */
residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
}
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;

state->in_flight_bytes = 0;
if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;

if (!chan->mem_burst)
return residue;

@ -1360,11 +1376,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,

vdesc = vchan_find_desc(&chan->vchan, cookie);
if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
residue = stm32_mdma_desc_residue(chan, chan->desc,
chan->curr_hwdesc);
residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
else if (vdesc)
residue = stm32_mdma_desc_residue(chan,
to_stm32_mdma_desc(vdesc), 0);
residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);

dma_set_residue(state, residue);

spin_unlock_irqrestore(&chan->vchan.lock, flags);
@ -142,6 +142,10 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
int r;
int size;

/* SI HW does not have doorbells, skip allocation */
if (adev->doorbell.num_kernel_doorbells == 0)
return 0;

/* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
@ -252,7 +252,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;

if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;

amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
@ -1262,6 +1262,9 @@ static void disable_vbios_mode_if_required(
if (stream == NULL)
continue;

if (stream->apply_seamless_boot_optimization)
continue;

// only looking for first odm pipe
if (pipe->prev_odm_pipe)
continue;
@ -290,7 +290,8 @@ static int
update_connector_routing(struct drm_atomic_state *state,
struct drm_connector *connector,
struct drm_connector_state *old_connector_state,
struct drm_connector_state *new_connector_state)
struct drm_connector_state *new_connector_state,
bool added_by_user)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
* there's a chance the connector may have been destroyed during the
* process, but it's better to ignore that then cause
* drm_atomic_helper_resume() to fail.
*
* Last, we want to ignore connector registration when the connector
* was not pulled in the atomic state by user-space (ie, was pulled
* in by the driver, e.g. when updating a DP-MST stream).
*/
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
crtc_state->active) {
added_by_user && crtc_state->active) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] is not registered\n",
connector->base.id, connector->name);
@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
unsigned int connectors_mask = 0;
unsigned int connectors_mask = 0, user_connectors_mask = 0;

for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
user_connectors_mask |= BIT(i);

for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool has_connectors =
@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
*/
ret = update_connector_routing(state, connector,
old_connector_state,
new_connector_state);
new_connector_state,
BIT(i) & user_connectors_mask);
if (ret)
return ret;
if (old_connector_state->crtc) {
@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
int i, j, npages;
long i, j, npages;

if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)

i = 0;
while (i < npages) {
long nr;
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
goto fail;
for (j = 0; j < folio_nr_pages(folio); j++, i++)
nr = min(npages - i, folio_nr_pages(folio));
for (j = 0; j < nr; j++, i++)
pages[i] = folio_file_page(folio, i);

/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
struct dpu_sw_pipe_cfg *pipe_cfg)
{
int src_width, src_height, dst_height, fps;
u64 plane_pixel_rate, plane_bit_rate;
u64 plane_prefill_bw;
u64 plane_bw;
u32 hw_latency_lines;
@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;

plane_bw =
src_width * mode->vtotal * fps * fmt->bpp *
scale_factor;
plane_pixel_rate = src_width * mode->vtotal * fps;
plane_bit_rate = plane_pixel_rate * fmt->bpp;

plane_prefill_bw =
src_width * hw_latency_lines * fps * fmt->bpp *
scale_factor * mode->vtotal;
plane_bw = plane_bit_rate * scale_factor;

plane_prefill_bw = plane_bw * hw_latency_lines;

if ((vbp+vpw) > hw_latency_lines)
do_div(plane_prefill_bw, (vbp+vpw));
@ -733,9 +733,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
const struct dpu_format *fmt)
const struct dpu_format *fmt,
const struct drm_display_mode *mode)
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);

min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;

@ -774,6 +776,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return -EINVAL;
}

/* max clk check */
if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
return -E2BIG;
}

return 0;
}

@ -899,12 +907,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
}

ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
if (ret)
return ret;

if (r_pipe->sspp) {
ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
&crtc_state->adjusted_mode);
if (ret)
return ret;
}
@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
return rc;

while (--link_train_max_retries) {
rc = dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
rc);
break;
}

training_step = DP_TRAINING_NONE;
rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
/* stop link training before start re training */
dp_ctrl_clear_training_pattern(ctrl);
}

rc = dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
break;
}
}

if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
@ -1090,7 +1090,7 @@ int dp_link_process_request(struct dp_link *dp_link)
} else if (dp_link_read_psr_error_status(link)) {
DRM_ERROR("PSR IRQ_HPD received\n");
} else if (dp_link_psr_capability_changed(link)) {
drm_dbg_dp(link->drm_dev, "PSR Capability changed");
drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
} else {
ret = dp_link_process_link_status_update(link);
if (!ret) {
@ -1107,7 +1107,7 @@ int dp_link_process_request(struct dp_link *dp_link)
}
}

drm_dbg_dp(link->drm_dev, "sink request=%#x",
drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
dp_link->sink_request);
return ret;
}