Merge drm/drm-next into drm-intel-next

Sync up with changes from drm-intel-gt-next.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>

commit 69f06e4fa0
@@ -327,6 +327,7 @@ Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@mellanox.com>
Maxim Mikityanskiy <maxtram95@gmail.com> <maximmi@nvidia.com>
Maxime Ripard <mripard@kernel.org> <maxime@cerno.tech>
Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>

@@ -0,0 +1,118 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2020 BayLibre, SAS
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/amlogic,meson-g12a-dw-mipi-dsi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Amlogic specific extensions to the Synopsys Designware MIPI DSI Host Controller

maintainers:
  - Neil Armstrong <neil.armstrong@linaro.org>

description: |
  The Amlogic Meson Synopsys Designware Integration is composed of
  - A Synopsys DesignWare MIPI DSI Host Controller IP
  - A TOP control block controlling the Clocks & Resets of the IP

allOf:
  - $ref: dsi-controller.yaml#

properties:
  compatible:
    enum:
      - amlogic,meson-g12a-dw-mipi-dsi

  reg:
    maxItems: 1

  clocks:
    minItems: 3
    maxItems: 4

  clock-names:
    minItems: 3
    items:
      - const: pclk
      - const: bit
      - const: px
      - const: meas

  resets:
    maxItems: 1

  reset-names:
    items:
      - const: top

  phys:
    maxItems: 1

  phy-names:
    items:
      - const: dphy

  ports:
    $ref: /schemas/graph.yaml#/properties/ports

    properties:
      port@0:
        $ref: /schemas/graph.yaml#/properties/port
        description: Input node to receive pixel data.

      port@1:
        $ref: /schemas/graph.yaml#/properties/port
        description: DSI output node to panel.

    required:
      - port@0
      - port@1

required:
  - compatible
  - reg
  - clocks
  - clock-names
  - resets
  - reset-names
  - phys
  - phy-names
  - ports

unevaluatedProperties: false

examples:
  - |
    dsi@6000 {
      compatible = "amlogic,meson-g12a-dw-mipi-dsi";
      reg = <0x6000 0x400>;
      resets = <&reset_top>;
      reset-names = "top";
      clocks = <&clk_pclk>, <&bit_clk>, <&clk_px>;
      clock-names = "pclk", "bit", "px";
      phys = <&mipi_dphy>;
      phy-names = "dphy";

      ports {
        #address-cells = <1>;
        #size-cells = <0>;

        /* VPU VENC Input */
        mipi_dsi_venc_port: port@0 {
          reg = <0>;

          mipi_dsi_in: endpoint {
            remote-endpoint = <&dpi_out>;
          };
        };

        /* DSI Output */
        mipi_dsi_panel_port: port@1 {
          reg = <1>;

          mipi_out_panel: endpoint {
            remote-endpoint = <&mipi_in_panel>;
          };
        };
      };
    };

@@ -96,6 +96,11 @@ properties:
    description:
      A port node pointing to the HDMI-TX port node.

  port@2:
    $ref: /schemas/graph.yaml#/properties/port
    description:
      A port node pointing to the DPI port node (e.g. DSI or LVDS transceiver).

  "#address-cells":
    const: 1

@@ -70,7 +70,9 @@ properties:
  samsung,burst-clock-frequency:
    $ref: /schemas/types.yaml#/definitions/uint32
    description:
      DSIM high speed burst mode frequency.
      DSIM high speed burst mode frequency. If absent,
      the pixel clock from the attached device or bridge
      will be used instead.

  samsung,esc-clock-frequency:
    $ref: /schemas/types.yaml#/definitions/uint32
@@ -80,7 +82,8 @@ properties:
  samsung,pll-clock-frequency:
    $ref: /schemas/types.yaml#/definitions/uint32
    description:
      DSIM oscillator clock frequency.
      DSIM oscillator clock frequency. If absent, the clock frequency
      of sclk_mipi will be used instead.

  phys:
    maxItems: 1
@@ -100,7 +103,8 @@ properties:
          specified.

      port@1:
        $ref: /schemas/graph.yaml#/properties/port
        $ref: /schemas/graph.yaml#/$defs/port-base
        unevaluatedProperties: false
        description:
          DSI output port node to the panel or the next bridge
          in the chain.
@@ -134,9 +138,7 @@ required:
  - compatible
  - interrupts
  - reg
  - samsung,burst-clock-frequency
  - samsung,esc-clock-frequency
  - samsung,pll-clock-frequency

allOf:
  - $ref: ../dsi-controller.yaml#

@@ -21,6 +21,9 @@ properties:
    maxItems: 1
    description: virtual channel number of a DSI peripheral

  reset-gpios:
    maxItems: 1

  vddc-supply:
    description: Regulator for 1.2V internal core power.

@@ -36,6 +36,9 @@ properties:
    description: GPIO signal to enable DDC bus
    maxItems: 1

  hdmi-pwr-supply:
    description: Power supply for the HDMI +5V Power pin

  port:
    $ref: /schemas/graph.yaml#/properties/port
    description: Connection to controller providing HDMI signals

@@ -21,6 +21,7 @@ properties:
      - fsl,imx28-lcdif
      - fsl,imx6sx-lcdif
      - fsl,imx8mp-lcdif
      - fsl,imx93-lcdif
  - items:
      - enum:
          - fsl,imx6sl-lcdif
@@ -88,7 +89,9 @@ allOf:
      properties:
        compatible:
          contains:
            const: fsl,imx8mp-lcdif
            enum:
              - fsl,imx8mp-lcdif
              - fsl,imx93-lcdif
    then:
      properties:
        clocks:
@@ -107,6 +110,7 @@ allOf:
            enum:
              - fsl,imx6sx-lcdif
              - fsl,imx8mp-lcdif
              - fsl,imx93-lcdif
    then:
      properties:
        clocks:
@@ -123,6 +127,7 @@ allOf:
              - fsl,imx8mm-lcdif
              - fsl,imx8mn-lcdif
              - fsl,imx8mp-lcdif
              - fsl,imx93-lcdif
    then:
      required:
        - power-domains

@@ -32,6 +32,10 @@ properties:
      - innolux,hj110iz-01a
        # STARRY 2081101QFH032011-53G 10.1" WUXGA TFT LCD panel
      - starry,2081101qfh032011-53g
        # STARRY himax83102-j02 10.51" WUXGA TFT LCD panel
      - starry,himax83102-j02
        # STARRY ili9882t 10.51" WUXGA TFT LCD panel
      - starry,ili9882t

  reg:
    description: the virtual channel number of a DSI peripheral

@@ -33,6 +33,8 @@ properties:
      - ampire,am-1280800n3tzqw-t00h
        # Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
      - ampire,am-480272h3tmqw-t01h
        # Ampire AM-800480L1TMQW-T00H 5" WVGA TFT LCD panel
      - ampire,am-800480l1tmqw-t00h
        # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
      - ampire,am800480r3tmqwa1h
        # Ampire AM-800600P5TMQW-TB8H 8.0" SVGA TFT LCD panel
@@ -284,6 +286,8 @@ properties:
      - rocktech,rk101ii01d-ct
        # Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
      - rocktech,rk070er9427
        # Rocktech Display Ltd. RK043FN48H 4.3" 480x272 LCD-TFT panel
      - rocktech,rk043fn48h
        # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
      - samsung,atna33xc20
        # Samsung 12.2" (2560x1600 pixels) TFT LCD panel

@@ -24,7 +24,7 @@ File format specification
- All keys shall be prefixed with `drm-`.
- Whitespace between the delimiter and first non-whitespace character shall be
  ignored when parsing.
- Neither keys or values are allowed to contain whitespace characters.
- Keys are not allowed to contain whitespace characters.
- Numerical key value pairs can end with optional unit string.
- Data type of the value is fixed as defined in the specification.

@@ -39,12 +39,13 @@ Data types
----------

- <uint> - Unsigned integer without defining the maximum value.
- <str> - String excluding any above defined reserved characters or whitespace.
- <keystr> - String excluding any above defined reserved characters or whitespace.
- <valstr> - String.

Mandatory fully standardised keys
---------------------------------

- drm-driver: <str>
- drm-driver: <valstr>

String shall contain the name this driver registered as via the respective
`struct drm_driver` data structure.
@@ -52,6 +53,9 @@ String shall contain the name this driver registered as via the respective
Optional fully standardised keys
--------------------------------

Identification
^^^^^^^^^^^^^^

- drm-pdev: <aaaa:bb.cc.d>

For PCI devices this should contain the PCI slot address of the device in
@@ -69,10 +73,13 @@ scope of each device, in which case `drm-pdev` shall be present as well.
Userspace should make sure to not double account any usage statistics by using
the above described criteria in order to associate data to individual clients.

- drm-engine-<str>: <uint> ns
Utilization
^^^^^^^^^^^

- drm-engine-<keystr>: <uint> ns

GPUs usually contain multiple execution engines. Each shall be given a stable
and unique name (str), with possible values documented in the driver specific
and unique name (keystr), with possible values documented in the driver specific
documentation.

Value shall be in specified time units which the respective GPU engine spent
@@ -84,31 +91,19 @@ larger value within a reasonable period. Upon observing a value lower than what
was previously read, userspace is expected to stay with that larger previous
value until a monotonic update is seen.

- drm-engine-capacity-<str>: <uint>
- drm-engine-capacity-<keystr>: <uint>

Engine identifier string must be the same as the one specified in the
drm-engine-<str> tag and shall contain a greater than zero number in case the
drm-engine-<keystr> tag and shall contain a greater than zero number in case the
exported engine corresponds to a group of identical hardware engines.

In the absence of this tag parser shall assume capacity of one. Zero capacity
is not allowed.

- drm-memory-<str>: <uint> [KiB|MiB]

Each possible memory type which can be used to store buffer objects by the
GPU in question shall be given a stable and unique name to be returned as the
string here.

Value shall reflect the amount of storage currently consumed by the buffer
object belong to this client, in the respective memory region.

Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB'
indicating kibi- or mebi-bytes.

- drm-cycles-<str> <uint>
- drm-cycles-<keystr>: <uint>

Engine identifier string must be the same as the one specified in the
drm-engine-<str> tag and shall contain the number of busy cycles for the given
drm-engine-<keystr> tag and shall contain the number of busy cycles for the given
engine.

Values are not required to be constantly monotonic if it makes the driver
@@ -117,16 +112,60 @@ larger value within a reasonable period. Upon observing a value lower than what
was previously read, userspace is expected to stay with that larger previous
value until a monotonic update is seen.

- drm-maxfreq-<str> <uint> [Hz|MHz|KHz]
- drm-maxfreq-<keystr>: <uint> [Hz|MHz|KHz]

Engine identifier string must be the same as the one specified in the
drm-engine-<str> tag and shall contain the maximum frequency for the given
engine. Taken together with drm-cycles-<str>, this can be used to calculate
percentage utilization of the engine, whereas drm-engine-<str> only reflects
drm-engine-<keystr> tag and shall contain the maximum frequency for the given
engine. Taken together with drm-cycles-<keystr>, this can be used to calculate
percentage utilization of the engine, whereas drm-engine-<keystr> only reflects
time active without considering what frequency the engine is operating as a
percentage of it's maximum frequency.

Memory
^^^^^^

- drm-memory-<region>: <uint> [KiB|MiB]

Each possible memory type which can be used to store buffer objects by the
GPU in question shall be given a stable and unique name to be returned as the
string here. The name "memory" is reserved to refer to normal system memory.

Value shall reflect the amount of storage currently consumed by the buffer
objects belong to this client, in the respective memory region.

Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB'
indicating kibi- or mebi-bytes.

- drm-shared-<region>: <uint> [KiB|MiB]

The total size of buffers that are shared with another file (ie. have more
than a single handle).

- drm-total-<region>: <uint> [KiB|MiB]

The total size of buffers that including shared and private memory.

- drm-resident-<region>: <uint> [KiB|MiB]

The total size of buffers that are resident in the specified region.

- drm-purgeable-<region>: <uint> [KiB|MiB]

The total size of buffers that are purgeable.

- drm-active-<region>: <uint> [KiB|MiB]

The total size of buffers that are active on one or more engines.

Implementation Details
======================

Drivers should use drm_show_fdinfo() in their `struct file_operations`, and
implement &drm_driver.show_fdinfo if they wish to provide any stats which
are not provided by drm_show_fdinfo(). But even driver specific stats should
be documented above and where possible, aligned with other drivers.

Driver specific implementations
===============================
-------------------------------

:ref:`i915-usage-stats`

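For illustration, a client's fdinfo could look roughly like the excerpt below; the
engine and region names and all values here are made up, and the exact set of keys
exposed is driver specific::

    drm-driver:               i915
    drm-pdev:                 0000:03:00.0
    drm-engine-render:        9288864723 ns
    drm-engine-copy:          2035071108 ns
    drm-engine-capacity-copy: 2
    drm-cycles-render:        123456789
    drm-maxfreq-render:       1200 MHz
    drm-memory-system:        12400 KiB
    drm-total-system:         18432 KiB
    drm-resident-system:      12400 KiB
    drm-active-system:        4096 KiB
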
@@ -6981,8 +6981,7 @@ F: Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
F: Documentation/devicetree/bindings/display/renesas,du.yaml
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
F: drivers/gpu/drm/renesas/
F: include/linux/platform_data/shmob_drm.h

DRM DRIVERS FOR ROCKCHIP
@@ -17388,6 +17387,8 @@ F: include/dt-bindings/clock/qcom,*

QUALCOMM CLOUD AI (QAIC) DRIVER
M: Jeffrey Hugo <quic_jhugo@quicinc.com>
R: Carl Vanderlip <quic_carlv@quicinc.com>
R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported

@@ -27,12 +27,6 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
return -EINVAL;
}

if (!hdev->mmu_enable) {
dev_err_ratelimited(hdev->dev,
"Cannot map CB because MMU is disabled\n");
return -EINVAL;
}

if (cb->is_mmu_mapped)
return 0;

@@ -280,14 +280,8 @@ bool cs_needs_timeout(struct hl_cs *cs)

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
/*
* Patched CB is created for external queues jobs, and for H/W queues
* jobs if the user CB was allocated by driver and MMU is disabled.
*/
return (job->queue_type == QUEUE_TYPE_EXT ||
(job->queue_type == QUEUE_TYPE_HW &&
job->is_kernel_allocated_cb &&
!hdev->mmu_enable));
/* Patched CB is created for external queues jobs */
return (job->queue_type == QUEUE_TYPE_EXT);
}

/*
@@ -363,14 +357,13 @@ static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
}
}

/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
* enabled, the user CB isn't released in cs_parser() and thus should be
/* For H/W queue jobs, if a user CB was allocated by driver,
* the user CB isn't released in cs_parser() and thus should be
* released here. This is also true for INT queues jobs which were
* allocated by driver.
*/
if ((job->is_kernel_allocated_cb &&
((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
job->queue_type == QUEUE_TYPE_INT))) {
if (job->is_kernel_allocated_cb &&
(job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) {
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
}
@@ -804,12 +797,14 @@ static void cs_do_release(struct kref *ref)

static void cs_timedout(struct work_struct *work)
{
struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work);
bool skip_reset_on_timeout, device_reset = false;
struct hl_device *hdev;
u64 event_mask = 0x0;
uint timeout_sec;
int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;

skip_reset_on_timeout = cs->skip_reset_on_timeout;

rc = cs_get_unless_zero(cs);
if (!rc)
@@ -840,29 +835,31 @@ static void cs_timedout(struct work_struct *work)
event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
}

timeout_sec = jiffies_to_msecs(hdev->timeout_jiffies) / 1000;

switch (cs->type) {
case CS_TYPE_SIGNAL:
dev_err(hdev->dev,
"Signal command submission %llu has not finished in time!\n",
cs->sequence);
"Signal command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;

case CS_TYPE_WAIT:
dev_err(hdev->dev,
"Wait command submission %llu has not finished in time!\n",
cs->sequence);
"Wait command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;

case CS_TYPE_COLLECTIVE_WAIT:
dev_err(hdev->dev,
"Collective Wait command submission %llu has not finished in time!\n",
cs->sequence);
"Collective Wait command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;

default:
dev_err(hdev->dev,
"Command submission %llu has not finished in time!\n",
cs->sequence);
"Command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;
}

@@ -1139,11 +1136,10 @@ static void force_complete_cs(struct hl_device *hdev)
spin_unlock(&hdev->cs_mirror_lock);
}

void hl_abort_waitings_for_completion(struct hl_device *hdev)
void hl_abort_waiting_for_cs_completions(struct hl_device *hdev)
{
force_complete_cs(hdev);
force_complete_multi_cs(hdev);
hl_release_pending_user_interrupts(hdev);
}

static void job_wq_completion(struct work_struct *work)
@@ -1948,8 +1944,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
else
cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);

cb = hl_cb_kernel_create(hdev, cb_size,
q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW);
if (!cb) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
@@ -2152,7 +2147,7 @@ static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)

hdev->asic_funcs->hw_queues_unlock(hdev);
rc = -EINVAL;
goto out;
goto out_unlock;
}

/*
@@ -2167,15 +2162,21 @@ static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)

/* Release the id and free allocated memory of the handle */
idr_remove(&mgr->handles, handle_id);

/* unlock before calling ctx_put, where we might sleep */
spin_unlock(&mgr->lock);
hl_ctx_put(encaps_sig_hdl->ctx);
kfree(encaps_sig_hdl);
goto out;
} else {
rc = -EINVAL;
dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
}
out:

out_unlock:
spin_unlock(&mgr->lock);

out:
return rc;
}

@@ -255,9 +255,6 @@ static int vm_show(struct seq_file *s, void *data)
u64 j;
int i;

if (!dev_entry->hdev->mmu_enable)
return 0;

mutex_lock(&dev_entry->ctx_mem_hash_mutex);

list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
@@ -436,9 +433,6 @@ static int mmu_show(struct seq_file *s, void *data)
u64 virt_addr = dev_entry->mmu_addr, phys_addr;
int i;

if (!hdev->mmu_enable)
return 0;

if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
ctx = hdev->kernel_ctx;
else
@@ -496,9 +490,6 @@ static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
char *c;
ssize_t rc;

if (!hdev->mmu_enable)
return count;

if (count > sizeof(kbuf) - 1)
goto err;
if (copy_from_user(kbuf, buf, count))
@@ -535,9 +526,6 @@ static int mmu_ack_error(struct seq_file *s, void *data)
struct hl_device *hdev = dev_entry->hdev;
int rc;

if (!hdev->mmu_enable)
return 0;

if (!dev_entry->mmu_cap_mask) {
dev_err(hdev->dev, "mmu_cap_mask is not set\n");
goto err;
@@ -563,9 +551,6 @@ static ssize_t mmu_ack_error_value_write(struct file *file,
char kbuf[MMU_KBUF_SIZE];
ssize_t rc;

if (!hdev->mmu_enable)
return count;

if (count > sizeof(kbuf) - 1)
goto err;

@@ -661,9 +646,6 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;

if (!hdev->mmu_enable)
goto out;

if (prop->dram_supports_virtual_memory &&
(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
return true;
@@ -675,7 +657,7 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
if (addr >= prop->pmmu_huge.start_addr &&
addr < prop->pmmu_huge.end_addr)
return true;
out:

return false;
}

@@ -685,9 +667,6 @@ static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 dram_start_addr, dram_end_addr;

if (!hdev->mmu_enable)
return false;

if (prop->dram_supports_virtual_memory) {
dram_start_addr = prop->dmmu.start_addr;
dram_end_addr = prop->dmmu.end_addr;
@@ -1756,17 +1735,15 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
}
}

void hl_debugfs_add_device(struct hl_device *hdev)
int hl_debugfs_device_init(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
int count = ARRAY_SIZE(hl_debugfs_list);

dev_entry->hdev = hdev;
dev_entry->entry_arr = kmalloc_array(count,
sizeof(struct hl_debugfs_entry),
GFP_KERNEL);
dev_entry->entry_arr = kmalloc_array(count, sizeof(struct hl_debugfs_entry), GFP_KERNEL);
if (!dev_entry->entry_arr)
return;
return -ENOMEM;

dev_entry->data_dma_blob_desc.size = 0;
dev_entry->data_dma_blob_desc.data = NULL;
@@ -1787,21 +1764,14 @@ void hl_debugfs_add_device(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);

dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
hl_debug_root);

add_files_to_device(hdev, dev_entry, dev_entry->root);
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
return 0;
}

void hl_debugfs_remove_device(struct hl_device *hdev)
void hl_debugfs_device_fini(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
int i;

debugfs_remove_recursive(entry->root);

mutex_destroy(&entry->ctx_mem_hash_mutex);
mutex_destroy(&entry->file_mutex);

@@ -1814,6 +1784,24 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
kfree(entry->entry_arr);
}

void hl_debugfs_add_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), hl_debug_root);

add_files_to_device(hdev, dev_entry, dev_entry->root);
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
}

void hl_debugfs_remove_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;

debugfs_remove_recursive(entry->root);
}

void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

@@ -674,7 +674,7 @@ static int device_init_cdev(struct hl_device *hdev, struct class *class,
return 0;
}

static int device_cdev_sysfs_add(struct hl_device *hdev)
static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
{
int rc;

@@ -699,7 +699,9 @@ static int device_cdev_sysfs_add(struct hl_device *hdev)
goto delete_ctrl_cdev_device;
}

hdev->cdev_sysfs_created = true;
hl_debugfs_add_device(hdev);

hdev->cdev_sysfs_debugfs_created = true;

return 0;

@@ -710,11 +712,12 @@ static int device_cdev_sysfs_add(struct hl_device *hdev)
return rc;
}

static void device_cdev_sysfs_del(struct hl_device *hdev)
static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
{
if (!hdev->cdev_sysfs_created)
if (!hdev->cdev_sysfs_debugfs_created)
goto put_devices;

hl_debugfs_remove_device(hdev);
hl_sysfs_fini(hdev);
cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
cdev_device_del(&hdev->cdev, hdev->dev);
@@ -981,6 +984,18 @@ static void device_early_fini(struct hl_device *hdev)
hdev->asic_funcs->early_fini(hdev);
}

static bool is_pci_link_healthy(struct hl_device *hdev)
{
u16 vendor_id;

if (!hdev->pdev)
return false;

pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);

return (vendor_id == PCI_VENDOR_ID_HABANALABS);
}

static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
@@ -995,7 +1010,8 @@ static void hl_device_heartbeat(struct work_struct *work)
goto reschedule;

if (hl_device_operational(hdev, NULL))
dev_err(hdev->dev, "Device heartbeat failed!\n");
dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
is_pci_link_healthy(hdev) ? "healthy" : "broken");

info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
info.event_mask = &event_mask;
@@ -1157,6 +1173,16 @@ static void take_release_locks(struct hl_device *hdev)
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}

static void hl_abort_waiting_for_completions(struct hl_device *hdev)
{
hl_abort_waiting_for_cs_completions(hdev);

/* Release all pending user interrupts, each pending user interrupt
* holds a reference to a user context.
*/
hl_release_pending_user_interrupts(hdev);
}

static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
bool skip_wq_flush)
{
@@ -1176,10 +1202,7 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r
/* flush the MMU prefetch workqueue */
flush_workqueue(hdev->prefetch_wq);

/* Release all pending user interrupts, each pending user interrupt
* holds a reference to user context
*/
hl_release_pending_user_interrupts(hdev);
hl_abort_waiting_for_completions(hdev);
}

/*
@@ -1921,7 +1944,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)

hl_ctx_put(ctx);

hl_abort_waitings_for_completion(hdev);
hl_abort_waiting_for_completions(hdev);

return 0;

@@ -2034,7 +2057,7 @@ static int create_cdev(struct hl_device *hdev)
int hl_device_init(struct hl_device *hdev)
{
int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
bool add_cdev_sysfs_on_err = false;
bool expose_interfaces_on_err = false;

rc = create_cdev(hdev);
if (rc)
@@ -2150,16 +2173,22 @@ int hl_device_init(struct hl_device *hdev)
hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;

hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
hl_debugfs_add_device(hdev);

/* debugfs nodes are created in hl_ctx_init so it must be called after
* hl_debugfs_add_device.
rc = hl_debugfs_device_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");
kfree(hdev->kernel_ctx);
goto mmu_fini;
}

/* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after
* hl_debugfs_device_init().
*/
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
kfree(hdev->kernel_ctx);
goto remove_device_from_debugfs;
goto debugfs_device_fini;
}

rc = hl_cb_pool_init(hdev);
@@ -2175,11 +2204,10 @@ int hl_device_init(struct hl_device *hdev)
}

/*
* From this point, override rc (=0) in case of an error to allow
* debugging (by adding char devices and create sysfs nodes as part of
* the error flow).
* From this point, override rc (=0) in case of an error to allow debugging
* (by adding char devices and creating sysfs/debugfs files as part of the error flow).
*/
add_cdev_sysfs_on_err = true;
expose_interfaces_on_err = true;

/* Device is now enabled as part of the initialization requires
* communication with the device firmware to get information that
@@ -2221,15 +2249,13 @@ int hl_device_init(struct hl_device *hdev)
}

/*
* Expose devices and sysfs nodes to user.
* From here there is no need to add char devices and create sysfs nodes
* in case of an error.
* Expose devices and sysfs/debugfs files to user.
* From here there is no need to expose them in case of an error.
*/
add_cdev_sysfs_on_err = false;
rc = device_cdev_sysfs_add(hdev);
expose_interfaces_on_err = false;
rc = cdev_sysfs_debugfs_add(hdev);
if (rc) {
dev_err(hdev->dev,
"Failed to add char devices and sysfs nodes\n");
dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
rc = 0;
goto out_disabled;
}
@@ -2275,8 +2301,8 @@ int hl_device_init(struct hl_device *hdev)
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
remove_device_from_debugfs:
hl_debugfs_remove_device(hdev);
debugfs_device_fini:
hl_debugfs_device_fini(hdev);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
@@ -2300,15 +2326,11 @@ int hl_device_init(struct hl_device *hdev)
put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
if (add_cdev_sysfs_on_err)
device_cdev_sysfs_add(hdev);
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device %s is NOT usable !\n",
hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
else
pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n",
hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
if (expose_interfaces_on_err)
cdev_sysfs_debugfs_add(hdev);
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device %s is NOT usable !\n",
hdev->cdev_idx, dev_name(&hdev->pdev->dev));

return rc;
}
@@ -2427,8 +2449,6 @@ void hl_device_fini(struct hl_device *hdev)
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
dev_err(hdev->dev, "kernel ctx is still alive\n");

hl_debugfs_remove_device(hdev);

hl_dec_fini(hdev);

hl_vm_fini(hdev);
@@ -2453,8 +2473,10 @@ void hl_device_fini(struct hl_device *hdev)

device_early_fini(hdev);

/* Hide devices and sysfs nodes from user */
device_cdev_sysfs_del(hdev);
/* Hide devices and sysfs/debugfs files from user */
cdev_sysfs_debugfs_remove(hdev);

hl_debugfs_device_fini(hdev);

pr_info("removed device successfully\n");
}
@@ -2667,3 +2689,11 @@ void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
if (info->event_mask)
*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
}

void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
{
vfree(captured_err_info->page_fault_info.user_mappings);
memset(captured_err_info, 0, sizeof(struct hl_error_info));
atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
captured_err_info->undef_opcode.write_enable = true;
}

@@ -71,38 +71,124 @@ static char *extract_fw_ver_from_str(const char *fw_str)
return NULL;
}

/**
* extract_u32_until_given_char() - given a string of the format "<u32><char>*", extract the u32.
* @str: the given string
* @ver_num: the pointer to the extracted u32 to be returned to the caller.
* @given_char: the given char at the end of the u32 in the string
*
* Return: Upon success, return a pointer to the given_char in the string. Upon failure, return NULL
*/
static char *extract_u32_until_given_char(char *str, u32 *ver_num, char given_char)
{
char num_str[8] = {}, *ch;

ch = strchrnul(str, given_char);
if (*ch == '\0' || ch == str || ch - str >= sizeof(num_str))
return NULL;

memcpy(num_str, str, ch - str);
if (kstrtou32(num_str, 10, ver_num))
return NULL;
return ch;
}

/**
* hl_get_sw_major_minor_subminor() - extract the FW's SW version major, minor, sub-minor
* from the version string
* @hdev: pointer to the hl_device
* @fw_str: the FW's version string
*
* The extracted version is set in the hdev fields: fw_sw_{major/minor/sub_minor}_ver.
*
* fw_str is expected to have one of two possible formats, examples:
* 1) 'Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3'
* 2) 'Preboot version hl-gaudi2-1.9.0-rc-fw-42.0.1-sec-3'
* In those examples, the SW major,minor,subminor are correspondingly: 1,9,0.
*
* Return: 0 for success or a negative error code for failure.
*/
static int hl_get_sw_major_minor_subminor(struct hl_device *hdev, const char *fw_str)
{
char *end, *start;

end = strnstr(fw_str, "-rc-", VERSION_MAX_LEN);
if (end == fw_str)
return -EINVAL;

if (!end)
end = strnstr(fw_str, "-fw-", VERSION_MAX_LEN);

if (end == fw_str)
return -EINVAL;

if (!end)
return -EINVAL;

for (start = end - 1; start != fw_str; start--) {
if (*start == '-')
break;
}

if (start == fw_str)
return -EINVAL;

/* start/end point each to the starting and ending hyphen of the sw version e.g. -1.9.0- */
start++;
start = extract_u32_until_given_char(start, &hdev->fw_sw_major_ver, '.');
if (!start)
goto err_zero_ver;

start++;
start = extract_u32_until_given_char(start, &hdev->fw_sw_minor_ver, '.');
if (!start)
goto err_zero_ver;

start++;
start = extract_u32_until_given_char(start, &hdev->fw_sw_sub_minor_ver, '-');
if (!start)
goto err_zero_ver;

return 0;

err_zero_ver:
hdev->fw_sw_major_ver = 0;
hdev->fw_sw_minor_ver = 0;
hdev->fw_sw_sub_minor_ver = 0;
return -EINVAL;
}

/**
* hl_get_preboot_major_minor() - extract the FW's version major, minor from the version string.
* @hdev: pointer to the hl_device
* @preboot_ver: the FW's version string
*
* preboot_ver is expected to be the format of <major>.<minor>.<sub minor>*, e.g: 42.0.1-sec-3
* The extracted version is set in the hdev fields: fw_inner_{major/minor}_ver.
*
* Return: 0 on success, negative error code for failure.
*/
static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
{
char major[8], minor[8], *first_dot, *second_dot;
int rc;

first_dot = strnstr(preboot_ver, ".", 10);
if (first_dot) {
strscpy(major, preboot_ver, first_dot - preboot_ver + 1);
rc = kstrtou32(major, 10, &hdev->fw_major_version);
} else {
rc = -EINVAL;
preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_major_ver, '.');
if (!preboot_ver) {
dev_err(hdev->dev, "Error parsing preboot major version\n");
goto err_zero_ver;
}

if (rc) {
dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
return rc;
preboot_ver++;

preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_minor_ver, '.');
if (!preboot_ver) {
dev_err(hdev->dev, "Error parsing preboot minor version\n");
goto err_zero_ver;
}
return 0;

/* skip the first dot */
first_dot++;

second_dot = strnstr(first_dot, ".", 10);
if (second_dot) {
strscpy(minor, first_dot, second_dot - first_dot + 1);
rc = kstrtou32(minor, 10, &hdev->fw_minor_version);
} else {
rc = -EINVAL;
}

if (rc)
dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
return rc;
err_zero_ver:
hdev->fw_inner_major_ver = 0;
hdev->fw_inner_minor_ver = 0;
return -EINVAL;
}

static int hl_request_fw(struct hl_device *hdev,
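As a standalone illustration of the version-string format documented in the kernel-doc
above (and not of the driver code itself, which walks the string with
extract_u32_until_given_char() and kstrtou32()), the following user-space sketch pulls
the SW and preboot versions out of a string in format (1); the sample string is the one
from the example, and the "-rc-" variant in format (2) is deliberately not handled here:

	#include <stdio.h>

	int main(void)
	{
		/* Example string from the kernel-doc above. */
		const char *fw_str = "Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3";
		unsigned int sw_major, sw_minor, sw_sub, fw_major, fw_minor;

		if (sscanf(fw_str, "Preboot version hl-gaudi2-%u.%u.%u-fw-%u.%u",
			   &sw_major, &sw_minor, &sw_sub, &fw_major, &fw_minor) == 5)
			printf("SW %u.%u.%u, preboot %u.%u\n",
			       sw_major, sw_minor, sw_sub, fw_major, fw_minor);

		return 0;
	}

Running it prints "SW 1.9.0, preboot 42.0", matching the values the two helpers above
store in fw_sw_{major/minor/sub_minor}_ver and fw_inner_{major/minor}_ver.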
@@ -505,6 +591,20 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
size);
}

int hl_fw_send_soft_reset(struct hl_device *hdev)
{
struct cpucp_packet pkt;
int rc;

memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_SOFT_RESET << CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc)
dev_err(hdev->dev, "failed to send soft-reset msg (err = %d)\n", rc);

return rc;
}

int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
{
struct cpucp_packet pkt;
@@ -1268,8 +1368,10 @@ void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)

void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
{
struct static_fw_load_mgr *static_loader =
&hdev->fw_loader.static_loader;
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
u32 status, cpu_boot_status_reg, cpu_timeout;
struct static_fw_load_mgr *static_loader;
struct pre_fw_load_props *pre_fw_load;
int rc;

if (hdev->device_cpu_is_halted)
@@ -1277,12 +1379,28 @@ void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)

/* Stop device CPU to make sure nothing bad happens */
if (hdev->asic_prop.dynamic_fw_load) {
pre_fw_load = &fw_loader->pre_fw_load;
cpu_timeout = fw_loader->cpu_timeout;
cpu_boot_status_reg = pre_fw_load->cpu_boot_status_reg;

rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
COMMS_GOTO_WFE, 0, false,
hdev->fw_loader.cpu_timeout);
if (rc)
COMMS_GOTO_WFE, 0, false, cpu_timeout);
if (rc) {
dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
} else {
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
status,
status == CPU_BOOT_STATUS_IN_WFE,
hdev->fw_poll_interval_usec,
cpu_timeout);
if (rc)
dev_err(hdev->dev, "Current status=%u. Timed-out updating to WFE\n",
status);
}
} else {
static_loader = &hdev->fw_loader.static_loader;
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);

@@ -2151,6 +2269,7 @@ static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
struct asic_fixed_properties *prop = &hdev->asic_prop;
char *preboot_ver, *boot_ver;
char btl_ver[32];
int rc;

switch (fwc) {
case FW_COMP_BOOT_FIT:
@@ -2164,20 +2283,20 @@ static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
break;
case FW_COMP_PREBOOT:
strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
preboot_ver = strnstr(prop->preboot_ver, "Preboot",
VERSION_MAX_LEN);
preboot_ver = strnstr(prop->preboot_ver, "Preboot", VERSION_MAX_LEN);
dev_info(hdev->dev, "preboot full version: '%s'\n", preboot_ver);

if (preboot_ver && preboot_ver != prop->preboot_ver) {
strscpy(btl_ver, prop->preboot_ver,
min((int) (preboot_ver - prop->preboot_ver), 31));
dev_info(hdev->dev, "%s\n", btl_ver);
}

rc = hl_get_sw_major_minor_subminor(hdev, preboot_ver);
if (rc)
return rc;
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
int rc;

dev_info(hdev->dev, "preboot version %s\n", preboot_ver);

rc = hl_get_preboot_major_minor(hdev, preboot_ver);
kfree(preboot_ver);
if (rc)
@@ -2367,16 +2486,6 @@ static int hl_fw_dynamic_load_image(struct hl_device *hdev,
if (rc)
goto release_fw;

/* update state according to boot stage */
if (cur_fwc == FW_COMP_BOOT_FIT) {
struct cpu_dyn_regs *dyn_regs;

dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
hl_fw_boot_fit_update_state(hdev,
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
}

/* copy boot fit to space allocated by FW */
rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
if (rc)
@@ -2679,6 +2788,14 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
}

rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
if (rc)
goto protocol_err;

hl_fw_boot_fit_update_state(hdev,
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));

/*
* when testing FW load (without Linux) on PLDM we don't want to
* wait until boot fit is active as it may take several hours.
@@ -2688,10 +2805,6 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
return 0;

rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
if (rc)
goto protocol_err;

/* Enable DRAM scrambling before Linux boot and after successful
* UBoot
*/
@@ -2725,7 +2838,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
if (rc)
goto protocol_err;

hl_fw_linux_update_state(hdev, le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
hl_fw_linux_update_state(hdev,
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));

hl_fw_dynamic_update_linux_interrupt_if(hdev);

@@ -36,6 +36,8 @@
struct hl_device;
struct hl_fpriv;

#define PCI_VENDOR_ID_HABANALABS 0x1da3

/* Use upper bits of mmap offset to store habana driver specific information.
* bits[63:59] - Encode mmap type
* bits[45:0] - mmap offset value
@@ -113,18 +115,6 @@ enum hl_mmu_page_table_location {
MMU_NUM_PGT_LOCATIONS /* num of PGT locations */
};

/**
* enum hl_mmu_enablement - what mmu modules to enable
* @MMU_EN_NONE: mmu disabled.
* @MMU_EN_ALL: enable all.
* @MMU_EN_PMMU_ONLY: Enable only the PMMU leaving the DMMU disabled.
*/
enum hl_mmu_enablement {
MMU_EN_NONE = 0,
MMU_EN_ALL = 1,
MMU_EN_PMMU_ONLY = 3, /* N/A for Goya/Gaudi */
};

/*
* HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
* HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
@@ -2568,12 +2558,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
ktime_t __timeout; \
u32 __elbi_read; \
int __rc = 0; \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
__timeout = ktime_add_us(ktime_get(),\
min((u64)(timeout_us * 10), \
(u64) HL_SIM_MAX_TIMEOUT_US)); \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
might_sleep_if(sleep_us); \
for (;;) { \
if (elbi) { \
@@ -2625,13 +2610,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
u8 __arr_idx; \
int __rc = 0; \
\
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
__timeout = ktime_add_us(ktime_get(),\
min(((u64)timeout_us * 10), \
(u64) HL_SIM_MAX_TIMEOUT_US)); \
\
__timeout = ktime_add_us(ktime_get(), timeout_us); \
might_sleep_if(sleep_us); \
if (arr_size >= 64) \
__rc = -EINVAL; \
@@ -2689,12 +2668,8 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
mem_written_by_device) \
({ \
ktime_t __timeout; \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
__timeout = ktime_add_us(ktime_get(),\
min((u64)(timeout_us * 100), \
(u64) HL_SIM_MAX_TIMEOUT_US)); \
\
__timeout = ktime_add_us(ktime_get(), timeout_us); \
might_sleep_if(sleep_us); \
for (;;) { \
/* Verify we read updates done by other cores or by device */ \
@@ -3225,8 +3200,11 @@ struct hl_reset_info {
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_major_version: major version of current loaded preboot.
* @fw_minor_version: minor version of current loaded preboot.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
* @fw_inner_minor_ver: the minor of current loaded preboot inner version.
* @fw_sw_major_ver: the major of current loaded preboot SW version.
* @fw_sw_minor_ver: the minor of current loaded preboot SW version.
* @fw_sw_sub_minor_ver: the sub-minor of current loaded preboot SW version.
* @dram_used_mem: current DRAM memory consumption.
* @memory_scrub_val: the value to which the dram will be scrubbed to using cb scrub_device_dram
* @timeout_jiffies: device CS timeout value.
@@ -3287,7 +3265,7 @@ struct hl_reset_info {
* @in_debug: whether the device is in a state where the profiling/tracing infrastructure
* can be used. This indication is needed because in some ASICs we need to do
* specific operations to enable that infrastructure.
* @cdev_sysfs_created: were char devices and sysfs nodes created.
* @cdev_sysfs_debugfs_created: were char devices and sysfs/debugfs files created.
* @stop_on_err: true if engines should stop on error.
* @supports_sync_stream: is sync stream supported.
* @sync_stream_queue_idx: helper index for sync stream queues initialization.
@@ -3314,7 +3292,7 @@ struct hl_reset_info {
* @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
* @fw_components: Controls which f/w components to load to the device. There are multiple f/w
* stages and sometimes we want to stop at a certain stage. Used only for testing.
* @mmu_enable: Whether to enable or disable the device MMU(s). Used only for testing.
* @mmu_disable: Disable the device MMU(s). Used only for testing.
* @cpu_queues_enable: Whether to enable queues communication vs. the f/w. Used only for testing.
* @pldm: Whether we are running in Palladium environment. Used only for testing.
* @hard_reset_on_fw_events: Whether to do device hard-reset when a fatal event is received from
@@ -3412,8 +3390,11 @@ struct hl_device {
struct hl_reset_info reset_info;

u32 *stream_master_qid_arr;
u32 fw_major_version;
u32 fw_minor_version;
u32 fw_inner_major_ver;
u32 fw_inner_minor_ver;
u32 fw_sw_major_ver;
u32 fw_sw_minor_ver;
u32 fw_sw_sub_minor_ver;
atomic64_t dram_used_mem;
u64 memory_scrub_val;
u64 timeout_jiffies;
@@ -3451,7 +3432,7 @@ struct hl_device {
u8 init_done;
u8 device_cpu_disabled;
u8 in_debug;
u8 cdev_sysfs_created;
u8 cdev_sysfs_debugfs_created;
u8 stop_on_err;
u8 supports_sync_stream;
u8 sync_stream_queue_idx;
@@ -3474,7 +3455,7 @@ struct hl_device {
/* Parameters for bring-up to be upstreamed */
u64 nic_ports_mask;
u64 fw_components;
u8 mmu_enable;
u8 mmu_disable;
u8 cpu_queues_enable;
u8 pldm;
u8 hard_reset_on_fw_events;
@@ -3547,9 +3528,15 @@ struct hl_ioctl_desc {
hl_ioctl_t *func;
};

static inline bool hl_is_fw_ver_below_1_9(struct hl_device *hdev)
static inline bool hl_is_fw_sw_ver_below(struct hl_device *hdev, u32 fw_sw_major, u32 fw_sw_minor)
{
return (hdev->fw_major_version < 42);
if (hdev->fw_sw_major_ver < fw_sw_major)
return true;
if (hdev->fw_sw_major_ver > fw_sw_major)
return false;
if (hdev->fw_sw_minor_ver < fw_sw_minor)
return true;
return false;
}
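For context, this helper replaces the hard-coded hl_is_fw_ver_below_1_9() check shown
above; the comparison is plain major-then-minor ordering, as in the following standalone
sketch (illustrative only, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Returns true when major.minor sorts strictly before ref_major.ref_minor. */
	static bool ver_below(unsigned int major, unsigned int minor,
			      unsigned int ref_major, unsigned int ref_minor)
	{
		if (major != ref_major)
			return major < ref_major;
		return minor < ref_minor;
	}

	int main(void)
	{
		/* 1.8 is below 1.9; 1.9 and 2.0 are not. */
		printf("%d %d %d\n", ver_below(1, 8, 1, 9),
		       ver_below(1, 9, 1, 9), ver_below(2, 0, 1, 9));
		return 0;
	}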

/*
@@ -3813,8 +3800,6 @@ struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
u64 curr_pte, bool *is_new_hop);
int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
struct hl_hr_mmu_funcs *hr_func);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
@@ -3872,6 +3857,7 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
int hl_fw_send_device_activity(struct hl_device *hdev, bool open);
int hl_fw_send_soft_reset(struct hl_device *hdev);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
@@ -3921,7 +3907,7 @@ void hl_dec_fini(struct hl_device *hdev);
void hl_dec_ctx_fini(struct hl_ctx *ctx);

void hl_release_pending_user_interrupts(struct hl_device *hdev);
void hl_abort_waitings_for_completion(struct hl_device *hdev);
void hl_abort_waiting_for_cs_completions(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);

@@ -3958,11 +3944,14 @@ void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_
u64 *event_mask);
void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask);
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);

#ifdef CONFIG_DEBUG_FS

void hl_debugfs_init(void);
void hl_debugfs_fini(void);
int hl_debugfs_device_init(struct hl_device *hdev);
void hl_debugfs_device_fini(struct hl_device *hdev);
void hl_debugfs_add_device(struct hl_device *hdev);
void hl_debugfs_remove_device(struct hl_device *hdev);
void hl_debugfs_add_file(struct hl_fpriv *hpriv);

@@ -13,6 +13,7 @@

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/habanalabs.h>
@@ -54,8 +55,6 @@ module_param(boot_error_status_mask, ulong, 0444);
MODULE_PARM_DESC(boot_error_status_mask,
"Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");

#define PCI_VENDOR_ID_HABANALABS 0x1da3

#define PCI_IDS_GOYA 0x0001
#define PCI_IDS_GAUDI 0x1000
#define PCI_IDS_GAUDI_SEC 0x1010
@@ -220,9 +219,7 @@ int hl_device_open(struct inode *inode, struct file *filp)

hl_debugfs_add_file(hpriv);

memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info));
atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
hdev->captured_err_info.undef_opcode.write_enable = true;
hl_enable_err_info_capture(&hdev->captured_err_info);

hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -307,7 +304,6 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
{
hdev->nic_ports_mask = 0;
hdev->fw_components = FW_TYPE_ALL_TYPES;
hdev->mmu_enable = MMU_EN_ALL;
hdev->cpu_queues_enable = 1;
hdev->pldm = 0;
hdev->hard_reset_on_fw_events = 1;
@@ -382,7 +378,6 @@ static int fixup_device_params(struct hl_device *hdev)
/* If CPU queues not enabled, no way to do heartbeat */
if (!hdev->cpu_queues_enable)
hdev->heartbeat = 0;

fixup_device_params_per_asic(hdev, tmp_timeout);

return 0;

@@ -62,7 +62,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
hw_ip.sram_base_address = prop->sram_user_base_address;
hw_ip.dram_base_address =
hdev->mmu_enable && prop->dram_supports_virtual_memory ?
prop->dram_supports_virtual_memory ?
prop->dmmu.start_addr : prop->dram_user_base_address;
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;
@@ -71,11 +71,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)

dram_available_size = prop->dram_size - dram_kmd_size;

if (hdev->mmu_enable == MMU_EN_ALL)
hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
prop->dram_page_size) * prop->dram_page_size;
else
hw_ip.dram_size = dram_available_size;
hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) *
prop->dram_page_size;

if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
@@ -842,15 +839,15 @@ static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
struct hw_err_info *info;
int rc;

if ((!user_buf_size) || (!user_buf))
if (!user_buf)
return -EINVAL;

if (user_buf_size < sizeof(struct hl_info_hw_err_event))
return -ENOMEM;

info = &hdev->captured_err_info.hw_err;
if (!info->event_info_available)
return -ENOENT;
return 0;

if (user_buf_size < sizeof(struct hl_info_hw_err_event))
return -ENOMEM;

rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
return rc ? -EFAULT : 0;
@@ -864,15 +861,15 @@ static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
struct fw_err_info *info;
int rc;

if ((!user_buf_size) || (!user_buf))
if (!user_buf)
return -EINVAL;

if (user_buf_size < sizeof(struct hl_info_fw_err_event))
return -ENOMEM;

info = &hdev->captured_err_info.fw_err;
if (!info->event_info_available)
return -ENOENT;
return 0;

if (user_buf_size < sizeof(struct hl_info_fw_err_event))
return -ENOMEM;

rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
return rc ? -EFAULT : 0;
@@ -1198,7 +1195,7 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,

out_err:
if (retcode)
dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);

if (kdata != stack_kdata)
@@ -1222,7 +1219,7 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
ioctl = &hl_ioctls[nr];
} else {
dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
dev_dbg_ratelimited(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
task_pid_nr(current), nr);
return -ENOTTY;
}
@@ -1245,7 +1242,7 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
if (nr == _IOC_NR(HL_IOCTL_INFO)) {
ioctl = &hl_ioctls_control[nr];
} else {
dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
task_pid_nr(current), nr);
return -ENOTTY;
}
@@ -430,7 +430,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
if ((hdev->event_queue.check_eqe_index) &&
(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
dev_dbg(hdev->dev,
dev_err(hdev->dev,
"EQE %#x in queue is ready but index does not match %d!=%d",
cur_eqe,
((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
@@ -1034,30 +1034,6 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
}
}

static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *paddr)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
u32 handle;

handle = lower_32_bits(args->map_device.handle);
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
if (!phys_pg_pack) {
spin_unlock(&vm->idr_lock);
dev_err(hdev->dev, "no match for handle %u\n", handle);
return -EINVAL;
}

*paddr = phys_pg_pack->pages[0];

spin_unlock(&vm->idr_lock);

return 0;
}

/**
* map_device_va() - map the given memory.
* @ctx: pointer to the context structure.
@@ -2094,76 +2070,6 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 o
return rc;
}

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u64 block_handle, device_addr = 0;
struct hl_ctx *ctx = hpriv->ctx;
u32 handle = 0, block_size;
int rc;

switch (args->in.op) {
case HL_MEM_OP_ALLOC:
if (args->in.alloc.mem_size == 0) {
dev_err(hdev->dev, "alloc size must be larger than 0\n");
rc = -EINVAL;
goto out;
}

/* Force contiguous as there are no real MMU
* translations to overcome physical memory gaps
*/
args->in.flags |= HL_MEM_CONTIGUOUS;
rc = alloc_device_memory(ctx, &args->in, &handle);

memset(args, 0, sizeof(*args));
args->out.handle = (__u64) handle;
break;

case HL_MEM_OP_FREE:
rc = free_device_memory(ctx, &args->in);
break;

case HL_MEM_OP_MAP:
if (args->in.flags & HL_MEM_USERPTR) {
dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n");
rc = -EPERM;
} else {
rc = get_paddr_from_handle(ctx, &args->in, &device_addr);
memset(args, 0, sizeof(*args));
args->out.device_virt_addr = device_addr;
}

break;

case HL_MEM_OP_UNMAP:
rc = 0;
break;

case HL_MEM_OP_MAP_BLOCK:
rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size);
args->out.block_handle = block_handle;
args->out.block_size = block_size;
break;

case HL_MEM_OP_EXPORT_DMABUF_FD:
dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n");
rc = -EPERM;
break;

case HL_MEM_OP_TS_ALLOC:
rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
break;
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -EINVAL;
break;
}

out:
return rc;
}

static void ts_buff_release(struct hl_mmap_mem_buf *buf)
{
struct hl_ts_buff *ts_buff = buf->private;
@@ -2282,9 +2188,6 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
return -EBUSY;
}

if (!hdev->mmu_enable)
return mem_ioctl_no_mmu(hpriv, args);

switch (args->in.op) {
case HL_MEM_OP_ALLOC:
if (args->in.alloc.mem_size == 0) {
@@ -2779,13 +2682,10 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
atomic64_set(&ctx->dram_phys_mem, 0);

/*
* - If MMU is enabled, init the ranges as usual.
* - If MMU is disabled, in case of host mapping, the returned address
* is the given one.
* In case of DRAM mapping, the returned address is the physical
* address of the memory related to the given handle.
*/
if (!ctx->hdev->mmu_enable)
if (ctx->hdev->mmu_disable)
return 0;

dram_range_start = prop->dmmu.start_addr;
@@ -2835,7 +2735,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
struct hl_mem_in args;
int i;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return;

hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
@@ -44,7 +44,7 @@ int hl_mmu_init(struct hl_device *hdev)
{
int rc = -EOPNOTSUPP;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return 0;

mutex_init(&hdev->mmu_lock);
@@ -82,7 +82,7 @@ int hl_mmu_init(struct hl_device *hdev)
*/
void hl_mmu_fini(struct hl_device *hdev)
{
if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return;

if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
@@ -107,7 +107,7 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
struct hl_device *hdev = ctx->hdev;
int rc = -EOPNOTSUPP;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return 0;

if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
@@ -145,7 +145,7 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return;

if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
@@ -233,7 +233,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flu
u64 real_virt_addr;
bool is_dram_addr;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return 0;

is_dram_addr = hl_is_dram_va(hdev, virt_addr);
@@ -301,7 +301,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_s
bool is_dram_addr;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return 0;

is_dram_addr = hl_is_dram_va(hdev, virt_addr);
@@ -472,46 +472,6 @@ int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
return rc;
}

/*
* hl_mmu_swap_out - marks all mapping of the given ctx as swapped out
*
* @ctx: pointer to the context structure
*
*/
void hl_mmu_swap_out(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;

if (!hdev->mmu_enable)
return;

if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL)
hdev->mmu_func[MMU_DR_PGT].swap_out(ctx);

if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL)
hdev->mmu_func[MMU_HR_PGT].swap_out(ctx);
}

/*
* hl_mmu_swap_in - marks all mapping of the given ctx as swapped in
*
* @ctx: pointer to the context structure
*
*/
void hl_mmu_swap_in(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;

if (!hdev->mmu_enable)
return;

if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL)
hdev->mmu_func[MMU_DR_PGT].swap_in(ctx);

if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL)
hdev->mmu_func[MMU_HR_PGT].swap_in(ctx);
}

static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops,
u64 *phys_addr)
@@ -594,7 +554,7 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
int pgt_residency, rc;
bool is_dram_addr;

if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return -EOPNOTSUPP;

prop = &hdev->asic_prop;
@@ -625,7 +585,7 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,

int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
if (!hdev->mmu_enable)
if (hdev->mmu_disable)
return 0;

switch (hdev->asic_type) {
@ -284,14 +284,14 @@ void hl_secure_block(struct hl_device *hdev,
|
||||
* @instance_offset: offset between instances
|
||||
* @pb_blocks: blocks array
|
||||
* @blocks_array_size: blocks array size
|
||||
* @regs_array: register array
|
||||
* @regs_array_size: register array size
|
||||
* @user_regs_array: unsecured register array
|
||||
* @user_regs_array_size: unsecured register array size
|
||||
* @mask: enabled instances mask: 1- enabled, 0- disabled
|
||||
*/
|
||||
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
|
||||
u32 dcore_offset, u32 num_instances, u32 instance_offset,
|
||||
const u32 pb_blocks[], u32 blocks_array_size,
|
||||
const u32 *regs_array, u32 regs_array_size, u64 mask)
|
||||
const u32 *user_regs_array, u32 user_regs_array_size, u64 mask)
|
||||
{
|
||||
int i, j;
|
||||
struct hl_block_glbl_sec *glbl_sec;
|
||||
@ -303,8 +303,8 @@ int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
|
||||
return -ENOMEM;
|
||||
|
||||
hl_secure_block(hdev, glbl_sec, blocks_array_size);
|
||||
hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, pb_blocks,
|
||||
glbl_sec, blocks_array_size);
|
||||
hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 0,
|
||||
pb_blocks, glbl_sec, blocks_array_size);
|
||||
|
||||
/* Fill all blocks with the same configuration */
|
||||
for (i = 0 ; i < num_dcores ; i++) {
|
||||
@ -336,19 +336,19 @@ int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
|
||||
* @instance_offset: offset between instances
|
||||
* @pb_blocks: blocks array
|
||||
* @blocks_array_size: blocks array size
|
||||
* @regs_array: register array
|
||||
* @regs_array_size: register array size
|
||||
* @user_regs_array: unsecured register array
|
||||
* @user_regs_array_size: unsecured register array size
|
||||
*
|
||||
*/
|
||||
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
|
||||
u32 num_instances, u32 instance_offset,
|
||||
const u32 pb_blocks[], u32 blocks_array_size,
|
||||
const u32 *regs_array, u32 regs_array_size)
|
||||
const u32 *user_regs_array, u32 user_regs_array_size)
|
||||
{
|
||||
return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
|
||||
num_instances, instance_offset, pb_blocks,
|
||||
blocks_array_size, regs_array, regs_array_size,
|
||||
ULLONG_MAX);
|
||||
blocks_array_size, user_regs_array,
|
||||
user_regs_array_size, ULLONG_MAX);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -364,15 +364,15 @@ int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
|
||||
* @instance_offset: offset between instances
|
||||
* @pb_blocks: blocks array
|
||||
* @blocks_array_size: blocks array size
|
||||
* @regs_range_array: register range array
|
||||
* @regs_range_array_size: register range array size
|
||||
* @user_regs_range_array: unsecured register range array
|
||||
* @user_regs_range_array_size: unsecured register range array size
|
||||
* @mask: enabled instances mask: 1- enabled, 0- disabled
|
||||
*/
|
||||
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
|
||||
u32 dcore_offset, u32 num_instances, u32 instance_offset,
|
||||
const u32 pb_blocks[], u32 blocks_array_size,
|
||||
const struct range *regs_range_array, u32 regs_range_array_size,
|
||||
u64 mask)
|
||||
const struct range *user_regs_range_array,
|
||||
u32 user_regs_range_array_size, u64 mask)
|
||||
{
|
||||
int i, j, rc = 0;
|
||||
struct hl_block_glbl_sec *glbl_sec;
|
||||
@ -384,8 +384,8 @@ int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
|
||||
return -ENOMEM;
|
||||
|
||||
hl_secure_block(hdev, glbl_sec, blocks_array_size);
|
||||
rc = hl_unsecure_registers_range(hdev, regs_range_array,
|
||||
regs_range_array_size, 0, pb_blocks, glbl_sec,
|
||||
rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
|
||||
user_regs_range_array_size, 0, pb_blocks, glbl_sec,
|
||||
blocks_array_size);
|
||||
if (rc)
|
||||
goto free_glbl_sec;
|
||||
@ -422,19 +422,20 @@ int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
|
||||
* @instance_offset: offset between instances
|
||||
* @pb_blocks: blocks array
|
||||
* @blocks_array_size: blocks array size
|
||||
* @regs_range_array: register range array
|
||||
* @regs_range_array_size: register range array size
|
||||
* @user_regs_range_array: unsecured register range array
|
||||
* @user_regs_range_array_size: unsecured register range array size
|
||||
*
|
||||
*/
|
||||
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
|
||||
u32 dcore_offset, u32 num_instances, u32 instance_offset,
|
||||
const u32 pb_blocks[], u32 blocks_array_size,
|
||||
const struct range *regs_range_array, u32 regs_range_array_size)
|
||||
const struct range *user_regs_range_array,
|
||||
u32 user_regs_range_array_size)
|
||||
{
|
||||
return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
|
||||
num_instances, instance_offset, pb_blocks,
|
||||
blocks_array_size, regs_range_array,
|
||||
regs_range_array_size, ULLONG_MAX);
|
||||
blocks_array_size, user_regs_range_array,
|
||||
user_regs_range_array_size, ULLONG_MAX);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -447,14 +448,14 @@ int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
|
||||
* @instance_offset: offset between instances
|
||||
* @pb_blocks: blocks array
|
||||
* @blocks_array_size: blocks array size
|
||||
* @regs_array: register array
|
||||
* @regs_array_size: register array size
|
||||
* @user_regs_array: unsecured register array
|
||||
* @user_regs_array_size: unsecured register array size
|
||||
*
|
||||
*/
|
||||
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
|
||||
u32 num_instances, u32 instance_offset,
|
||||
const u32 pb_blocks[], u32 blocks_array_size,
|
||||
const u32 *regs_array, u32 regs_array_size)
|
||||
const u32 *user_regs_array, u32 user_regs_array_size)
|
||||
{
|
||||
int i, rc = 0;
|
||||
struct hl_block_glbl_sec *glbl_sec;
|
||||
@ -466,8 +467,8 @@ int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
|
||||
return -ENOMEM;
|
||||
|
||||
hl_secure_block(hdev, glbl_sec, blocks_array_size);
|
||||
rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
|
||||
pb_blocks, glbl_sec, blocks_array_size);
|
||||
rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
|
||||
0, pb_blocks, glbl_sec, blocks_array_size);
|
||||
if (rc)
|
||||
goto free_glbl_sec;
|
||||
|
||||
@ -495,8 +496,8 @@ int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
|
||||
* @instance_offset: offset between instances
|
||||
* @pb_blocks: blocks array
|
||||
* @blocks_array_size: blocks array size
|
||||
* @regs_range_array: register range array
|
||||
* @regs_range_array_size: register range array size
|
||||
* @user_regs_range_array: unsecured register range array
|
||||
* @user_regs_range_array_size: unsecured register range array size
|
||||
*
|
||||
*/
|
||||
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
|
||||
|
||||
@ -114,13 +114,6 @@ static u32 gaudi_stream_master[GAUDI_STREAM_MASTER_ARR_SIZE] = {
|
||||
GAUDI_QUEUE_ID_DMA_1_3
|
||||
};
|
||||
|
||||
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
|
||||
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
|
||||
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
|
||||
"gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3",
|
||||
"gaudi cpu eq"
|
||||
};
|
||||
|
||||
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
|
||||
[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
|
||||
[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
|
||||
@ -1476,8 +1469,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
|
||||
}
|
||||
|
||||
/* Allocate internal mapped CB for non patched CBs */
|
||||
cb = hl_cb_kernel_create(hdev, cb_size,
|
||||
hdev->mmu_enable && !patched_cb);
|
||||
cb = hl_cb_kernel_create(hdev, cb_size, !patched_cb);
|
||||
if (!cb) {
|
||||
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
|
||||
atomic64_inc(&cntr->out_of_mem_drop_cnt);
|
||||
@ -3651,9 +3643,6 @@ static int gaudi_mmu_init(struct hl_device *hdev)
|
||||
u64 hop0_addr;
|
||||
int rc, i;
|
||||
|
||||
if (!hdev->mmu_enable)
|
||||
return 0;
|
||||
|
||||
if (gaudi->hw_cap_initialized & HW_CAP_MMU)
|
||||
return 0;
|
||||
|
||||
|
||||
@ -57,13 +57,13 @@
|
||||
|
||||
#define GAUDI2_NA_EVENT_CAUSE 0xFF
|
||||
#define GAUDI2_NUM_OF_QM_ERR_CAUSE 18
|
||||
#define GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE 25
|
||||
#define GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE 25
|
||||
#define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3
|
||||
#define GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE 14
|
||||
#define GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE 3
|
||||
#define GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE 2
|
||||
#define GAUDI2_NUM_OF_ROT_ERR_CAUSE 22
|
||||
#define GAUDI2_NUM_OF_TPC_INTR_CAUSE 30
|
||||
#define GAUDI2_NUM_OF_TPC_INTR_CAUSE 31
|
||||
#define GAUDI2_NUM_OF_DEC_ERR_CAUSE 25
|
||||
#define GAUDI2_NUM_OF_MME_ERR_CAUSE 16
|
||||
#define GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE 5
|
||||
@ -162,6 +162,9 @@
|
||||
#define PSOC_RAZWI_ENG_STR_SIZE 128
|
||||
#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
|
||||
|
||||
/* HW scrambles only bits 0-25 */
|
||||
#define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26)
|
||||
|
||||
struct gaudi2_razwi_info {
|
||||
u32 axuser_xy;
|
||||
u32 rtr_ctrl;
|
||||
@ -801,7 +804,7 @@ static const char * const gaudi2_qman_error_cause[GAUDI2_NUM_OF_QM_ERR_CAUSE] =
|
||||
"PQC L2H error"
|
||||
};
|
||||
|
||||
static const char * const gaudi2_qman_lower_cp_error_cause[GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE] = {
|
||||
static const char * const gaudi2_lower_qman_error_cause[GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE] = {
|
||||
"RSVD0",
|
||||
"CQ AXI HBW error",
|
||||
"CP AXI HBW error",
|
||||
@ -891,6 +894,7 @@ static const char * const gaudi2_tpc_interrupts_cause[GAUDI2_NUM_OF_TPC_INTR_CAU
|
||||
"invalid_lock_access",
|
||||
"LD_L protection violation",
|
||||
"ST_L protection violation",
|
||||
"D$ L0CS mismatch",
|
||||
};
|
||||
|
||||
static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] = {
|
||||
@ -3615,6 +3619,12 @@ static int gaudi2_sw_init(struct hl_device *hdev)
|
||||
|
||||
prop->supports_compute_reset = true;
|
||||
|
||||
/* Event queue sanity check added in FW version 1.11 */
|
||||
if (hl_is_fw_sw_ver_below(hdev, 1, 11))
|
||||
hdev->event_queue.check_eqe_index = false;
|
||||
else
|
||||
hdev->event_queue.check_eqe_index = true;
|
||||
|
||||
hdev->asic_funcs->set_pci_memory_regions(hdev);
|
||||
|
||||
rc = gaudi2_special_blocks_iterator_config(hdev);
|
||||
@ -3630,8 +3640,8 @@ static int gaudi2_sw_init(struct hl_device *hdev)
|
||||
special_blocks_free:
|
||||
gaudi2_special_blocks_iterator_free(hdev);
|
||||
free_scratchpad_mem:
|
||||
hl_asic_dma_pool_free(hdev, gaudi2->scratchpad_kernel_address,
|
||||
gaudi2->scratchpad_bus_address);
|
||||
hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
|
||||
gaudi2->scratchpad_bus_address);
|
||||
free_virt_msix_db_mem:
|
||||
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
|
||||
free_cpu_accessible_dma_pool:
|
||||
@ -4526,7 +4536,7 @@ static int gaudi2_set_tpc_engine_mode(struct hl_device *hdev, u32 engine_id, u32
|
||||
reg_base = gaudi2_tpc_cfg_blocks_bases[tpc_id];
|
||||
reg_addr = reg_base + TPC_CFG_STALL_OFFSET;
|
||||
reg_val = FIELD_PREP(DCORE0_TPC0_CFG_TPC_STALL_V_MASK,
|
||||
!!(engine_command == HL_ENGINE_STALL));
|
||||
(engine_command == HL_ENGINE_STALL) ? 1 : 0);
|
||||
WREG32(reg_addr, reg_val);
|
||||
|
||||
if (engine_command == HL_ENGINE_RESUME) {
|
||||
@ -4550,7 +4560,7 @@ static int gaudi2_set_mme_engine_mode(struct hl_device *hdev, u32 engine_id, u32
|
||||
reg_base = gaudi2_mme_ctrl_lo_blocks_bases[mme_id];
|
||||
reg_addr = reg_base + MME_CTRL_LO_QM_STALL_OFFSET;
|
||||
reg_val = FIELD_PREP(DCORE0_MME_CTRL_LO_QM_STALL_V_MASK,
|
||||
!!(engine_command == HL_ENGINE_STALL));
|
||||
(engine_command == HL_ENGINE_STALL) ? 1 : 0);
|
||||
WREG32(reg_addr, reg_val);
|
||||
|
||||
return 0;
|
||||
@ -4571,7 +4581,7 @@ static int gaudi2_set_edma_engine_mode(struct hl_device *hdev, u32 engine_id, u3
|
||||
reg_base = gaudi2_dma_core_blocks_bases[edma_id];
|
||||
reg_addr = reg_base + EDMA_CORE_CFG_STALL_OFFSET;
|
||||
reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK,
|
||||
!!(engine_command == HL_ENGINE_STALL));
|
||||
(engine_command == HL_ENGINE_STALL) ? 1 : 0);
|
||||
WREG32(reg_addr, reg_val);
|
||||
|
||||
if (engine_command == HL_ENGINE_STALL) {
|
||||
@ -6148,18 +6158,24 @@ static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_perform
|
||||
u32 poll_timeout_us)
|
||||
{
|
||||
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
|
||||
int rc = 0;
|
||||
|
||||
if (!driver_performs_reset) {
|
||||
/* set SP to indicate reset request sent to FW */
|
||||
if (dyn_regs->cpu_rst_status)
|
||||
WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA);
|
||||
else
|
||||
WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
|
||||
if (hl_is_fw_sw_ver_below(hdev, 1, 10)) {
|
||||
/* set SP to indicate reset request sent to FW */
|
||||
if (dyn_regs->cpu_rst_status)
|
||||
WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA);
|
||||
else
|
||||
WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
|
||||
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
|
||||
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
|
||||
|
||||
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
|
||||
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
|
||||
|
||||
return gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
|
||||
/* wait for f/w response */
|
||||
rc = gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
|
||||
} else {
|
||||
rc = hl_fw_send_soft_reset(hdev);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Block access to engines, QMANs and SM during reset, these
|
||||
@ -7231,7 +7247,7 @@ static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8
|
||||
|
||||
gaudi2_iterate_tpcs(hdev, &tpc_iter);
|
||||
|
||||
return tpc_idle_data.is_idle;
|
||||
return *tpc_idle_data.is_idle;
|
||||
}
|
||||
|
||||
static bool gaudi2_get_decoder_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
|
||||
@ -7737,137 +7753,28 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
|
||||
return !!ecc_data->is_critical;
|
||||
}
|
||||
|
||||
/*
|
||||
* gaudi2_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
|
||||
*
|
||||
* @idx: the current pi/ci value
|
||||
* @q_len: the queue length (power of 2)
|
||||
*
|
||||
* @return the cyclically decremented index
|
||||
*/
|
||||
static inline u32 gaudi2_queue_idx_dec(u32 idx, u32 q_len)
|
||||
static void print_lower_qman_data_on_err(struct hl_device *hdev, u64 qman_base)
|
||||
{
|
||||
u32 mask = q_len - 1;
|
||||
u32 lo, hi, cq_ptr_size, arc_cq_ptr_size;
|
||||
u64 cq_ptr, arc_cq_ptr, cp_current_inst;
|
||||
|
||||
/*
|
||||
* modular decrement is equivalent to adding (queue_size -1)
|
||||
* later we take LSBs to make sure the value is in the
|
||||
* range [0, queue_len - 1]
|
||||
*/
|
||||
return (idx + q_len - 1) & mask;
|
||||
}
|
||||
lo = RREG32(qman_base + QM_CQ_PTR_LO_4_OFFSET);
|
||||
hi = RREG32(qman_base + QM_CQ_PTR_HI_4_OFFSET);
|
||||
cq_ptr = ((u64) hi) << 32 | lo;
|
||||
cq_ptr_size = RREG32(qman_base + QM_CQ_TSIZE_4_OFFSET);
|
||||
|
||||
/**
|
||||
* gaudi2_print_sw_config_stream_data - print SW config stream data
|
||||
*
|
||||
* @hdev: pointer to the habanalabs device structure
|
||||
* @stream: the QMAN's stream
|
||||
* @qman_base: base address of QMAN registers block
|
||||
*/
|
||||
static void gaudi2_print_sw_config_stream_data(struct hl_device *hdev,
|
||||
u32 stream, u64 qman_base)
|
||||
{
|
||||
u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr;
|
||||
u32 cq_ptr_lo_off, size;
|
||||
lo = RREG32(qman_base + QM_ARC_CQ_PTR_LO_OFFSET);
|
||||
hi = RREG32(qman_base + QM_ARC_CQ_PTR_HI_OFFSET);
|
||||
arc_cq_ptr = ((u64) hi) << 32 | lo;
|
||||
arc_cq_ptr_size = RREG32(qman_base + QM_ARC_CQ_TSIZE_OFFSET);
|
||||
|
||||
cq_ptr_lo_off = mmDCORE0_TPC0_QM_CQ_PTR_LO_1 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0;
|
||||
lo = RREG32(qman_base + QM_CP_CURRENT_INST_LO_4_OFFSET);
|
||||
hi = RREG32(qman_base + QM_CP_CURRENT_INST_HI_4_OFFSET);
|
||||
cp_current_inst = ((u64) hi) << 32 | lo;
|
||||
|
||||
cq_ptr_lo = qman_base + (mmDCORE0_TPC0_QM_CQ_PTR_LO_0 - mmDCORE0_TPC0_QM_BASE) +
|
||||
stream * cq_ptr_lo_off;
|
||||
|
||||
cq_ptr_hi = cq_ptr_lo + (mmDCORE0_TPC0_QM_CQ_PTR_HI_0 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0);
|
||||
|
||||
cq_tsize = cq_ptr_lo + (mmDCORE0_TPC0_QM_CQ_TSIZE_0 - mmDCORE0_TPC0_QM_CQ_PTR_LO_0);
|
||||
|
||||
cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
|
||||
size = RREG32(cq_tsize);
|
||||
dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n",
|
||||
stream, cq_ptr, size);
|
||||
}
|
||||
|
||||
/**
|
||||
* gaudi2_print_last_pqes_on_err - print last PQEs on error
|
||||
*
|
||||
* @hdev: pointer to the habanalabs device structure
|
||||
* @qid_base: first QID of the QMAN (out of 4 streams)
|
||||
* @stream: the QMAN's stream
|
||||
* @qman_base: base address of QMAN registers block
|
||||
* @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE)
|
||||
*/
|
||||
static void gaudi2_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base, u32 stream,
|
||||
u64 qman_base, bool pr_sw_conf)
|
||||
{
|
||||
u32 ci, qm_ci_stream_off;
|
||||
struct hl_hw_queue *q;
|
||||
u64 pq_ci;
|
||||
int i;
|
||||
|
||||
q = &hdev->kernel_queues[qid_base + stream];
|
||||
|
||||
qm_ci_stream_off = mmDCORE0_TPC0_QM_PQ_CI_1 - mmDCORE0_TPC0_QM_PQ_CI_0;
|
||||
pq_ci = qman_base + (mmDCORE0_TPC0_QM_PQ_CI_0 - mmDCORE0_TPC0_QM_BASE) +
|
||||
stream * qm_ci_stream_off;
|
||||
|
||||
hdev->asic_funcs->hw_queues_lock(hdev);
|
||||
|
||||
if (pr_sw_conf)
|
||||
gaudi2_print_sw_config_stream_data(hdev, stream, qman_base);
|
||||
|
||||
ci = RREG32(pq_ci);
|
||||
|
||||
/* we should start printing form ci -1 */
|
||||
ci = gaudi2_queue_idx_dec(ci, HL_QUEUE_LENGTH);
|
||||
|
||||
for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) {
|
||||
struct hl_bd *bd;
|
||||
u64 addr;
|
||||
u32 len;
|
||||
|
||||
bd = q->kernel_address;
|
||||
bd += ci;
|
||||
|
||||
len = le32_to_cpu(bd->len);
|
||||
/* len 0 means uninitialized entry- break */
|
||||
if (!len)
|
||||
break;
|
||||
|
||||
addr = le64_to_cpu(bd->ptr);
|
||||
|
||||
dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n",
|
||||
stream, ci, addr, len);
|
||||
|
||||
/* get previous ci, wrap if needed */
|
||||
ci = gaudi2_queue_idx_dec(ci, HL_QUEUE_LENGTH);
|
||||
}
|
||||
|
||||
hdev->asic_funcs->hw_queues_unlock(hdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* print_qman_data_on_err - extract QMAN data on error
|
||||
*
|
||||
* @hdev: pointer to the habanalabs device structure
|
||||
* @qid_base: first QID of the QMAN (out of 4 streams)
|
||||
* @stream: the QMAN's stream
|
||||
* @qman_base: base address of QMAN registers block
|
||||
*
|
||||
* This function attempt to extract as much data as possible on QMAN error.
|
||||
* On upper CP print the SW config stream data and last 8 PQEs.
|
||||
* On lower CP print SW config data and last PQEs of ALL 4 upper CPs
|
||||
*/
|
||||
static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base, u32 stream, u64 qman_base)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
if (stream != QMAN_STREAMS) {
|
||||
gaudi2_print_last_pqes_on_err(hdev, qid_base, stream, qman_base, true);
|
||||
return;
|
||||
}
|
||||
|
||||
gaudi2_print_sw_config_stream_data(hdev, stream, qman_base);
|
||||
|
||||
for (i = 0 ; i < QMAN_STREAMS ; i++)
|
||||
gaudi2_print_last_pqes_on_err(hdev, qid_base, i, qman_base, false);
|
||||
dev_info(hdev->dev,
|
||||
"LowerQM. CQ: {ptr %#llx, size %u}, ARC_CQ: {ptr %#llx, size %u}, CP: {instruction %#llx}\n",
|
||||
cq_ptr, cq_ptr_size, arc_cq_ptr, arc_cq_ptr_size, cp_current_inst);
|
||||
}
|
||||
|
||||
static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type,
|
||||
@ -7888,8 +7795,8 @@ static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type
|
||||
continue;
|
||||
|
||||
if (i == QMAN_STREAMS) {
|
||||
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
|
||||
num_error_causes = GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE;
|
||||
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerQM");
|
||||
num_error_causes = GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE;
|
||||
} else {
|
||||
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
|
||||
num_error_causes = GAUDI2_NUM_OF_QM_ERR_CAUSE;
|
||||
@ -7900,12 +7807,13 @@ static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type
|
||||
gaudi2_print_event(hdev, event_type, true,
|
||||
"%s. err cause: %s", reg_desc,
|
||||
i == QMAN_STREAMS ?
|
||||
gaudi2_qman_lower_cp_error_cause[j] :
|
||||
gaudi2_lower_qman_error_cause[j] :
|
||||
gaudi2_qman_error_cause[j]);
|
||||
error_count++;
|
||||
}
|
||||
|
||||
print_qman_data_on_err(hdev, qid_base, i, qman_base);
|
||||
if (i == QMAN_STREAMS)
|
||||
print_lower_qman_data_on_err(hdev, qman_base);
|
||||
}
|
||||
|
||||
arb_err_val = RREG32(arb_err_addr);
|
||||
@ -8033,7 +7941,7 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
|
||||
u8 module_sub_idx, u64 *event_mask)
|
||||
{
|
||||
bool via_sft = false;
|
||||
u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id;
|
||||
u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id, binned_idx;
|
||||
u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr;
|
||||
u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0;
|
||||
u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0;
|
||||
@ -8041,15 +7949,21 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
|
||||
|
||||
switch (module) {
|
||||
case RAZWI_TPC:
|
||||
sprintf(initiator_name, "TPC_%u", module_idx);
|
||||
if (hdev->tpc_binning) {
|
||||
binned_idx = __ffs(hdev->tpc_binning);
|
||||
if (binned_idx == module_idx)
|
||||
module_idx = TPC_ID_DCORE0_TPC6;
|
||||
}
|
||||
|
||||
hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
|
||||
|
||||
if (hl_is_fw_ver_below_1_9(hdev) &&
|
||||
if (hl_is_fw_sw_ver_below(hdev, 1, 9) &&
|
||||
!hdev->asic_prop.fw_security_enabled &&
|
||||
((module_idx == 0) || (module_idx == 1)))
|
||||
lbw_rtr_id = DCORE0_RTR0;
|
||||
else
|
||||
lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
|
||||
sprintf(initiator_name, "TPC_%u", module_idx);
|
||||
break;
|
||||
case RAZWI_MME:
|
||||
sprintf(initiator_name, "MME_%u", module_idx);
|
||||
@ -8108,9 +8022,14 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
|
||||
sprintf(initiator_name, "NIC_%u", module_idx);
|
||||
break;
|
||||
case RAZWI_DEC:
|
||||
sprintf(initiator_name, "DEC_%u", module_idx);
|
||||
if (hdev->decoder_binning) {
|
||||
binned_idx = __ffs(hdev->decoder_binning);
|
||||
if (binned_idx == module_idx)
|
||||
module_idx = DEC_ID_PCIE_VDEC1;
|
||||
}
|
||||
hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx];
|
||||
lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx];
|
||||
sprintf(initiator_name, "DEC_%u", module_idx);
|
||||
break;
|
||||
case RAZWI_ROT:
|
||||
hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx];
|
||||
@ -8251,6 +8170,7 @@ static bool gaudi2_handle_psoc_razwi_happened(struct hl_device *hdev, u32 razwi_
|
||||
u16 num_of_eng, eng_id[PSOC_RAZWI_MAX_ENG_PER_RTR];
|
||||
char eng_name_str[PSOC_RAZWI_ENG_STR_SIZE];
|
||||
bool razwi_happened = false;
|
||||
u64 addr;
|
||||
int i;
|
||||
|
||||
num_of_eng = gaudi2_psoc_razwi_get_engines(common_razwi_info, ARRAY_SIZE(common_razwi_info),
|
||||
@ -8269,43 +8189,53 @@ static bool gaudi2_handle_psoc_razwi_happened(struct hl_device *hdev, u32 razwi_
|
||||
if (RREG32(base[i] + DEC_RAZWI_HBW_AW_SET)) {
|
||||
addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_HI);
|
||||
addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_LO);
|
||||
dev_err(hdev->dev,
|
||||
addr = ((u64)addr_hi << 32) + addr_lo;
|
||||
if (addr) {
|
||||
dev_err(hdev->dev,
|
||||
"PSOC HBW AW RAZWI: %s, address (aligned to 128 byte): 0x%llX\n",
|
||||
eng_name_str, ((u64)addr_hi << 32) + addr_lo);
|
||||
hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0],
|
||||
eng_name_str, addr);
|
||||
hl_handle_razwi(hdev, addr, &eng_id[0],
|
||||
num_of_eng, HL_RAZWI_HBW | HL_RAZWI_WRITE, event_mask);
|
||||
razwi_happened = true;
|
||||
razwi_happened = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (RREG32(base[i] + DEC_RAZWI_HBW_AR_SET)) {
|
||||
addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_HI);
|
||||
addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_LO);
|
||||
dev_err(hdev->dev,
|
||||
addr = ((u64)addr_hi << 32) + addr_lo;
|
||||
if (addr) {
|
||||
dev_err(hdev->dev,
|
||||
"PSOC HBW AR RAZWI: %s, address (aligned to 128 byte): 0x%llX\n",
|
||||
eng_name_str, ((u64)addr_hi << 32) + addr_lo);
|
||||
hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0],
|
||||
eng_name_str, addr);
|
||||
hl_handle_razwi(hdev, addr, &eng_id[0],
|
||||
num_of_eng, HL_RAZWI_HBW | HL_RAZWI_READ, event_mask);
|
||||
razwi_happened = true;
|
||||
razwi_happened = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (RREG32(base[i] + DEC_RAZWI_LBW_AW_SET)) {
|
||||
addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AW_ADDR);
|
||||
dev_err(hdev->dev,
|
||||
if (addr_lo) {
|
||||
dev_err(hdev->dev,
|
||||
"PSOC LBW AW RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
|
||||
eng_name_str, addr_lo);
|
||||
hl_handle_razwi(hdev, addr_lo, &eng_id[0],
|
||||
hl_handle_razwi(hdev, addr_lo, &eng_id[0],
|
||||
num_of_eng, HL_RAZWI_LBW | HL_RAZWI_WRITE, event_mask);
|
||||
razwi_happened = true;
|
||||
razwi_happened = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (RREG32(base[i] + DEC_RAZWI_LBW_AR_SET)) {
|
||||
addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AR_ADDR);
|
||||
dev_err(hdev->dev,
|
||||
"PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
|
||||
eng_name_str, addr_lo);
|
||||
hl_handle_razwi(hdev, addr_lo, &eng_id[0],
|
||||
if (addr_lo) {
|
||||
dev_err(hdev->dev,
|
||||
"PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
|
||||
eng_name_str, addr_lo);
|
||||
hl_handle_razwi(hdev, addr_lo, &eng_id[0],
|
||||
num_of_eng, HL_RAZWI_LBW | HL_RAZWI_READ, event_mask);
|
||||
razwi_happened = true;
|
||||
razwi_happened = true;
|
||||
}
|
||||
}
|
||||
/* In the common case the loop will break when there is only one engine id, or
|
||||
* several engines with the same router. The exceptional case is with psoc razwi
|
||||
@ -8789,13 +8719,13 @@ static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type,
|
||||
return error_count;
|
||||
}
|
||||
|
||||
static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, int sts_addr)
|
||||
static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, u64 intr_cause)
|
||||
{
|
||||
u32 error_count = 0, sts_val = RREG32(sts_addr);
|
||||
u32 error_count = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
|
||||
if (sts_val & BIT(i)) {
|
||||
if (intr_cause & BIT(i)) {
|
||||
gaudi2_print_event(hdev, event_type, true,
|
||||
"err cause: %s", gaudi2_dma_core_interrupts_cause[i]);
|
||||
error_count++;
|
||||
@ -8806,27 +8736,6 @@ static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type,
|
||||
return error_count;
|
||||
}
|
||||
|
||||
static int gaudi2_handle_pdma_core_event(struct hl_device *hdev, u16 event_type, int pdma_idx)
|
||||
{
|
||||
u32 sts_addr;
|
||||
|
||||
sts_addr = mmPDMA0_CORE_ERR_CAUSE + pdma_idx * PDMA_OFFSET;
|
||||
return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr);
|
||||
}
|
||||
|
||||
static int gaudi2_handle_edma_core_event(struct hl_device *hdev, u16 event_type, int edma_idx)
|
||||
{
|
||||
static const int edma_event_index_map[] = {2, 3, 0, 1, 6, 7, 4, 5};
|
||||
u32 sts_addr, index;
|
||||
|
||||
index = edma_event_index_map[edma_idx];
|
||||
|
||||
sts_addr = mmDCORE0_EDMA0_CORE_ERR_CAUSE +
|
||||
DCORE_OFFSET * (index / NUM_OF_EDMA_PER_DCORE) +
|
||||
DCORE_EDMA_OFFSET * (index % NUM_OF_EDMA_PER_DCORE);
|
||||
return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr);
|
||||
}
|
||||
|
||||
static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask)
|
||||
{
|
||||
u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr;
|
||||
@ -8866,6 +8775,9 @@ static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_typ
|
||||
u32 error_count = 0;
|
||||
int i;
|
||||
|
||||
gaudi2_print_event(hdev, event_type, true,
|
||||
"intr_cause_data: %#llx", intr_cause_data);
|
||||
|
||||
for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
|
||||
if (!(intr_cause_data & BIT_ULL(i)))
|
||||
continue;
|
||||
@ -8874,16 +8786,15 @@ static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_typ
|
||||
"err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
|
||||
error_count++;
|
||||
|
||||
switch (intr_cause_data & BIT_ULL(i)) {
|
||||
case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
|
||||
hl_check_for_glbl_errors(hdev);
|
||||
break;
|
||||
case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
|
||||
gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* Always check for LBW and HBW additional info as the indication itself is
|
||||
* sometimes missing
|
||||
*/
|
||||
}
|
||||
|
||||
hl_check_for_glbl_errors(hdev);
|
||||
gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
|
||||
|
||||
return error_count;
|
||||
}
|
||||
|
||||
@ -8937,11 +8848,16 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool
|
||||
addr <<= 32;
|
||||
addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA));
|
||||
|
||||
if (!is_pmmu)
|
||||
addr = gaudi2_mmu_descramble_addr(hdev, addr);
|
||||
if (is_pmmu) {
|
||||
dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n", addr);
|
||||
} else {
|
||||
|
||||
addr = gaudi2_mmu_descramble_addr(hdev, addr);
|
||||
addr &= HW_UNSCRAMBLED_BITS_MASK;
|
||||
dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n",
|
||||
addr, addr + ~HW_UNSCRAMBLED_BITS_MASK);
|
||||
}
|
||||
|
||||
dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n",
|
||||
is_pmmu ? "PMMU" : "HMMU", addr);
|
||||
hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask);
|
||||
|
||||
WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0);
|
||||
@ -9709,19 +9625,19 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
|
||||
case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
|
||||
case GAUDI2_EVENT_KDMA0_CORE:
|
||||
error_count = gaudi2_handle_kdma_core_event(hdev, event_type,
|
||||
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
|
||||
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
|
||||
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
|
||||
break;
|
||||
|
||||
case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_HDMA5_CORE:
|
||||
index = event_type - GAUDI2_EVENT_HDMA2_CORE;
|
||||
error_count = gaudi2_handle_edma_core_event(hdev, event_type, index);
|
||||
error_count = gaudi2_handle_dma_core_event(hdev, event_type,
|
||||
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
|
||||
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
|
||||
break;
|
||||
|
||||
case GAUDI2_EVENT_PDMA0_CORE ... GAUDI2_EVENT_PDMA1_CORE:
|
||||
index = event_type - GAUDI2_EVENT_PDMA0_CORE;
|
||||
error_count = gaudi2_handle_pdma_core_event(hdev, event_type, index);
|
||||
error_count = gaudi2_handle_dma_core_event(hdev, event_type,
|
||||
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
|
||||
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
|
||||
break;
|
||||
|
||||
|
||||
@ -98,7 +98,7 @@
|
||||
#define GAUDI2_DEFAULT_CARD_NAME "HL225"
|
||||
|
||||
#define QMAN_STREAMS 4
|
||||
#define PQ_FETCHER_CACHE_SIZE 8
|
||||
|
||||
#define NUM_OF_MME_SBTE_PORTS 5
|
||||
#define NUM_OF_MME_WB_PORTS 2
|
||||
|
||||
|
||||
@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
|
||||
mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
|
||||
mmDCORE0_EDMA0_CORE_CTX_IDX,
|
||||
mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
|
||||
mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
|
||||
mmDCORE0_EDMA0_QM_CQ_CFG0_0,
|
||||
mmDCORE0_EDMA0_QM_CQ_CFG0_1,
|
||||
mmDCORE0_EDMA0_QM_CQ_CFG0_2,
|
||||
@ -1533,6 +1534,10 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
|
||||
mmDCORE0_TPC0_CFG_QM_KERNEL_CONFIG,
|
||||
mmDCORE0_TPC0_CFG_QM_KERNEL_ID,
|
||||
mmDCORE0_TPC0_CFG_QM_POWER_LOOP,
|
||||
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_0,
|
||||
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_1,
|
||||
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_2,
|
||||
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_3,
|
||||
mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO,
|
||||
mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI,
|
||||
mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO,
|
||||
@ -1541,6 +1546,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
|
||||
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI,
|
||||
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO,
|
||||
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI,
|
||||
mmDCORE0_TPC0_CFG_FP8_143_BIAS,
|
||||
mmDCORE0_TPC0_CFG_ROUND_CSR,
|
||||
mmDCORE0_TPC0_CFG_CONV_ROUND_CSR,
|
||||
mmDCORE0_TPC0_CFG_SEMAPHORE,
|
||||
@ -3442,15 +3448,6 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
|
||||
ARRAY_SIZE(gaudi2_pb_thermal_sensor0), NULL, HL_PB_NA);
|
||||
}
|
||||
|
||||
/* HBM */
|
||||
/* Temporarily skip until SW-63348 is solved
|
||||
* instance_offset = mmHBM1_MC0_BASE - mmHBM0_MC0_BASE;
|
||||
* rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, GAUDI2_HBM_NUM,
|
||||
* instance_offset, gaudi2_pb_hbm,
|
||||
* ARRAY_SIZE(gaudi2_pb_hbm), NULL, HL_PB_NA,
|
||||
* prop->dram_enabled_mask);
|
||||
*/
|
||||
|
||||
/* Scheduler ARCs */
|
||||
instance_offset = mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE;
|
||||
rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
|
||||
|
||||
@ -2671,9 +2671,6 @@ int goya_mmu_init(struct hl_device *hdev)
|
||||
u64 hop0_addr;
|
||||
int rc, i;
|
||||
|
||||
if (!hdev->mmu_enable)
|
||||
return 0;
|
||||
|
||||
if (goya->hw_cap_initialized & HW_CAP_MMU)
|
||||
return 0;
|
||||
|
||||
|
||||
@ -371,13 +371,8 @@ static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (hdev->mmu_enable) {
|
||||
range_start = prop->dmmu.start_addr;
|
||||
range_end = prop->dmmu.end_addr;
|
||||
} else {
|
||||
range_start = prop->dram_user_base_address;
|
||||
range_end = prop->dram_end_address;
|
||||
}
|
||||
range_start = prop->dmmu.start_addr;
|
||||
range_end = prop->dmmu.end_addr;
|
||||
|
||||
return hl_mem_area_inside_range(addr, size, range_start, range_end);
|
||||
}
|
||||
|
||||
@ -359,7 +359,7 @@ struct hl_eq_entry {
|
||||
union {
|
||||
__le64 data_placeholder;
|
||||
struct hl_eq_ecc_data ecc_data;
|
||||
struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Gaudi1 HBM */
|
||||
struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */
|
||||
struct hl_eq_sm_sei_data sm_sei_data;
|
||||
struct cpucp_pkt_sync_err pkt_sync_err;
|
||||
struct hl_eq_fw_alive fw_alive;
|
||||
@ -653,7 +653,7 @@ enum pq_init_status {
|
||||
* which address is passed via the CpuCp packet. In addition, the host's driver
|
||||
* passes the max size it allows the CpuCP to write to the structure, to prevent
|
||||
* data corruption in case of mismatched driver/FW versions.
|
||||
* Relevant only to Gaudi.
|
||||
* Obsolete.
|
||||
*
|
||||
* CPUCP_PACKET_GENERIC_PASSTHROUGH -
|
||||
* Generic opcode for all firmware info that is only passed to host
|
||||
@ -665,6 +665,9 @@ enum pq_init_status {
|
||||
*
|
||||
* CPUCP_PACKET_REGISTER_INTERRUPTS -
|
||||
* Packet to register interrupts indicating LKD is ready to receive events from FW.
|
||||
*
|
||||
* CPUCP_PACKET_SOFT_RESET -
|
||||
* Packet to perform soft-reset.
|
||||
*/
|
||||
|
||||
enum cpucp_packet_id {
|
||||
@ -731,6 +734,7 @@ enum cpucp_packet_id {
|
||||
CPUCP_PACKET_RESERVED11, /* not used */
|
||||
CPUCP_PACKET_RESERVED12, /* internal */
|
||||
CPUCP_PACKET_REGISTER_INTERRUPTS, /* internal */
|
||||
CPUCP_PACKET_SOFT_RESET, /* internal */
|
||||
CPUCP_PACKET_ID_MAX /* must be last */
|
||||
};
|
||||
|
||||
@ -864,19 +868,19 @@ struct cpucp_array_data_packet {
|
||||
enum cpucp_led_index {
|
||||
CPUCP_LED0_INDEX = 0,
|
||||
CPUCP_LED1_INDEX,
|
||||
CPUCP_LED2_INDEX
|
||||
CPUCP_LED2_INDEX,
|
||||
CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
|
||||
};
|
||||
|
||||
/*
|
||||
* enum cpucp_packet_rc - Error return code
|
||||
* @cpucp_packet_success -> in case of success.
|
||||
* @cpucp_packet_invalid -> this is to support Goya and Gaudi platform.
|
||||
* @cpucp_packet_invalid -> this is to support first generation platforms.
|
||||
* @cpucp_packet_fault -> in case of processing error like failing to
|
||||
* get device binding or semaphore etc.
|
||||
* @cpucp_packet_invalid_pkt -> when cpucp packet is un-supported. This is
|
||||
* supported Greco onwards.
|
||||
* @cpucp_packet_invalid_pkt -> when cpucp packet is un-supported.
|
||||
* @cpucp_packet_invalid_params -> when checking parameter like length of buffer
|
||||
* or attribute value etc. Supported Greco onwards.
|
||||
* or attribute value etc.
|
||||
* @cpucp_packet_rc_max -> It indicates size of enum so should be at last.
|
||||
*/
|
||||
enum cpucp_packet_rc {
|
||||
@ -1361,7 +1365,7 @@ struct cpucp_dev_info_signed {
|
||||
#define DCORE_MON_REGS_SZ 512
|
||||
/*
|
||||
* struct dcore_monitor_regs_data - DCORE monitor regs data.
|
||||
* the structure follows sync manager block layout. relevant only to Gaudi.
|
||||
* the structure follows sync manager block layout. Obsolete.
|
||||
* @mon_pay_addrl: array of payload address low bits.
|
||||
* @mon_pay_addrh: array of payload address high bits.
|
||||
* @mon_pay_data: array of payload data.
|
||||
@ -1376,7 +1380,7 @@ struct dcore_monitor_regs_data {
|
||||
__le32 mon_status[DCORE_MON_REGS_SZ];
|
||||
};
|
||||
|
||||
/* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */
|
||||
/* contains SM data for each SYNC_MNGR (Obsolete) */
|
||||
struct cpucp_monitor_dump {
|
||||
struct dcore_monitor_regs_data sync_mngr_w_s;
|
||||
struct dcore_monitor_regs_data sync_mngr_e_s;
|
||||
|
||||
@ -35,6 +35,7 @@ enum cpu_boot_err {
|
||||
CPU_BOOT_ERR_TPM_FAIL = 20,
|
||||
CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
|
||||
CPU_BOOT_ERR_EEPROM_FAIL = 22,
|
||||
CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23,
|
||||
CPU_BOOT_ERR_ENABLED = 31,
|
||||
CPU_BOOT_ERR_SCND_EN = 63,
|
||||
CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
|
||||
@ -51,6 +52,7 @@ enum cpu_boot_err {
|
||||
(1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
|
||||
(1 << CPU_BOOT_ERR_BINNING_FAIL) | \
|
||||
(1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
|
||||
(1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
|
||||
(1 << CPU_BOOT_ERR_EEPROM_FAIL))
|
||||
|
||||
/*
|
||||
@ -132,6 +134,9 @@ enum cpu_boot_err {
|
||||
* CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
|
||||
* are used.
|
||||
*
|
||||
* CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
|
||||
* memories. Boot disabled until reset.
|
||||
*
|
||||
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
|
||||
* This is a main indication that the
|
||||
* running FW populates the error
|
||||
@ -157,6 +162,7 @@ enum cpu_boot_err {
|
||||
#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
|
||||
#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
|
||||
#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
|
||||
#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
|
||||
#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
|
||||
#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
|
||||
|
||||
@ -744,36 +750,6 @@ struct comms_status {
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* HL_MODULES_MAX_NUM is determined by the size of modules_mask in struct
|
||||
* hl_component_versions
|
||||
*/
|
||||
enum hl_modules {
|
||||
HL_MODULES_BOOT_INFO = 0,
|
||||
HL_MODULES_EEPROM,
|
||||
HL_MODULES_FDT,
|
||||
HL_MODULES_I2C,
|
||||
HL_MODULES_LZ4,
|
||||
HL_MODULES_MBEDTLS,
|
||||
HL_MODULES_MAX_NUM = 16
|
||||
};
|
||||
|
||||
/**
|
||||
* HL_COMPONENTS_MAX_NUM is determined by the size of components_mask in
|
||||
* struct cpucp_versions
|
||||
*/
|
||||
enum hl_components {
|
||||
HL_COMPONENTS_PID = 0,
|
||||
HL_COMPONENTS_MGMT,
|
||||
HL_COMPONENTS_PREBOOT,
|
||||
HL_COMPONENTS_PPBOOT,
|
||||
HL_COMPONENTS_ARMCP,
|
||||
HL_COMPONENTS_CPLD,
|
||||
HL_COMPONENTS_UBOOT,
|
||||
HL_COMPONENTS_FUSE,
|
||||
HL_COMPONENTS_MAX_NUM = 16
|
||||
};
|
||||
|
||||
#define NAME_MAX_LEN 32 /* bytes */
|
||||
struct hl_module_data {
|
||||
__u8 name[NAME_MAX_LEN];
|
||||
@ -787,8 +763,6 @@ struct hl_module_data {
|
||||
* @component: version of the component itself.
|
||||
* @fw_os: Firmware OS Version.
|
||||
* @comp_name: Name of the component.
|
||||
* @modules_mask: i'th bit (from LSB) is a flag - on if module i in enum
|
||||
* hl_modules is used.
|
||||
* @modules_counter: number of set bits in modules_mask.
|
||||
* @reserved: reserved for future use.
|
||||
* @modules: versions of the component's modules. Elaborated explanation in
|
||||
@ -800,9 +774,8 @@ struct hl_component_versions {
|
||||
__u8 component[VERSION_MAX_LEN];
|
||||
__u8 fw_os[VERSION_MAX_LEN];
|
||||
__u8 comp_name[NAME_MAX_LEN];
|
||||
__le16 modules_mask;
|
||||
__u8 modules_counter;
|
||||
__u8 reserved[1];
|
||||
__u8 reserved[3];
|
||||
struct hl_module_data modules[];
|
||||
};
|
||||
|
||||
|
||||
@ -242,6 +242,17 @@
|
||||
#define QM_FENCE2_OFFSET (mmPDMA0_QM_CP_FENCE2_RDATA_0 - mmPDMA0_QM_BASE)
|
||||
#define QM_SEI_STATUS_OFFSET (mmPDMA0_QM_SEI_STATUS - mmPDMA0_QM_BASE)
|
||||
|
||||
#define QM_CQ_PTR_LO_4_OFFSET (mmPDMA0_QM_CQ_PTR_LO_4 - mmPDMA0_QM_BASE)
|
||||
#define QM_CQ_PTR_HI_4_OFFSET (mmPDMA0_QM_CQ_PTR_HI_4 - mmPDMA0_QM_BASE)
|
||||
#define QM_CQ_TSIZE_4_OFFSET (mmPDMA0_QM_CQ_TSIZE_4 - mmPDMA0_QM_BASE)
|
||||
|
||||
#define QM_ARC_CQ_PTR_LO_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_LO - mmPDMA0_QM_BASE)
|
||||
#define QM_ARC_CQ_PTR_HI_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_HI - mmPDMA0_QM_BASE)
|
||||
#define QM_ARC_CQ_TSIZE_OFFSET (mmPDMA0_QM_ARC_CQ_TSIZE - mmPDMA0_QM_BASE)
|
||||
|
||||
#define QM_CP_CURRENT_INST_LO_4_OFFSET (mmPDMA0_QM_CP_CURRENT_INST_LO_4 - mmPDMA0_QM_BASE)
|
||||
#define QM_CP_CURRENT_INST_HI_4_OFFSET (mmPDMA0_QM_CP_CURRENT_INST_HI_4 - mmPDMA0_QM_BASE)
|
||||
|
||||
#define SFT_OFFSET (mmSFT1_HBW_RTR_IF0_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE)
|
||||
#define SFT_IF_RTR_OFFSET (mmSFT0_HBW_RTR_IF1_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE)
|
||||
|
||||
|
||||
@ -62,7 +62,7 @@ struct gaudi2_cold_rst_data {
|
||||
u32 fake_security_enable : 1;
|
||||
u32 fake_sig_validation_en : 1;
|
||||
u32 bist_skip_enable : 1;
|
||||
u32 bist_need_iatu_config : 1;
|
||||
u32 reserved1 : 1;
|
||||
u32 fake_bis_compliant : 1;
|
||||
u32 wd_rst_cause_arm : 1;
|
||||
u32 wd_rst_cause_arcpid : 1;
|
||||
|
||||
@ -95,6 +95,7 @@ config DRM_KUNIT_TEST
config DRM_KMS_HELPER
tristate
depends on DRM
select FB_SYS_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.

@ -132,14 +133,6 @@ config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM_KMS_HELPER
depends on FB=y || FB=DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_DEFERRED_IO
select FB_SYS_FOPS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y
@ -223,6 +216,7 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions

@ -295,9 +289,7 @@ source "drivers/gpu/drm/armada/Kconfig"

source "drivers/gpu/drm/atmel-hlcdc/Kconfig"

source "drivers/gpu/drm/rcar-du/Kconfig"

source "drivers/gpu/drm/shmobile/Kconfig"
source "drivers/gpu/drm/renesas/Kconfig"

source "drivers/gpu/drm/sun4i/Kconfig"

@ -156,8 +156,7 @@ obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_ARMADA) += armada/
obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/
obj-y += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) += shmobile/
obj-y += renesas/
obj-y += omapdrm/
obj-$(CONFIG_DRM_SUN4I) += sun4i/
obj-y += tilcdc/

@ -2747,7 +2747,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.compat_ioctl = amdgpu_kms_compat_ioctl,
#endif
#ifdef CONFIG_PROC_FS
.show_fdinfo = amdgpu_show_fdinfo
.show_fdinfo = drm_show_fdinfo,
#endif
};

@ -2802,6 +2802,9 @@ static const struct drm_driver amdgpu_kms_driver = {
.dumb_map_offset = amdgpu_mode_dumb_mmap,
.fops = &amdgpu_driver_kms_fops,
.release = &amdgpu_driver_release_kms,
#ifdef CONFIG_PROC_FS
.show_fdinfo = amdgpu_show_fdinfo,
#endif

.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,

@ -53,9 +53,8 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = "jpeg",
};

void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@ -87,31 +86,30 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
* ******************************************************************
*/

seq_printf(m, "pasid:\t%u\n", fpriv->vm.pasid);
seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n",
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name);
drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
drm_printf(p, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n",
stats.visible_vram/1024UL);
seq_printf(m, "amd-evicted-vram:\t%llu KiB\n",
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
stats.evicted_vram/1024UL);
seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n",
drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n",
stats.evicted_visible_vram/1024UL);
seq_printf(m, "amd-requested-vram:\t%llu KiB\n",
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
stats.requested_vram/1024UL);
seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n",
drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n",
stats.requested_visible_vram/1024UL);
seq_printf(m, "amd-requested-gtt:\t%llu KiB\n",
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats.requested_gtt/1024UL);

for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;

seq_printf(m, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
drm_printf(p, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
ktime_to_ns(usage[hw_ip]));
}
}

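A minimal sketch (not part of this merge; "foo_" names are placeholders) of the wiring that the amdgpu hunks above illustrate: the file_operations entry points at the core drm_show_fdinfo() helper, which in turn calls back into the driver through the drm_printer-based &drm_driver.show_fdinfo hook.

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>

static void foo_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	/* driver-specific keys; the common drm-* keys come from the core */
	drm_printf(p, "drm-engine-gfx:\t%llu ns\n", 0ULL);
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= drm_show_fdinfo,	/* core fdinfo entry point */
#endif
};

static const struct drm_driver foo_driver = {
	.fops		= &foo_fops,
	.show_fdinfo	= foo_show_fdinfo,	/* called back by drm_show_fdinfo() */
};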
@ -37,6 +37,6 @@
#include "amdgpu_ids.h"

uint32_t amdgpu_get_ip_count(struct amdgpu_device *adev, int id);
void amdgpu_show_fdinfo(struct seq_file *m, struct file *f);
void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file);

#endif

@ -3,6 +3,7 @@ config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM && MMU
select DRM_KMS_HELPER
select FB_IO_HELPERS if DRM_FBDEV_EMULATION
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller

@ -5,6 +5,7 @@
*/

#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>

@ -33,12 +34,8 @@ static void armada_fbdev_fb_destroy(struct fb_info *info)

static const struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IO_OPS,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_read = drm_fb_helper_cfb_read,
.fb_write = drm_fb_helper_cfb_write,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_destroy = armada_fbdev_fb_destroy,
};

@ -227,6 +227,7 @@ config DRM_SAMSUNG_DSIM
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY_MIPI_DPHY
help
The Samsung MIPI DSIM bridge controller driver.
This MIPI DSIM bridge can be found on Exynos SoCs and

@ -1393,7 +1393,7 @@ static struct i2c_driver adv7511_driver = {
.of_match_table = adv7511_of_ids,
},
.id_table = adv7511_i2c_ids,
.probe_new = adv7511_probe,
.probe = adv7511_probe,
.remove = adv7511_remove,
};


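The same mechanical conversion repeats across the bridge drivers in the hunks that follow: the I2C core's .probe callback now takes the argument list that .probe_new used to have (just the client, no struct i2c_device_id), so drivers only rename the initializer. A sketch on a hypothetical driver ("foo" is a placeholder, not from this merge):

#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client)
{
	/* device setup is unchanged; only the struct member name moved */
	return 0;
}

static void foo_remove(struct i2c_client *client)
{
}

static const struct i2c_device_id foo_id[] = {
	{ "foo" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe		= foo_probe,	/* was: .probe_new = foo_probe */
	.remove		= foo_remove,
	.id_table	= foo_id,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");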
@ -815,7 +815,7 @@ static struct i2c_driver anx6345_driver = {
|
||||
.name = "anx6345",
|
||||
.of_match_table = of_match_ptr(anx6345_match_table),
|
||||
},
|
||||
.probe_new = anx6345_i2c_probe,
|
||||
.probe = anx6345_i2c_probe,
|
||||
.remove = anx6345_i2c_remove,
|
||||
.id_table = anx6345_id,
|
||||
};
|
||||
|
||||
@ -1389,7 +1389,7 @@ static struct i2c_driver anx78xx_driver = {
|
||||
.name = "anx7814",
|
||||
.of_match_table = of_match_ptr(anx78xx_match_table),
|
||||
},
|
||||
.probe_new = anx78xx_i2c_probe,
|
||||
.probe = anx78xx_i2c_probe,
|
||||
.remove = anx78xx_i2c_remove,
|
||||
.id_table = anx78xx_id,
|
||||
};
|
||||
|
||||
@ -2800,7 +2800,7 @@ static struct i2c_driver anx7625_driver = {
|
||||
.of_match_table = anx_match_table,
|
||||
.pm = &anx7625_pm_ops,
|
||||
},
|
||||
.probe_new = anx7625_i2c_probe,
|
||||
.probe = anx7625_i2c_probe,
|
||||
.remove = anx7625_i2c_remove,
|
||||
|
||||
.id_table = anx7625_id,
|
||||
|
||||
@ -795,7 +795,7 @@ static struct i2c_device_id chipone_i2c_id[] = {
|
||||
MODULE_DEVICE_TABLE(i2c, chipone_i2c_id);
|
||||
|
||||
static struct i2c_driver chipone_i2c_driver = {
|
||||
.probe_new = chipone_i2c_probe,
|
||||
.probe = chipone_i2c_probe,
|
||||
.id_table = chipone_i2c_id,
|
||||
.driver = {
|
||||
.name = "chipone-icn6211-i2c",
|
||||
|
||||
@ -603,7 +603,7 @@ static const struct i2c_device_id ch7033_ids[] = {
|
||||
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
|
||||
|
||||
static struct i2c_driver ch7033_driver = {
|
||||
.probe_new = ch7033_probe,
|
||||
.probe = ch7033_probe,
|
||||
.remove = ch7033_remove,
|
||||
.driver = {
|
||||
.name = "ch7033",
|
||||
|
||||
@ -173,7 +173,7 @@ static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
|
||||
MODULE_DEVICE_TABLE(of, cros_ec_anx7688_bridge_match_table);
|
||||
|
||||
static struct i2c_driver cros_ec_anx7688_bridge_driver = {
|
||||
.probe_new = cros_ec_anx7688_bridge_probe,
|
||||
.probe = cros_ec_anx7688_bridge_probe,
|
||||
.remove = cros_ec_anx7688_bridge_remove,
|
||||
.driver = {
|
||||
.name = "cros-ec-anx7688-bridge",
|
||||
|
||||
@ -24,7 +24,7 @@ struct display_connector {
|
||||
struct gpio_desc *hpd_gpio;
|
||||
int hpd_irq;
|
||||
|
||||
struct regulator *dp_pwr;
|
||||
struct regulator *supply;
|
||||
struct gpio_desc *ddc_en;
|
||||
};
|
||||
|
||||
@ -191,6 +191,18 @@ static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
return IRQ_HANDLED;
}

static int display_connector_get_supply(struct platform_device *pdev,
struct display_connector *conn,
const char *name)
{
conn->supply = devm_regulator_get_optional(&pdev->dev, name);

if (conn->supply == ERR_PTR(-ENODEV))
conn->supply = NULL;

return PTR_ERR_OR_ZERO(conn->supply);
}

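A generic sketch of the optional-supply pattern that the helper above and the reworked probe path use ("foo_" names are placeholders, not from the patch): -ENODEV means the property is simply absent and maps to a NULL regulator, while every other error, including -EPROBE_DEFER, is propagated via dev_err_probe().

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int foo_get_optional_supply(struct device *dev,
				   struct regulator **out, const char *name)
{
	struct regulator *reg = devm_regulator_get_optional(dev, name);

	if (reg == ERR_PTR(-ENODEV))
		reg = NULL;			/* supply not wired up, that is fine */
	else if (IS_ERR(reg))
		return dev_err_probe(dev, PTR_ERR(reg),
				     "failed to get %s supply\n", name);

	*out = reg;
	if (reg)
		return regulator_enable(reg);	/* caller disables on teardown */

	return 0;
}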
static int display_connector_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct display_connector *conn;
|
||||
@ -316,36 +328,15 @@ static int display_connector_probe(struct platform_device *pdev)
|
||||
if (type == DRM_MODE_CONNECTOR_DisplayPort) {
|
||||
int ret;
|
||||
|
||||
conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
|
||||
|
||||
if (IS_ERR(conn->dp_pwr)) {
|
||||
ret = PTR_ERR(conn->dp_pwr);
|
||||
|
||||
switch (ret) {
|
||||
case -ENODEV:
|
||||
conn->dp_pwr = NULL;
|
||||
break;
|
||||
|
||||
case -EPROBE_DEFER:
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
default:
|
||||
dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (conn->dp_pwr) {
|
||||
ret = regulator_enable(conn->dp_pwr);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
ret = display_connector_get_supply(pdev, conn, "dp-pwr");
|
||||
if (ret < 0)
|
||||
return dev_err_probe(&pdev->dev, ret, "failed to get DP PWR regulator\n");
|
||||
}
|
||||
|
||||
/* enable DDC */
|
||||
if (type == DRM_MODE_CONNECTOR_HDMIA) {
|
||||
int ret;
|
||||
|
||||
conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en",
|
||||
GPIOD_OUT_HIGH);
|
||||
|
||||
@ -353,6 +344,18 @@ static int display_connector_probe(struct platform_device *pdev)
|
||||
dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n");
|
||||
return PTR_ERR(conn->ddc_en);
|
||||
}
|
||||
|
||||
ret = display_connector_get_supply(pdev, conn, "hdmi-pwr");
|
||||
if (ret < 0)
|
||||
return dev_err_probe(&pdev->dev, ret, "failed to get HDMI +5V Power regulator\n");
|
||||
}
|
||||
|
||||
if (conn->supply) {
|
||||
ret = regulator_enable(conn->supply);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to enable PWR regulator: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
conn->bridge.funcs = &display_connector_bridge_funcs;
|
||||
@ -386,8 +389,8 @@ static void display_connector_remove(struct platform_device *pdev)
|
||||
if (conn->ddc_en)
|
||||
gpiod_set_value(conn->ddc_en, 0);
|
||||
|
||||
if (conn->dp_pwr)
|
||||
regulator_disable(conn->dp_pwr);
|
||||
if (conn->supply)
|
||||
regulator_disable(conn->supply);
|
||||
|
||||
drm_bridge_remove(&conn->bridge);
|
||||
|
||||
|
||||
@ -1,9 +1,13 @@
|
||||
if ARCH_MXC || COMPILE_TEST
|
||||
|
||||
config DRM_IMX_LDB_HELPER
|
||||
tristate
|
||||
|
||||
config DRM_IMX8QM_LDB
|
||||
tristate "Freescale i.MX8QM LVDS display bridge"
|
||||
depends on OF
|
||||
depends on COMMON_CLK
|
||||
select DRM_IMX_LDB_HELPER
|
||||
select DRM_KMS_HELPER
|
||||
help
|
||||
Choose this to enable the internal LVDS Display Bridge(LDB) found in
|
||||
@ -13,6 +17,7 @@ config DRM_IMX8QXP_LDB
|
||||
tristate "Freescale i.MX8QXP LVDS display bridge"
|
||||
depends on OF
|
||||
depends on COMMON_CLK
|
||||
select DRM_IMX_LDB_HELPER
|
||||
select DRM_KMS_HELPER
|
||||
help
|
||||
Choose this to enable the internal LVDS Display Bridge(LDB) found in
|
||||
|
||||
@ -1,9 +1,6 @@
|
||||
imx8qm-ldb-objs := imx-ldb-helper.o imx8qm-ldb-drv.o
|
||||
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
|
||||
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
|
||||
|
||||
imx8qxp-ldb-objs := imx-ldb-helper.o imx8qxp-ldb-drv.o
|
||||
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
|
||||
|
||||
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_COMBINER) += imx8qxp-pixel-combiner.o
|
||||
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK) += imx8qxp-pixel-link.o
|
||||
obj-$(CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI) += imx8qxp-pxl2dpi.o
|
||||
|
||||
@ -4,8 +4,10 @@
|
||||
* Copyright 2019,2020,2022 NXP
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/media-bus-format.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/regmap.h>
|
||||
|
||||
@ -19,12 +21,14 @@ bool ldb_channel_is_single_link(struct ldb_channel *ldb_ch)
|
||||
{
|
||||
return ldb_ch->link_type == LDB_CH_SINGLE_LINK;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_channel_is_single_link);
|
||||
|
||||
bool ldb_channel_is_split_link(struct ldb_channel *ldb_ch)
|
||||
{
|
||||
return ldb_ch->link_type == LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS ||
|
||||
ldb_ch->link_type == LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_channel_is_split_link);
|
||||
|
||||
int ldb_bridge_atomic_check_helper(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *bridge_state,
|
||||
@ -38,6 +42,7 @@ int ldb_bridge_atomic_check_helper(struct drm_bridge *bridge,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_atomic_check_helper);
|
||||
|
||||
void ldb_bridge_mode_set_helper(struct drm_bridge *bridge,
|
||||
const struct drm_display_mode *mode,
|
||||
@ -69,6 +74,7 @@ void ldb_bridge_mode_set_helper(struct drm_bridge *bridge,
|
||||
break;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_mode_set_helper);
|
||||
|
||||
void ldb_bridge_enable_helper(struct drm_bridge *bridge)
|
||||
{
|
||||
@ -81,6 +87,7 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge)
|
||||
*/
|
||||
regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_enable_helper);
|
||||
|
||||
void ldb_bridge_disable_helper(struct drm_bridge *bridge)
|
||||
{
|
||||
@ -95,6 +102,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
|
||||
|
||||
regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
|
||||
|
||||
int ldb_bridge_attach_helper(struct drm_bridge *bridge,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
@ -117,6 +125,7 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
|
||||
ldb_ch->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
|
||||
|
||||
int ldb_init_helper(struct ldb *ldb)
|
||||
{
|
||||
@ -157,6 +166,7 @@ int ldb_init_helper(struct ldb *ldb)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_init_helper);
|
||||
|
||||
int ldb_find_next_bridge_helper(struct ldb *ldb)
|
||||
{
|
||||
@ -184,6 +194,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
|
||||
|
||||
void ldb_add_bridge_helper(struct ldb *ldb,
|
||||
const struct drm_bridge_funcs *bridge_funcs)
|
||||
@ -204,6 +215,7 @@ void ldb_add_bridge_helper(struct ldb *ldb,
|
||||
drm_bridge_add(&ldb_ch->bridge);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_add_bridge_helper);
|
||||
|
||||
void ldb_remove_bridge_helper(struct ldb *ldb)
|
||||
{
|
||||
@ -219,3 +231,8 @@ void ldb_remove_bridge_helper(struct ldb *ldb)
|
||||
drm_bridge_remove(&ldb_ch->bridge);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_remove_bridge_helper);
|
||||
|
||||
MODULE_DESCRIPTION("i.MX8 LVDS Display Bridge(LDB)/Pixel Mapper bridge helper");
|
||||
MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
@ -3479,7 +3479,7 @@ static struct i2c_driver it6505_i2c_driver = {
|
||||
.of_match_table = it6505_of_match,
|
||||
.pm = &it6505_bridge_pm_ops,
|
||||
},
|
||||
.probe_new = it6505_i2c_probe,
|
||||
.probe = it6505_i2c_probe,
|
||||
.remove = it6505_i2c_remove,
|
||||
.shutdown = it6505_shutdown,
|
||||
.id_table = it6505_id,
|
||||
|
||||
@ -1640,7 +1640,7 @@ static struct i2c_driver it66121_driver = {
|
||||
.name = "it66121",
|
||||
.of_match_table = it66121_dt_match,
|
||||
},
|
||||
.probe_new = it66121_probe,
|
||||
.probe = it66121_probe,
|
||||
.remove = it66121_remove,
|
||||
.id_table = it66121_id,
|
||||
};
|
||||
|
||||
@ -773,7 +773,7 @@ static struct i2c_driver lt8912_i2c_driver = {
|
||||
.name = "lt8912",
|
||||
.of_match_table = lt8912_dt_match,
|
||||
},
|
||||
.probe_new = lt8912_probe,
|
||||
.probe = lt8912_probe,
|
||||
.remove = lt8912_remove,
|
||||
.id_table = lt8912_id,
|
||||
};
|
||||
|
||||
@ -787,7 +787,7 @@ static const struct of_device_id lt9211_match_table[] = {
|
||||
MODULE_DEVICE_TABLE(of, lt9211_match_table);
|
||||
|
||||
static struct i2c_driver lt9211_driver = {
|
||||
.probe_new = lt9211_probe,
|
||||
.probe = lt9211_probe,
|
||||
.remove = lt9211_remove,
|
||||
.id_table = lt9211_id,
|
||||
.driver = {
|
||||
|
||||
@ -1192,7 +1192,7 @@ static struct i2c_driver lt9611_driver = {
|
||||
.name = "lt9611",
|
||||
.of_match_table = lt9611_match_table,
|
||||
},
|
||||
.probe_new = lt9611_probe,
|
||||
.probe = lt9611_probe,
|
||||
.remove = lt9611_remove,
|
||||
.id_table = lt9611_id,
|
||||
};
|
||||
|
||||
@ -1011,7 +1011,7 @@ static struct i2c_driver lt9611uxc_driver = {
|
||||
.of_match_table = lt9611uxc_match_table,
|
||||
.dev_groups = lt9611uxc_attr_groups,
|
||||
},
|
||||
.probe_new = lt9611uxc_probe,
|
||||
.probe = lt9611uxc_probe,
|
||||
.remove = lt9611uxc_remove,
|
||||
.id_table = lt9611uxc_id,
|
||||
};
|
||||
|
||||
@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, stdp4028_ge_b850v3_fw_match);
|
||||
|
||||
static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
|
||||
.id_table = stdp4028_ge_b850v3_fw_i2c_table,
|
||||
.probe_new = stdp4028_ge_b850v3_fw_probe,
|
||||
.probe = stdp4028_ge_b850v3_fw_probe,
|
||||
.remove = stdp4028_ge_b850v3_fw_remove,
|
||||
.driver = {
|
||||
.name = "stdp4028-ge-b850v3-fw",
|
||||
@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, stdp2690_ge_b850v3_fw_match);
|
||||
|
||||
static struct i2c_driver stdp2690_ge_b850v3_fw_driver = {
|
||||
.id_table = stdp2690_ge_b850v3_fw_i2c_table,
|
||||
.probe_new = stdp2690_ge_b850v3_fw_probe,
|
||||
.probe = stdp2690_ge_b850v3_fw_probe,
|
||||
.remove = stdp2690_ge_b850v3_fw_remove,
|
||||
.driver = {
|
||||
.name = "stdp2690-ge-b850v3-fw",
|
||||
|
||||
@ -335,7 +335,7 @@ MODULE_DEVICE_TABLE(of, ptn3460_match);
|
||||
|
||||
static struct i2c_driver ptn3460_driver = {
|
||||
.id_table = ptn3460_i2c_table,
|
||||
.probe_new = ptn3460_probe,
|
||||
.probe = ptn3460_probe,
|
||||
.remove = ptn3460_remove,
|
||||
.driver = {
|
||||
.name = "nxp,ptn3460",
|
||||
|
||||
@ -538,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
|
||||
|
||||
static struct i2c_driver ps8622_driver = {
|
||||
.id_table = ps8622_i2c_table,
|
||||
.probe_new = ps8622_probe,
|
||||
.probe = ps8622_probe,
|
||||
.remove = ps8622_remove,
|
||||
.driver = {
|
||||
.name = "ps8622",
|
||||
|
||||
@ -791,7 +791,7 @@ static const struct of_device_id ps8640_match[] = {
|
||||
MODULE_DEVICE_TABLE(of, ps8640_match);
|
||||
|
||||
static struct i2c_driver ps8640_driver = {
|
||||
.probe_new = ps8640_probe,
|
||||
.probe = ps8640_probe,
|
||||
.remove = ps8640_remove,
|
||||
.driver = {
|
||||
.name = "ps8640",
|
||||
|
||||
@ -220,6 +220,8 @@

#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"

#define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL)

static const char *const clk_names[5] = {
"bus_clk",
"sclk_mipi",
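A worked example of the new macro, with illustrative numbers that are not taken from the patch: it answers "how many cycles of a hz-rate clock fit into ps picoseconds", which is how the D-PHY timings in picoseconds are later turned into PHYTIMING register values against the byte clock.

#include <linux/math64.h>
#include <linux/types.h>

/* mirrors the define above */
#define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL)

/*
 * With a 500 MHz HS clock the byte clock is 500 MHz / 8 = 62.5 MHz.
 * A T-LPX of 60000 ps then maps to:
 *
 *   60000 * 62500000 / 10^12 = 3.75  ->  4 cycles (rounded to closest)
 */
static inline u32 example_lpx_cycles(void)
{
	return PS_TO_CYCLE(60000ULL, 62500000ULL);	/* == 4 */
}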
@ -407,6 +409,9 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
|
||||
.num_bits_resol = 11,
|
||||
.pll_p_offset = 13,
|
||||
.reg_values = reg_values,
|
||||
.m_min = 41,
|
||||
.m_max = 125,
|
||||
.min_freq = 500,
|
||||
};
|
||||
|
||||
static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
|
||||
@ -420,6 +425,9 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
|
||||
.num_bits_resol = 11,
|
||||
.pll_p_offset = 13,
|
||||
.reg_values = reg_values,
|
||||
.m_min = 41,
|
||||
.m_max = 125,
|
||||
.min_freq = 500,
|
||||
};
|
||||
|
||||
static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
|
||||
@ -431,6 +439,9 @@ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
|
||||
.num_bits_resol = 11,
|
||||
.pll_p_offset = 13,
|
||||
.reg_values = reg_values,
|
||||
.m_min = 41,
|
||||
.m_max = 125,
|
||||
.min_freq = 500,
|
||||
};
|
||||
|
||||
static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
|
||||
@ -443,6 +454,9 @@ static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
|
||||
.num_bits_resol = 12,
|
||||
.pll_p_offset = 13,
|
||||
.reg_values = exynos5433_reg_values,
|
||||
.m_min = 41,
|
||||
.m_max = 125,
|
||||
.min_freq = 500,
|
||||
};
|
||||
|
||||
static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
|
||||
@ -455,6 +469,9 @@ static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
|
||||
.num_bits_resol = 12,
|
||||
.pll_p_offset = 13,
|
||||
.reg_values = exynos5422_reg_values,
|
||||
.m_min = 41,
|
||||
.m_max = 125,
|
||||
.min_freq = 500,
|
||||
};
|
||||
|
||||
static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
|
||||
@ -471,6 +488,9 @@ static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
|
||||
*/
|
||||
.pll_p_offset = 14,
|
||||
.reg_values = imx8mm_dsim_reg_values,
|
||||
.m_min = 64,
|
||||
.m_max = 1023,
|
||||
.min_freq = 1050,
|
||||
};
|
||||
|
||||
static const struct samsung_dsim_driver_data *
|
||||
@ -549,12 +569,12 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
|
||||
tmp = (u64)fout * (_p << _s);
|
||||
do_div(tmp, fin);
|
||||
_m = tmp;
|
||||
if (_m < 41 || _m > 125)
|
||||
if (_m < driver_data->m_min || _m > driver_data->m_max)
|
||||
continue;
|
||||
|
||||
tmp = (u64)_m * fin;
|
||||
do_div(tmp, _p);
|
||||
if (tmp < 500 * MHZ ||
|
||||
if (tmp < driver_data->min_freq * MHZ ||
|
||||
tmp > driver_data->max_freq * MHZ)
|
||||
continue;
|
||||
|
||||
@ -640,16 +660,28 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
|
||||
reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
|
||||
} while ((reg & DSIM_PLL_STABLE) == 0);
|
||||
|
||||
dsi->hs_clock = fout;
|
||||
|
||||
return fout;
|
||||
}
|
||||
|
||||
static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
|
||||
{
|
||||
unsigned long hs_clk, byte_clk, esc_clk;
|
||||
unsigned long hs_clk, byte_clk, esc_clk, pix_clk;
|
||||
unsigned long esc_div;
|
||||
u32 reg;
|
||||
struct drm_display_mode *m = &dsi->mode;
|
||||
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
|
||||
|
||||
/* m->clock is in KHz */
|
||||
pix_clk = m->clock * 1000;
|
||||
|
||||
/* Use burst_clk_rate if available, otherwise use the pix_clk */
|
||||
if (dsi->burst_clk_rate)
|
||||
hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate);
|
||||
else
|
||||
hs_clk = samsung_dsim_set_pll(dsi, DIV_ROUND_UP(pix_clk * bpp, dsi->lanes));
|
||||
|
||||
hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate);
|
||||
if (!hs_clk) {
|
||||
dev_err(dsi->dev, "failed to configure DSI PLL\n");
|
||||
return -EFAULT;
|
||||
@ -687,13 +719,47 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
|
||||
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
|
||||
const unsigned int *reg_values = driver_data->reg_values;
|
||||
u32 reg;
|
||||
struct phy_configure_opts_mipi_dphy cfg;
|
||||
int clk_prepare, lpx, clk_zero, clk_post, clk_trail;
|
||||
int hs_exit, hs_prepare, hs_zero, hs_trail;
|
||||
unsigned long long byte_clock = dsi->hs_clock / 8;
|
||||
|
||||
if (driver_data->has_freqband)
|
||||
return;
|
||||
|
||||
phy_mipi_dphy_get_default_config_for_hsclk(dsi->hs_clock,
|
||||
dsi->lanes, &cfg);
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
* The tech Applications Processor manuals for i.MX8M Mini, Nano,
|
||||
* and Plus don't state what the definition of the PHYTIMING
|
||||
* bits are beyond their address and bit position.
|
||||
* After reviewing NXP's downstream code, it appears
|
||||
* that the various PHYTIMING registers take the number
|
||||
* of cycles and use various dividers on them. This
|
||||
* calculation does not result in an exact match to the
|
||||
* downstream code, but it is very close to the values
|
||||
* generated by their lookup table, and it appears
|
||||
* to sync at a variety of resolutions. If someone
|
||||
* can get a more accurate mathematical equation needed
|
||||
* for these registers, this should be updated.
|
||||
*/
|
||||
|
||||
lpx = PS_TO_CYCLE(cfg.lpx, byte_clock);
|
||||
hs_exit = PS_TO_CYCLE(cfg.hs_exit, byte_clock);
|
||||
clk_prepare = PS_TO_CYCLE(cfg.clk_prepare, byte_clock);
|
||||
clk_zero = PS_TO_CYCLE(cfg.clk_zero, byte_clock);
|
||||
clk_post = PS_TO_CYCLE(cfg.clk_post, byte_clock);
|
||||
clk_trail = PS_TO_CYCLE(cfg.clk_trail, byte_clock);
|
||||
hs_prepare = PS_TO_CYCLE(cfg.hs_prepare, byte_clock);
|
||||
hs_zero = PS_TO_CYCLE(cfg.hs_zero, byte_clock);
|
||||
hs_trail = PS_TO_CYCLE(cfg.hs_trail, byte_clock);
|
||||
|
||||
/* B D-PHY: D-PHY Master & Slave Analog Block control */
|
||||
reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
|
||||
reg_values[PHYCTRL_SLEW_UP];
|
||||
|
||||
samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg);
|
||||
|
||||
/*
|
||||
@ -701,7 +767,9 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
|
||||
* T HS-EXIT: Time that the transmitter drives LP-11 following a HS
|
||||
* burst
|
||||
*/
|
||||
reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
|
||||
|
||||
reg = DSIM_PHYTIMING_LPX(lpx) | DSIM_PHYTIMING_HS_EXIT(hs_exit);
|
||||
|
||||
samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg);
|
||||
|
||||
/*
|
||||
@ -717,10 +785,11 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
|
||||
* T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
|
||||
* the last payload clock bit of a HS transmission burst
|
||||
*/
|
||||
reg = reg_values[PHYTIMING_CLK_PREPARE] |
|
||||
reg_values[PHYTIMING_CLK_ZERO] |
|
||||
reg_values[PHYTIMING_CLK_POST] |
|
||||
reg_values[PHYTIMING_CLK_TRAIL];
|
||||
|
||||
reg = DSIM_PHYTIMING1_CLK_PREPARE(clk_prepare) |
|
||||
DSIM_PHYTIMING1_CLK_ZERO(clk_zero) |
|
||||
DSIM_PHYTIMING1_CLK_POST(clk_post) |
|
||||
DSIM_PHYTIMING1_CLK_TRAIL(clk_trail);
|
||||
|
||||
samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg);
|
||||
|
||||
@ -733,8 +802,11 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
|
||||
* T HS-TRAIL: Time that the transmitter drives the flipped differential
|
||||
* state after last payload data bit of a HS transmission burst
|
||||
*/
|
||||
reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
|
||||
reg_values[PHYTIMING_HS_TRAIL];
|
||||
|
||||
reg = DSIM_PHYTIMING2_HS_PREPARE(hs_prepare) |
|
||||
DSIM_PHYTIMING2_HS_ZERO(hs_zero) |
|
||||
DSIM_PHYTIMING2_HS_TRAIL(hs_trail);
|
||||
|
||||
samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg);
|
||||
}
|
||||
|
||||
@ -866,6 +938,10 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
|
||||
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
|
||||
reg &= ~DSIM_STOP_STATE_CNT_MASK;
|
||||
reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
|
||||
|
||||
if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
|
||||
reg |= DSIM_FORCE_STOP_STATE;
|
||||
|
||||
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
|
||||
|
||||
reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
|
||||
@ -881,17 +957,29 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
|
||||
u32 reg;
|
||||
|
||||
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
|
||||
int byte_clk_khz = dsi->hs_clock / 1000 / 8;
|
||||
int hfp = (m->hsync_start - m->hdisplay) * byte_clk_khz / m->clock;
|
||||
int hbp = (m->htotal - m->hsync_end) * byte_clk_khz / m->clock;
|
||||
int hsa = (m->hsync_end - m->hsync_start) * byte_clk_khz / m->clock;
|
||||
|
||||
/* remove packet overhead when possible */
|
||||
hfp = max(hfp - 6, 0);
|
||||
hbp = max(hbp - 6, 0);
|
||||
hsa = max(hsa - 6, 0);
|
||||
|
||||
dev_dbg(dsi->dev, "calculated hfp: %u, hbp: %u, hsa: %u",
|
||||
hfp, hbp, hsa);
|
||||
|
||||
reg = DSIM_CMD_ALLOW(0xf)
|
||||
| DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
|
||||
| DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
|
||||
samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg);
|
||||
|
||||
reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
|
||||
| DSIM_MAIN_HBP(m->htotal - m->hsync_end);
|
||||
reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp);
|
||||
samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
|
||||
|
||||
reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
|
||||
| DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
|
||||
| DSIM_MAIN_HSA(hsa);
|
||||
samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
|
||||
}
|
||||
reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
|
||||
@ -1347,6 +1435,9 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
|
||||
ret = samsung_dsim_init(dsi);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
samsung_dsim_set_display_mode(dsi);
|
||||
samsung_dsim_set_display_enable(dsi, true);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1354,9 +1445,16 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *old_bridge_state)
|
||||
{
|
||||
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
|
||||
u32 reg;
|
||||
|
||||
samsung_dsim_set_display_mode(dsi);
|
||||
samsung_dsim_set_display_enable(dsi, true);
|
||||
if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
|
||||
samsung_dsim_set_display_mode(dsi);
|
||||
samsung_dsim_set_display_enable(dsi, true);
|
||||
} else {
|
||||
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
|
||||
reg &= ~DSIM_FORCE_STOP_STATE;
|
||||
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
|
||||
}
|
||||
|
||||
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
|
||||
}
|
||||
@ -1365,10 +1463,17 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *old_bridge_state)
|
||||
{
|
||||
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
|
||||
u32 reg;
|
||||
|
||||
if (!(dsi->state & DSIM_STATE_ENABLED))
|
||||
return;
|
||||
|
||||
if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
|
||||
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
|
||||
reg |= DSIM_FORCE_STOP_STATE;
|
||||
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
|
||||
}
|
||||
|
||||
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
|
||||
}
|
||||
|
||||
@ -1689,11 +1794,11 @@ static const struct mipi_dsi_host_ops samsung_dsim_ops = {
|
||||
};
|
||||
|
||||
static int samsung_dsim_of_read_u32(const struct device_node *np,
|
||||
const char *propname, u32 *out_value)
|
||||
const char *propname, u32 *out_value, bool optional)
|
||||
{
|
||||
int ret = of_property_read_u32(np, propname, out_value);
|
||||
|
||||
if (ret < 0)
|
||||
if (ret < 0 && !optional)
|
||||
pr_err("%pOF: failed to get '%s' property\n", np, propname);
|
||||
|
||||
return ret;
|
||||
@ -1706,19 +1811,30 @@ static int samsung_dsim_parse_dt(struct samsung_dsim *dsi)
|
||||
u32 lane_polarities[5] = { 0 };
|
||||
struct device_node *endpoint;
|
||||
int i, nr_lanes, ret;
|
||||
struct clk *pll_clk;
|
||||
|
||||
ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency",
|
||||
&dsi->pll_clk_rate);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
&dsi->pll_clk_rate, 1);
|
||||
/* If it doesn't exist, read it from the clock instead of failing */
|
||||
if (ret < 0) {
|
||||
dev_dbg(dev, "Using sclk_mipi for pll clock frequency\n");
|
||||
pll_clk = devm_clk_get(dev, "sclk_mipi");
|
||||
if (!IS_ERR(pll_clk))
|
||||
dsi->pll_clk_rate = clk_get_rate(pll_clk);
|
||||
else
|
||||
return PTR_ERR(pll_clk);
|
||||
}
|
||||
|
||||
/* If it doesn't exist, use pixel clock instead of failing */
|
||||
ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency",
|
||||
&dsi->burst_clk_rate);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
&dsi->burst_clk_rate, 1);
|
||||
if (ret < 0) {
|
||||
dev_dbg(dev, "Using pixel clock for HS clock frequency\n");
|
||||
dsi->burst_clk_rate = 0;
|
||||
}
|
||||
|
||||
ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency",
|
||||
&dsi->esc_clk_rate);
|
||||
&dsi->esc_clk_rate, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
||||
@ -1151,7 +1151,7 @@ static const struct i2c_device_id sii902x_i2c_ids[] = {
|
||||
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
|
||||
|
||||
static struct i2c_driver sii902x_driver = {
|
||||
.probe_new = sii902x_probe,
|
||||
.probe = sii902x_probe,
|
||||
.remove = sii902x_remove,
|
||||
.driver = {
|
||||
.name = "sii902x",
|
||||
|
||||
@ -955,7 +955,7 @@ static struct i2c_driver sii9234_driver = {
|
||||
.name = "sii9234",
|
||||
.of_match_table = sii9234_dt_match,
|
||||
},
|
||||
.probe_new = sii9234_probe,
|
||||
.probe = sii9234_probe,
|
||||
.remove = sii9234_remove,
|
||||
.id_table = sii9234_id,
|
||||
};
|
||||
|
||||
@ -2378,7 +2378,7 @@ static struct i2c_driver sii8620_driver = {
|
||||
.name = "sii8620",
|
||||
.of_match_table = of_match_ptr(sii8620_dt_match),
|
||||
},
|
||||
.probe_new = sii8620_probe,
|
||||
.probe = sii8620_probe,
|
||||
.remove = sii8620_remove,
|
||||
.id_table = sii8620_id,
|
||||
};
|
||||
|
||||
@ -11,6 +11,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_graph.h>
|
||||
@ -63,6 +64,7 @@ struct tc358762 {
|
||||
struct drm_bridge bridge;
|
||||
struct regulator *regulator;
|
||||
struct drm_bridge *panel_bridge;
|
||||
struct gpio_desc *reset_gpio;
|
||||
bool pre_enabled;
|
||||
int error;
|
||||
};
|
||||
@ -138,6 +140,9 @@ static void tc358762_post_disable(struct drm_bridge *bridge)
|
||||
|
||||
ctx->pre_enabled = false;
|
||||
|
||||
if (ctx->reset_gpio)
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
|
||||
|
||||
ret = regulator_disable(ctx->regulator);
|
||||
if (ret < 0)
|
||||
dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
|
||||
@ -152,6 +157,11 @@ static void tc358762_pre_enable(struct drm_bridge *bridge)
|
||||
if (ret < 0)
|
||||
dev_err(ctx->dev, "error enabling regulators (%d)\n", ret);
|
||||
|
||||
if (ctx->reset_gpio) {
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
|
||||
usleep_range(5000, 10000);
|
||||
}
|
||||
|
||||
ret = tc358762_init(ctx);
|
||||
if (ret < 0)
|
||||
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
|
||||
@ -185,6 +195,11 @@ static int tc358762_parse_dt(struct tc358762 *ctx)
|
||||
|
||||
ctx->panel_bridge = panel_bridge;
|
||||
|
||||
/* Reset GPIO is optional */
|
||||
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
|
||||
if (IS_ERR(ctx->reset_gpio))
|
||||
return PTR_ERR(ctx->reset_gpio);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@ -1781,7 +1781,200 @@ static const struct drm_bridge_funcs tc_edp_bridge_funcs = {
|
||||
|
||||
static bool tc_readable_reg(struct device *dev, unsigned int reg)
|
||||
{
|
||||
return reg != SYSCTRL;
|
||||
switch (reg) {
|
||||
/* DSI D-PHY Layer */
|
||||
case 0x004:
|
||||
case 0x020:
|
||||
case 0x024:
|
||||
case 0x028:
|
||||
case 0x02c:
|
||||
case 0x030:
|
||||
case 0x038:
|
||||
case 0x040:
|
||||
case 0x044:
|
||||
case 0x048:
|
||||
case 0x04c:
|
||||
case 0x050:
|
||||
case 0x054:
|
||||
/* DSI PPI Layer */
|
||||
case PPI_STARTPPI:
|
||||
case 0x108:
|
||||
case 0x110:
|
||||
case PPI_LPTXTIMECNT:
|
||||
case PPI_LANEENABLE:
|
||||
case PPI_TX_RX_TA:
|
||||
case 0x140:
|
||||
case PPI_D0S_ATMR:
|
||||
case PPI_D1S_ATMR:
|
||||
case 0x14c:
|
||||
case 0x150:
|
||||
case PPI_D0S_CLRSIPOCOUNT:
|
||||
case PPI_D1S_CLRSIPOCOUNT:
|
||||
case PPI_D2S_CLRSIPOCOUNT:
|
||||
case PPI_D3S_CLRSIPOCOUNT:
|
||||
case 0x180:
|
||||
case 0x184:
|
||||
case 0x188:
|
||||
case 0x18c:
|
||||
case 0x190:
|
||||
case 0x1a0:
|
||||
case 0x1a4:
|
||||
case 0x1a8:
|
||||
case 0x1ac:
|
||||
case 0x1b0:
|
||||
case 0x1c0:
|
||||
case 0x1c4:
|
||||
case 0x1c8:
|
||||
case 0x1cc:
|
||||
case 0x1d0:
|
||||
case 0x1e0:
|
||||
case 0x1e4:
|
||||
case 0x1f0:
|
||||
case 0x1f4:
|
||||
/* DSI Protocol Layer */
|
||||
case DSI_STARTDSI:
|
||||
case 0x208:
|
||||
case DSI_LANEENABLE:
|
||||
case 0x214:
|
||||
case 0x218:
|
||||
case 0x220:
|
||||
case 0x224:
|
||||
case 0x228:
|
||||
case 0x230:
|
||||
/* DSI General */
|
||||
case 0x300:
|
||||
/* DSI Application Layer */
|
||||
case 0x400:
|
||||
case 0x404:
|
||||
/* DPI */
|
||||
case DPIPXLFMT:
|
||||
/* Parallel Output */
|
||||
case POCTRL:
|
||||
/* Video Path0 Configuration */
|
||||
case VPCTRL0:
|
||||
case HTIM01:
|
||||
case HTIM02:
|
||||
case VTIM01:
|
||||
case VTIM02:
|
||||
case VFUEN0:
|
||||
/* System */
|
||||
case TC_IDREG:
|
||||
case 0x504:
|
||||
case SYSSTAT:
|
||||
case SYSRSTENB:
|
||||
case SYSCTRL:
|
||||
/* I2C */
|
||||
case 0x520:
|
||||
/* GPIO */
|
||||
case GPIOM:
|
||||
case GPIOC:
|
||||
case GPIOO:
|
||||
case GPIOI:
|
||||
/* Interrupt */
|
||||
case INTCTL_G:
|
||||
case INTSTS_G:
|
||||
case 0x570:
|
||||
case 0x574:
|
||||
case INT_GP0_LCNT:
|
||||
case INT_GP1_LCNT:
|
||||
/* DisplayPort Control */
|
||||
case DP0CTL:
|
||||
/* DisplayPort Clock */
|
||||
case DP0_VIDMNGEN0:
|
||||
case DP0_VIDMNGEN1:
|
||||
case DP0_VMNGENSTATUS:
|
||||
case 0x628:
|
||||
case 0x62c:
|
||||
case 0x630:
|
||||
/* DisplayPort Main Channel */
|
||||
case DP0_SECSAMPLE:
|
||||
case DP0_VIDSYNCDELAY:
|
||||
case DP0_TOTALVAL:
|
||||
case DP0_STARTVAL:
|
||||
case DP0_ACTIVEVAL:
|
||||
case DP0_SYNCVAL:
|
||||
case DP0_MISC:
|
||||
/* DisplayPort Aux Channel */
|
||||
case DP0_AUXCFG0:
|
||||
case DP0_AUXCFG1:
|
||||
case DP0_AUXADDR:
|
||||
case 0x66c:
|
||||
case 0x670:
|
||||
case 0x674:
|
||||
case 0x678:
|
||||
case 0x67c:
|
||||
case 0x680:
|
||||
case 0x684:
|
||||
case 0x688:
|
||||
case DP0_AUXSTATUS:
|
||||
case DP0_AUXI2CADR:
|
||||
/* DisplayPort Link Training */
|
||||
case DP0_SRCCTRL:
|
||||
case DP0_LTSTAT:
|
||||
case DP0_SNKLTCHGREQ:
|
||||
case DP0_LTLOOPCTRL:
|
||||
case DP0_SNKLTCTRL:
|
||||
case 0x6e8:
|
||||
case 0x6ec:
|
||||
case 0x6f0:
|
||||
case 0x6f4:
|
||||
/* DisplayPort Audio */
|
||||
case 0x700:
|
||||
case 0x704:
|
||||
case 0x708:
|
||||
case 0x70c:
|
||||
case 0x710:
|
||||
case 0x714:
|
||||
case 0x718:
|
||||
case 0x71c:
|
||||
case 0x720:
|
||||
/* DisplayPort Source Control */
|
||||
case DP1_SRCCTRL:
|
||||
/* DisplayPort PHY */
|
||||
case DP_PHY_CTRL:
|
||||
case 0x810:
|
||||
case 0x814:
|
||||
case 0x820:
|
||||
case 0x840:
|
||||
/* I2S */
|
||||
case 0x880:
|
||||
case 0x888:
|
||||
case 0x88c:
|
||||
case 0x890:
|
||||
case 0x894:
|
||||
case 0x898:
|
||||
case 0x89c:
|
||||
case 0x8a0:
|
||||
case 0x8a4:
|
||||
case 0x8a8:
|
||||
case 0x8ac:
|
||||
case 0x8b0:
|
||||
case 0x8b4:
|
||||
/* PLL */
|
||||
case DP0_PLLCTRL:
|
||||
case DP1_PLLCTRL:
|
||||
case PXL_PLLCTRL:
|
||||
case PXL_PLLPARAM:
|
||||
case SYS_PLLPARAM:
|
||||
/* HDCP */
|
||||
case 0x980:
|
||||
case 0x984:
|
||||
case 0x988:
|
||||
case 0x98c:
|
||||
case 0x990:
|
||||
case 0x994:
|
||||
case 0x998:
|
||||
case 0x99c:
|
||||
case 0x9a0:
|
||||
case 0x9a4:
|
||||
case 0x9a8:
|
||||
case 0x9ac:
|
||||
/* Debug */
|
||||
case TSTCTL:
|
||||
case PLL_DBG:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static const struct regmap_range tc_volatile_ranges[] = {
|
||||
@ -2209,7 +2402,7 @@ static struct i2c_driver tc358767_driver = {
|
||||
.of_match_table = tc358767_of_ids,
|
||||
},
|
||||
.id_table = tc358767_i2c_ids,
|
||||
.probe_new = tc_probe,
|
||||
.probe = tc_probe,
|
||||
.remove = tc_remove,
|
||||
};
|
||||
module_i2c_driver(tc358767_driver);
|
||||
|
||||
@ -1134,7 +1134,7 @@ static struct i2c_driver tc358768_driver = {
|
||||
.of_match_table = tc358768_of_ids,
|
||||
},
|
||||
.id_table = tc358768_i2c_ids,
|
||||
.probe_new = tc358768_i2c_probe,
|
||||
.probe = tc358768_i2c_probe,
|
||||
.remove = tc358768_i2c_remove,
|
||||
};
|
||||
module_i2c_driver(tc358768_driver);
|
||||
|
||||
@ -728,7 +728,7 @@ static struct i2c_driver tc358775_driver = {
|
||||
.of_match_table = tc358775_of_ids,
|
||||
},
|
||||
.id_table = tc358775_i2c_ids,
|
||||
.probe_new = tc_probe,
|
||||
.probe = tc_probe,
|
||||
.remove = tc_remove,
|
||||
};
|
||||
module_i2c_driver(tc358775_driver);
|
||||
|
||||
@ -400,7 +400,7 @@ static const struct of_device_id dlpc3433_match_table[] = {
|
||||
MODULE_DEVICE_TABLE(of, dlpc3433_match_table);
|
||||
|
||||
static struct i2c_driver dlpc3433_driver = {
|
||||
.probe_new = dlpc3433_probe,
|
||||
.probe = dlpc3433_probe,
|
||||
.remove = dlpc3433_remove,
|
||||
.id_table = dlpc3433_id,
|
||||
.driver = {
|
||||
|
||||
@ -321,8 +321,8 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
|
||||
return dsi_div - 1;
|
||||
}
|
||||
|
||||
static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *old_bridge_state)
|
||||
static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *old_bridge_state)
|
||||
{
|
||||
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
|
||||
struct drm_atomic_state *state = old_bridge_state->base.state;
|
||||
@ -485,11 +485,22 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
|
||||
/* Trigger reset after CSR register update. */
|
||||
regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
|
||||
|
||||
/* Wait for 10ms after soft reset as specified in datasheet */
|
||||
usleep_range(10000, 12000);
|
||||
}
|
||||
|
||||
static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *old_bridge_state)
|
||||
{
|
||||
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
|
||||
unsigned int pval;
|
||||
|
||||
/* Clear all errors that got asserted during initialization. */
|
||||
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
|
||||
regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
|
||||
|
||||
usleep_range(10000, 12000);
|
||||
/* Wait for 1ms and check for errors in status register */
|
||||
usleep_range(1000, 1100);
|
||||
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
|
||||
if (pval)
|
||||
dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
|
||||
@ -556,6 +567,7 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
|
||||
.attach = sn65dsi83_attach,
|
||||
.detach = sn65dsi83_detach,
|
||||
.atomic_enable = sn65dsi83_atomic_enable,
|
||||
.atomic_pre_enable = sn65dsi83_atomic_pre_enable,
|
||||
.atomic_disable = sn65dsi83_atomic_disable,
|
||||
.mode_valid = sn65dsi83_mode_valid,
|
||||
|
||||
@ -698,6 +710,7 @@ static int sn65dsi83_probe(struct i2c_client *client)
|
||||
|
||||
ctx->bridge.funcs = &sn65dsi83_funcs;
|
||||
ctx->bridge.of_node = dev->of_node;
|
||||
ctx->bridge.pre_enable_prev_first = true;
|
||||
drm_bridge_add(&ctx->bridge);
|
||||
|
||||
ret = sn65dsi83_host_attach(ctx);
|
||||
@ -735,7 +748,7 @@ static const struct of_device_id sn65dsi83_match_table[] = {
|
||||
MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
|
||||
|
||||
static struct i2c_driver sn65dsi83_driver = {
|
||||
.probe_new = sn65dsi83_probe,
|
||||
.probe = sn65dsi83_probe,
|
||||
.remove = sn65dsi83_remove,
|
||||
.id_table = sn65dsi83_id,
|
||||
.driver = {
|
||||
|
||||
@ -1970,7 +1970,7 @@ static struct i2c_driver ti_sn65dsi86_driver = {
|
||||
.of_match_table = ti_sn65dsi86_match_table,
|
||||
.pm = &ti_sn65dsi86_pm_ops,
|
||||
},
|
||||
.probe_new = ti_sn65dsi86_probe,
|
||||
.probe = ti_sn65dsi86_probe,
|
||||
.id_table = ti_sn65dsi86_id,
|
||||
};
|
||||
|
||||
|
||||
@ -408,7 +408,7 @@ static struct i2c_driver tfp410_i2c_driver = {
|
||||
.of_match_table = of_match_ptr(tfp410_match),
|
||||
},
|
||||
.id_table = tfp410_i2c_ids,
|
||||
.probe_new = tfp410_i2c_probe,
|
||||
.probe = tfp410_i2c_probe,
|
||||
.remove = tfp410_i2c_remove,
|
||||
};
|
||||
#endif /* IS_ENABLED(CONFIG_I2C) */
|
||||
|
||||
@ -670,6 +670,28 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
|
||||
drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1);
|
||||
}
|
||||
|
||||
/* Don't use in new code. */
|
||||
void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len)
|
||||
{
|
||||
struct drm_fb_helper *fb_helper = info->par;
|
||||
struct drm_rect damage_area;
|
||||
|
||||
drm_fb_helper_memory_range_to_clip(info, off, len, &damage_area);
|
||||
drm_fb_helper_damage(fb_helper, damage_area.x1, damage_area.y1,
|
||||
drm_rect_width(&damage_area),
|
||||
drm_rect_height(&damage_area));
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_damage_range);
|
||||
|
||||
/* Don't use in new code. */
|
||||
void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height)
|
||||
{
|
||||
struct drm_fb_helper *fb_helper = info->par;
|
||||
|
||||
drm_fb_helper_damage(fb_helper, x, y, width, height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_damage_area);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
|
||||
* @info: fb_info struct pointer
|
||||
@ -714,220 +736,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_sys_read - Implements struct &fb_ops.fb_read for system memory
|
||||
* @info: fb_info struct pointer
|
||||
* @buf: userspace buffer to read from framebuffer memory
|
||||
* @count: number of bytes to read from framebuffer memory
|
||||
* @ppos: read offset within framebuffer memory
|
||||
*
|
||||
* Returns:
|
||||
* The number of bytes read on success, or an error code otherwise.
|
||||
*/
|
||||
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
return fb_sys_read(info, buf, count, ppos);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_sys_read);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_sys_write - Implements struct &fb_ops.fb_write for system memory
|
||||
* @info: fb_info struct pointer
|
||||
* @buf: userspace buffer to write to framebuffer memory
|
||||
* @count: number of bytes to write to framebuffer memory
|
||||
* @ppos: write offset within framebuffer memory
|
||||
*
|
||||
* Returns:
|
||||
* The number of bytes written on success, or an error code otherwise.
|
||||
*/
|
||||
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
loff_t pos = *ppos;
|
||||
ssize_t ret;
|
||||
struct drm_rect damage_area;
|
||||
|
||||
ret = fb_sys_write(info, buf, count, ppos);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
if (helper->funcs->fb_dirty) {
|
||||
drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
|
||||
drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
|
||||
drm_rect_width(&damage_area),
|
||||
drm_rect_height(&damage_area));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_sys_write);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
|
||||
* @info: fbdev registered by the helper
|
||||
* @rect: info about rectangle to fill
|
||||
*
|
||||
* A wrapper around sys_fillrect implemented by fbdev core
|
||||
*/
|
||||
void drm_fb_helper_sys_fillrect(struct fb_info *info,
|
||||
const struct fb_fillrect *rect)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
|
||||
sys_fillrect(info, rect);
|
||||
|
||||
if (helper->funcs->fb_dirty)
|
||||
drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
|
||||
* @info: fbdev registered by the helper
|
||||
* @area: info about area to copy
|
||||
*
|
||||
* A wrapper around sys_copyarea implemented by fbdev core
|
||||
*/
|
||||
void drm_fb_helper_sys_copyarea(struct fb_info *info,
|
||||
const struct fb_copyarea *area)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
|
||||
sys_copyarea(info, area);
|
||||
|
||||
if (helper->funcs->fb_dirty)
|
||||
drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
|
||||
* @info: fbdev registered by the helper
|
||||
* @image: info about image to blit
|
||||
*
|
||||
* A wrapper around sys_imageblit implemented by fbdev core
|
||||
*/
|
||||
void drm_fb_helper_sys_imageblit(struct fb_info *info,
|
||||
const struct fb_image *image)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
|
||||
sys_imageblit(info, image);
|
||||
|
||||
if (helper->funcs->fb_dirty)
|
||||
drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_cfb_read - Implements struct &fb_ops.fb_read for I/O memory
|
||||
* @info: fb_info struct pointer
|
||||
* @buf: userspace buffer to read from framebuffer memory
|
||||
* @count: number of bytes to read from framebuffer memory
|
||||
* @ppos: read offset within framebuffer memory
|
||||
*
|
||||
* Returns:
|
||||
* The number of bytes read on success, or an error code otherwise.
|
||||
*/
|
||||
ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
return fb_io_read(info, buf, count, ppos);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_cfb_read);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_cfb_write - Implements struct &fb_ops.fb_write for I/O memory
|
||||
* @info: fb_info struct pointer
|
||||
* @buf: userspace buffer to write to framebuffer memory
|
||||
* @count: number of bytes to write to framebuffer memory
|
||||
* @ppos: write offset within framebuffer memory
|
||||
*
|
||||
* Returns:
|
||||
* The number of bytes written on success, or an error code otherwise.
|
||||
*/
|
||||
ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
loff_t pos = *ppos;
|
||||
ssize_t ret;
|
||||
struct drm_rect damage_area;
|
||||
|
||||
ret = fb_io_write(info, buf, count, ppos);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
if (helper->funcs->fb_dirty) {
|
||||
drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
|
||||
drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
|
||||
drm_rect_width(&damage_area),
|
||||
drm_rect_height(&damage_area));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_cfb_write);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
|
||||
* @info: fbdev registered by the helper
|
||||
* @rect: info about rectangle to fill
|
||||
*
|
||||
* A wrapper around cfb_fillrect implemented by fbdev core
|
||||
*/
|
||||
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
|
||||
const struct fb_fillrect *rect)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
|
||||
cfb_fillrect(info, rect);
|
||||
|
||||
if (helper->funcs->fb_dirty)
|
||||
drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
|
||||
* @info: fbdev registered by the helper
|
||||
* @area: info about area to copy
|
||||
*
|
||||
* A wrapper around cfb_copyarea implemented by fbdev core
|
||||
*/
|
||||
void drm_fb_helper_cfb_copyarea(struct fb_info *info,
|
||||
const struct fb_copyarea *area)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
|
||||
cfb_copyarea(info, area);
|
||||
|
||||
if (helper->funcs->fb_dirty)
|
||||
drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
|
||||
* @info: fbdev registered by the helper
|
||||
* @image: info about image to blit
|
||||
*
|
||||
* A wrapper around cfb_imageblit implemented by fbdev core
|
||||
*/
|
||||
void drm_fb_helper_cfb_imageblit(struct fb_info *info,
|
||||
const struct fb_image *image)
|
||||
{
|
||||
struct drm_fb_helper *helper = info->par;
|
||||
|
||||
cfb_imageblit(info, image);
|
||||
|
||||
if (helper->funcs->fb_dirty)
|
||||
drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
|
||||
|
||||
/**
|
||||
* drm_fb_helper_set_suspend - wrapper around fb_set_suspend
|
||||
* @fb_helper: driver-allocated fbdev helper, can be NULL
|
||||
|
||||
@ -1,5 +1,7 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#include <linux/fb.h>
|
||||
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
@ -64,14 +66,11 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.fb_open = drm_fbdev_dma_fb_open,
|
||||
.fb_release = drm_fbdev_dma_fb_release,
|
||||
.fb_read = drm_fb_helper_sys_read,
|
||||
.fb_write = drm_fb_helper_sys_write,
|
||||
__FB_DEFAULT_SYS_OPS_RDWR,
|
||||
DRM_FB_HELPER_DEFAULT_OPS,
|
||||
.fb_fillrect = drm_fb_helper_sys_fillrect,
|
||||
.fb_copyarea = drm_fb_helper_sys_copyarea,
|
||||
.fb_imageblit = drm_fb_helper_sys_imageblit,
|
||||
.fb_destroy = drm_fbdev_dma_fb_destroy,
|
||||
__FB_DEFAULT_SYS_OPS_DRAW,
|
||||
.fb_mmap = drm_fbdev_dma_fb_mmap,
|
||||
.fb_destroy = drm_fbdev_dma_fb_destroy,
|
||||
};
|
||||
|
||||
/*

@ -34,6 +34,10 @@ static int drm_fbdev_generic_fb_release(struct fb_info *info, int user)
        return 0;
}

FB_GEN_DEFAULT_DEFERRED_SYS_OPS(drm_fbdev_generic,
                                drm_fb_helper_damage_range,
                                drm_fb_helper_damage_area);

static void drm_fbdev_generic_fb_destroy(struct fb_info *info)
{
        struct drm_fb_helper *fb_helper = info->par;
@ -56,13 +60,8 @@ static const struct fb_ops drm_fbdev_generic_fb_ops = {
        .owner = THIS_MODULE,
        .fb_open = drm_fbdev_generic_fb_open,
        .fb_release = drm_fbdev_generic_fb_release,
        .fb_read = drm_fb_helper_sys_read,
        .fb_write = drm_fb_helper_sys_write,
        FB_DEFAULT_DEFERRED_OPS(drm_fbdev_generic),
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_fillrect = drm_fb_helper_sys_fillrect,
        .fb_copyarea = drm_fb_helper_sys_copyarea,
        .fb_imageblit = drm_fb_helper_sys_imageblit,
        .fb_mmap = fb_deferred_io_mmap,
        .fb_destroy = drm_fbdev_generic_fb_destroy,
};

@ -42,6 +42,7 @@
#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
@ -148,6 +149,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
        static atomic64_t ident = ATOMIC_INIT(0);
        struct drm_device *dev = minor->dev;
        struct drm_file *file;
        int ret;
@ -156,6 +158,8 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
        if (!file)
                return ERR_PTR(-ENOMEM);

        /* Get a unique identifier for fdinfo: */
        file->client_id = atomic64_inc_return(&ident);
        file->pid = get_pid(task_tgid(current));
        file->minor = minor;

@ -868,6 +872,134 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
}
EXPORT_SYMBOL(drm_send_event);

static void print_size(struct drm_printer *p, const char *stat,
                       const char *region, u64 sz)
{
        const char *units[] = {"", " KiB", " MiB"};
        unsigned u;

        for (u = 0; u < ARRAY_SIZE(units) - 1; u++) {
                if (sz < SZ_1K)
                        break;
                sz = div_u64(sz, SZ_1K);
        }

        drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
}

/**
 * drm_print_memory_stats - A helper to print memory stats
 * @p: The printer to print output to
 * @stats: The collected memory stats
 * @supported_status: Bitmask of optional stats which are available
 * @region: The memory region
 *
 */
void drm_print_memory_stats(struct drm_printer *p,
                            const struct drm_memory_stats *stats,
                            enum drm_gem_object_status supported_status,
                            const char *region)
{
        print_size(p, "total", region, stats->private + stats->shared);
        print_size(p, "shared", region, stats->shared);
        print_size(p, "active", region, stats->active);

        if (supported_status & DRM_GEM_OBJECT_RESIDENT)
                print_size(p, "resident", region, stats->resident);

        if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
                print_size(p, "purgeable", region, stats->purgeable);
}
EXPORT_SYMBOL(drm_print_memory_stats);

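Given the format string in print_size() above, a client exposing all optional stats for the "memory" region ends up with fdinfo keys along these lines (the values are illustrative only):

drm-total-memory:       8192 KiB
drm-shared-memory:      2048 KiB
drm-active-memory:      1024 KiB
drm-resident-memory:    8192 KiB
drm-purgeable-memory:   512 KiB
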
/**
 * drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats
 * @p: the printer to print output to
 * @file: the DRM file
 *
 * Helper to iterate over GEM objects with a handle allocated in the specified
 * file.
 */
void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
{
        struct drm_gem_object *obj;
        struct drm_memory_stats status = {};
        enum drm_gem_object_status supported_status;
        int id;

        spin_lock(&file->table_lock);
        idr_for_each_entry (&file->object_idr, obj, id) {
                enum drm_gem_object_status s = 0;

                if (obj->funcs && obj->funcs->status) {
                        s = obj->funcs->status(obj);
                        supported_status = DRM_GEM_OBJECT_RESIDENT |
                                           DRM_GEM_OBJECT_PURGEABLE;
                }

                if (obj->handle_count > 1) {
                        status.shared += obj->size;
                } else {
                        status.private += obj->size;
                }

                if (s & DRM_GEM_OBJECT_RESIDENT) {
                        status.resident += obj->size;
                } else {
                        /* If already purged or not yet backed by pages, don't
                         * count it as purgeable:
                         */
                        s &= ~DRM_GEM_OBJECT_PURGEABLE;
                }

                if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
                        status.active += obj->size;

                        /* If still active, don't count as purgeable: */
                        s &= ~DRM_GEM_OBJECT_PURGEABLE;
                }

                if (s & DRM_GEM_OBJECT_PURGEABLE)
                        status.purgeable += obj->size;
        }
        spin_unlock(&file->table_lock);

        drm_print_memory_stats(p, &status, supported_status, "memory");
}
EXPORT_SYMBOL(drm_show_memory_stats);

/**
 * drm_show_fdinfo - helper for drm file fops
 * @m: output stream
 * @f: the device file instance
 *
 * Helper to implement fdinfo, for userspace to query usage stats, etc, of a
 * process using the GPU. See also &drm_driver.show_fdinfo.
 *
 * For text output format description please see Documentation/gpu/drm-usage-stats.rst
 */
void drm_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct drm_file *file = f->private_data;
        struct drm_device *dev = file->minor->dev;
        struct drm_printer p = drm_seq_file_printer(m);

        drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
        drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);

        if (dev_is_pci(dev->dev)) {
                struct pci_dev *pdev = to_pci_dev(dev->dev);

                drm_printf(&p, "drm-pdev:\t%04x:%02x:%02x.%d\n",
                           pci_domain_nr(pdev->bus), pdev->bus->number,
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        }

        if (dev->driver->show_fdinfo)
                dev->driver->show_fdinfo(&p, file);
}
EXPORT_SYMBOL(drm_show_fdinfo);

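A minimal sketch of how a driver ties into these helpers (illustrative, not taken from the patch: everything named foo_* is hypothetical, the rest follows the signatures shown above):

#include <linux/fs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

/* Driver callback invoked by drm_show_fdinfo() via &drm_driver.show_fdinfo;
 * here it just adds the standard drm-*-memory keys from the helper above.
 */
static void foo_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
        drm_show_memory_stats(p, file);
}

static const struct file_operations foo_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .show_fdinfo = drm_show_fdinfo,   /* generic fdinfo helper above */
};

static const struct drm_driver foo_driver = {
        .show_fdinfo = foo_show_fdinfo,
        .fops = &foo_driver_fops,
        /* ... */
};
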
/**
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. #drm_device.primary)

@ -7,6 +7,7 @@ config DRM_EXYNOS
        select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
        select DRM_KMS_HELPER
        select VIDEOMODE_HELPERS
        select FB_IO_HELPERS if DRM_FBDEV_EMULATION
        select SND_SOC_HDMI_CODEC if SND_SOC
        help
          Choose this option if you have a Samsung SoC Exynos chipset.

@ -8,6 +8,8 @@
 * Seung-Woo Kim <sw0312.kim@samsung.com>
 */

#include <linux/fb.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@ -47,13 +49,10 @@ static void exynos_drm_fb_destroy(struct fb_info *info)

static const struct fb_ops exynos_drm_fb_ops = {
        .owner = THIS_MODULE,
        __FB_DEFAULT_IO_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
        __FB_DEFAULT_IO_OPS_DRAW,
        .fb_mmap = exynos_drm_fb_mmap,
        .fb_read = drm_fb_helper_cfb_read,
        .fb_write = drm_fb_helper_cfb_write,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        .fb_destroy = exynos_drm_fb_destroy,
};

@ -3,6 +3,7 @@ config DRM_GMA500
        tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
        depends on DRM && PCI && X86 && MMU
        select DRM_KMS_HELPER
        select FB_IO_HELPERS if DRM_FBDEV_EMULATION
        select I2C
        select I2C_ALGOBIT
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915

@ -5,6 +5,7 @@
 *
 **************************************************************************/

#include <linux/fb.h>
#include <linux/pfn_t.h>

#include <drm/drm_crtc_helper.h>
@ -134,13 +135,10 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)

static const struct fb_ops psb_fbdev_fb_ops = {
        .owner = THIS_MODULE,
        __FB_DEFAULT_IO_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_setcolreg = psb_fbdev_fb_setcolreg,
        .fb_read = drm_fb_helper_cfb_read,
        .fb_write = drm_fb_helper_cfb_write,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        __FB_DEFAULT_IO_OPS_DRAW,
        .fb_mmap = psb_fbdev_fb_mmap,
        .fb_destroy = psb_fbdev_fb_destroy,
};

@ -492,7 +492,7 @@ static struct i2c_device_id tda9950_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda9950_ids);

static struct i2c_driver tda9950_driver = {
        .probe_new = tda9950_probe,
        .probe = tda9950_probe,
        .remove = tda9950_remove,
        .driver = {
                .name = "tda9950",

@ -2099,7 +2099,7 @@ static const struct i2c_device_id tda998x_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda998x_ids);

static struct i2c_driver tda998x_driver = {
        .probe_new = tda998x_probe,
        .probe = tda998x_probe,
        .remove = tda998x_remove,
        .driver = {
                .name = "tda998x",

@ -17,6 +17,7 @@ config DRM_I915
        select DRM_KMS_HELPER
        select DRM_PANEL
        select DRM_MIPI_DSI
        select FB_IO_HELPERS if DRM_FBDEV_EMULATION
        select RELAY
        select I2C
        select I2C_ALGOBIT

@ -157,6 +157,7 @@ config DRM_I915_SW_FENCE_CHECK_DAG
config DRM_I915_DEBUG_GUC
        bool "Enable additional driver debugging for GuC"
        depends on DRM_I915
        select STACKDEPOT
        default n
        help
          Choose this option to turn on extra driver debugging that may affect

@ -28,6 +28,7 @@
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@ -84,6 +85,10 @@ static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
        intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}

FB_GEN_DEFAULT_DEFERRED_IO_OPS(intel_fbdev,
                               drm_fb_helper_damage_range,
                               drm_fb_helper_damage_area)

static int intel_fbdev_set_par(struct fb_info *info)
{
        struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
@ -135,15 +140,12 @@ __diag_ignore_all("-Woverride-init", "Allow overriding the default ops");

static const struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_set_par = intel_fbdev_set_par,
        .fb_read = drm_fb_helper_cfb_read,
        .fb_write = drm_fb_helper_cfb_write,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
        .fb_pan_display = intel_fbdev_pan_display,
        .fb_blank = intel_fbdev_blank,
        .fb_pan_display = intel_fbdev_pan_display,
        __FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev),
        .fb_mmap = intel_fbdev_mmap,
};

@ -964,7 +964,11 @@ static int intel_context_set_gem(struct intel_context *ce,
        RCU_INIT_POINTER(ce->gem_context, ctx);

        GEM_BUG_ON(intel_context_is_pinned(ce));
        ce->ring_size = SZ_16K;

        if (ce->engine->class == COMPUTE_CLASS)
                ce->ring_size = SZ_512K;
        else
                ce->ring_size = SZ_16K;

        i915_vm_put(ce->vm);
        ce->vm = i915_gem_context_get_eb_vm(ctx);

@ -245,6 +245,7 @@ struct create_ext {
        unsigned int n_placements;
        unsigned int placement_mask;
        unsigned long flags;
        unsigned int pat_index;
};

static void repr_placements(char *buf, size_t size,
@ -394,11 +395,43 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
        return 0;
}

static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
        struct create_ext *ext_data = data;
        struct drm_i915_private *i915 = ext_data->i915;
        struct drm_i915_gem_create_ext_set_pat ext;
        unsigned int max_pat_index;

        BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
                     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

        /* Limiting the extension only to Meteor Lake */
        if (!IS_METEORLAKE(i915))
                return -ENODEV;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        max_pat_index = INTEL_INFO(i915)->max_pat_index;

        if (ext.pat_index > max_pat_index) {
                drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
                        ext.pat_index);
                return -EINVAL;
        }

        ext_data->pat_index = ext.pat_index;

        return 0;
}

static const i915_user_extension_fn create_extensions[] = {
        [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
        [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
        [I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};

#define PAT_INDEX_NOT_SET 0xffff
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
@ -418,6 +451,7 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
        if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
                return -EINVAL;

        ext_data.pat_index = PAT_INDEX_NOT_SET;
        ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                   create_extensions,
                                   ARRAY_SIZE(create_extensions),
@ -454,5 +488,11 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
                i915_gem_object_set_pat_index(obj, ext_data.pat_index);
                /* Mark pat_index is set by UMD */
                obj->pat_set_by_user = true;
        }

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}

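From userspace, the new extension chains into the existing CREATE_EXT ioctl. A rough sketch, assuming an already-open render node fd; the field names follow the i915 uAPI headers, while the size and pat_index values are only examples:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative only: create a buffer object with an explicit PAT index. */
static int create_bo_with_pat(int drm_fd, uint64_t size, uint32_t pat_index,
                              uint32_t *handle)
{
        struct drm_i915_gem_create_ext_set_pat set_pat = {
                .base.name = I915_GEM_CREATE_EXT_SET_PAT,
                .pat_index = pat_index,   /* validated against max_pat_index */
        };
        struct drm_i915_gem_create_ext create = {
                .size = size,
                .extensions = (uintptr_t)&set_pat,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                return -1;

        *handle = create.handle;
        return 0;
}
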
@ -208,6 +208,12 @@ bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
        if (!(obj->flags & I915_BO_ALLOC_USER))
                return false;

        /*
         * Always flush cache for UMD objects at creation time.
         */
        if (obj->pat_set_by_user)
                return true;

        /*
         * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
         * possible for userspace to bypass the GTT caching bits set by the

@ -348,8 +348,10 @@ static int live_parallel_switch(void *arg)
                                continue;

                        ce = intel_context_create(data[m].ce[0]->engine);
                        if (IS_ERR(ce))
                        if (IS_ERR(ce)) {
                                err = PTR_ERR(ce);
                                goto out;
                        }

                        err = intel_context_pin(ce);
                        if (err) {
@ -369,8 +371,10 @@ static int live_parallel_switch(void *arg)

                worker = kthread_create_worker(0, "igt/parallel:%s",
                                               data[n].ce[0]->engine->name);
                if (IS_ERR(worker))
                if (IS_ERR(worker)) {
                        err = PTR_ERR(worker);
                        goto out;
                }

                data[n].worker = worker;
        }
@ -399,8 +403,10 @@ static int live_parallel_switch(void *arg)
                        }
                }

                if (igt_live_test_end(&t))
                        err = -EIO;
                if (igt_live_test_end(&t)) {
                        err = err ?: -EIO;
                        break;
                }
        }

out:

@ -177,14 +177,40 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv
        return cs;
}

static int mtl_dummy_pipe_control(struct i915_request *rq)
{
        /* Wa_14016712196 */
        if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) ||
            IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) {
                u32 *cs;

                /* dummy PIPE_CONTROL + depth flush */
                cs = intel_ring_begin(rq, 6);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
                cs = gen12_emit_pipe_control(cs,
                                             0,
                                             PIPE_CONTROL_DEPTH_CACHE_FLUSH,
                                             LRC_PPHWSP_SCRATCH_ADDR);
                intel_ring_advance(rq, cs);
        }

        return 0;
}

int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
        struct intel_engine_cs *engine = rq->engine;

        if (mode & EMIT_FLUSH) {
                u32 flags = 0;
                int err;
                u32 *cs;

                err = mtl_dummy_pipe_control(rq);
                if (err)
                        return err;

                flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
                flags |= PIPE_CONTROL_FLUSH_L3;
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
@ -217,6 +243,11 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
        if (mode & EMIT_INVALIDATE) {
                u32 flags = 0;
                u32 *cs, count;
                int err;

                err = mtl_dummy_pipe_control(rq);
                if (err)
                        return err;

                flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
@ -733,6 +764,13 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
                                     PIPE_CONTROL_DC_FLUSH_ENABLE |
                                     PIPE_CONTROL_FLUSH_ENABLE);

        /* Wa_14016712196 */
        if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
            IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
                /* dummy PIPE_CONTROL + depth flush */
                cs = gen12_emit_pipe_control(cs, 0,
                                             PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);

        if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
                /* Wa_1409600907 */
                flags |= PIPE_CONTROL_DEPTH_STALL;

@ -1015,16 +1015,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)

/*
 * For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
 * so these PTE encode functions are left with using cache_level.
 * so the switch-case statements in these PTE encode functions are still valid.
 * See translation table LEGACY_CACHELEVEL.
 */
static u64 snb_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          unsigned int pat_index,
                          u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        switch (level) {
        switch (pat_index) {
        case I915_CACHE_L3_LLC:
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
@ -1033,19 +1033,19 @@ static u64 snb_pte_encode(dma_addr_t addr,
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
                MISSING_CASE(pat_index);
        }

        return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          unsigned int pat_index,
                          u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        switch (level) {
        switch (pat_index) {
        case I915_CACHE_L3_LLC:
                pte |= GEN7_PTE_CACHE_L3_LLC;
                break;
@ -1056,14 +1056,14 @@ static u64 ivb_pte_encode(dma_addr_t addr,
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
                MISSING_CASE(pat_index);
        }

        return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          unsigned int pat_index,
                          u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
@ -1071,31 +1071,31 @@ static u64 byt_pte_encode(dma_addr_t addr,
        if (!(flags & PTE_READ_ONLY))
                pte |= BYT_PTE_WRITEABLE;

        if (level != I915_CACHE_NONE)
        if (pat_index != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

        return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          unsigned int pat_index,
                          u32 flags)
{
        gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        if (level != I915_CACHE_NONE)
        if (pat_index != I915_CACHE_NONE)
                pte |= HSW_WB_LLC_AGE3;

        return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
                           enum i915_cache_level level,
                           unsigned int pat_index,
                           u32 flags)
{
        gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        switch (level) {
        switch (pat_index) {
        case I915_CACHE_NONE:
                break;
        case I915_CACHE_WT:
@ -1326,6 +1326,9 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
                ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
                                       ggtt->error_capture.size);

        list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
                intel_uc_resume_mappings(&gt->uc);

        ggtt->invalidate(ggtt);

        if (flush)

@ -18,10 +18,10 @@
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
                                  dma_addr_t addr,
                                  u64 offset,
                                  enum i915_cache_level cache_level,
                                  unsigned int pat_index,
                                  u32 unused)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
        unsigned int flags = (pat_index == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
@ -29,10 +29,10 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm,

static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct i915_vma_resource *vma_res,
                                     enum i915_cache_level cache_level,
                                     unsigned int pat_index,
                                     u32 unused)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
        unsigned int flags = (pat_index == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,

@ -1530,8 +1530,8 @@ static int live_busywait_preempt(void *arg)
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        enum intel_engine_id id;
        int err = -ENOMEM;
        u32 *map;
        int err;

        /*
         * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@ -1539,13 +1539,17 @@ static int live_busywait_preempt(void *arg)
         */

        ctx_hi = kernel_context(gt->i915, NULL);
        if (!ctx_hi)
                return -ENOMEM;
        if (IS_ERR(ctx_hi))
                return PTR_ERR(ctx_hi);

        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

        ctx_lo = kernel_context(gt->i915, NULL);
        if (!ctx_lo)
        if (IS_ERR(ctx_lo)) {
                err = PTR_ERR(ctx_lo);
                goto err_ctx_hi;
        }

        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

        obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);

@ -190,11 +190,18 @@ pte_tlbinv(struct intel_context *ce,

static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
{
        struct intel_memory_region *mr = gt->i915->mm.regions[INTEL_REGION_LMEM_0];
        resource_size_t size = SZ_1G;

        /*
         * Allocation of largest possible page size allows to test all types
         * of pages.
         * of pages. To succeed with both allocations, especially in case of Small
         * BAR, try to allocate no more than quarter of mappable memory.
         */
        return i915_gem_object_create_lmem(gt->i915, SZ_1G, I915_BO_ALLOC_CONTIGUOUS);
        if (mr && size > mr->io_size / 4)
                size = mr->io_size / 4;

        return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}

static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)

@ -167,25 +167,4 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
 * - **flags**, holds various bits to control message handling
 */

/*
 * Definition of the command transport message header (DW0)
 *
 * bit[4..0]   message len (in dwords)
 * bit[7..5]   reserved
 * bit[8]      response (G2H only)
 * bit[8]      write fence to desc (H2G only)
 * bit[9]      write status to H2G buff (H2G only)
 * bit[10]     send status back via G2H (H2G only)
 * bit[15..11] reserved
 * bit[31..16] action code
 */
#define GUC_CT_MSG_LEN_SHIFT            0
#define GUC_CT_MSG_LEN_MASK             0x1F
#define GUC_CT_MSG_IS_RESPONSE          (1 << 8)
#define GUC_CT_MSG_WRITE_FENCE_TO_DESC  (1 << 8)
#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
#define GUC_CT_MSG_SEND_STATUS          (1 << 10)
#define GUC_CT_MSG_ACTION_SHIFT         16
#define GUC_CT_MSG_ACTION_MASK          0xFFFF

#endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */

@ -24,6 +24,7 @@
 * | | 30:28 | **TYPE** - message type |
 * | | | - _`GUC_HXG_TYPE_REQUEST` = 0 |
 * | | | - _`GUC_HXG_TYPE_EVENT` = 1 |
 * | | | - _`GUC_HXG_TYPE_FAST_REQUEST` = 2 |
 * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 |
 * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 |
 * | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 |
@ -46,6 +47,7 @@
#define GUC_HXG_MSG_0_TYPE              (0x7 << 28)
#define   GUC_HXG_TYPE_REQUEST          0u
#define   GUC_HXG_TYPE_EVENT            1u
#define   GUC_HXG_TYPE_FAST_REQUEST     2u
#define   GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
#define   GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
#define   GUC_HXG_TYPE_RESPONSE_FAILURE 6u
@ -89,6 +91,34 @@
#define GUC_HXG_REQUEST_MSG_0_ACTION    (0xffff << 0)
#define GUC_HXG_REQUEST_MSG_n_DATAn     GUC_HXG_MSG_n_PAYLOAD

/**
 * DOC: HXG Fast Request
 *
 * The `HXG Request`_ message should be used to initiate asynchronous activity
 * for which confirmation or return data is not expected.
 *
 * If confirmation is required then `HXG Request`_ shall be used instead.
 *
 * The recipient of this message may only use `HXG Failure`_ message if it was
 * unable to accept this request (like invalid data).
 *
 * Format of `HXG Fast Request`_ message is same as `HXG Request`_ except @TYPE.
 *
 * +---+-------+--------------------------------------------------------------+
 * |   | Bits  | Description                                                  |
 * +===+=======+==============================================================+
 * | 0 |    31 | ORIGIN - see `HXG Message`_                                  |
 * |   +-------+--------------------------------------------------------------+
 * |   | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_                          |
 * |   +-------+--------------------------------------------------------------+
 * |   | 27:16 | DATA0 - see `HXG Request`_                                   |
 * |   +-------+--------------------------------------------------------------+
 * |   |  15:0 | ACTION - see `HXG Request`_                                  |
 * +---+-------+--------------------------------------------------------------+
 * |...|       | DATAn - see `HXG Request`_                                   |
 * +---+-------+--------------------------------------------------------------+
 */

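As a quick illustration of the DW0 layout documented above, the header could be packed from the masks defined in this file. Sketch only: GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST and GUC_HXG_REQUEST_MSG_0_DATA0 are not visible in this hunk and are assumed from the surrounding ABI header.

#include <linux/bitfield.h>

/* Sketch: pack DW0 of a host-originated HXG Fast Request. */
static inline u32 guc_hxg_fast_request_msg_0(u32 action, u32 data0)
{
        return FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
               FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
               FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, data0) |
               FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action);
}
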
/**
 * DOC: HXG Event
 *