WIP: EN7523 Ethernet support #4
@@ -64,8 +64,6 @@ allOf:
        - description: scu base address
        - description: misc scu base address

    '#reset-cells': false

  - if:
      properties:
        compatible:
@@ -89,6 +87,7 @@ examples:
        reg = <0x1fa20000 0x400>,
              <0x1fb00000 0x1000>;
        #clock-cells = <1>;
        #reset-cells = <1>;
    };

  - |

@@ -0,0 +1,49 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/gated-fixed-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Gated Fixed clock

maintainers:
  - Heiko Stuebner <heiko@sntech.de>

properties:
  compatible:
    const: gated-fixed-clock

  "#clock-cells":
    const: 0

  clock-frequency: true

  clock-output-names:
    maxItems: 1

  enable-gpios:
    description:
      Contains a single GPIO specifier for the GPIO that enables and disables
      the oscillator.
    maxItems: 1

  vdd-supply:
    description: handle of the regulator that provides the supply voltage

required:
  - compatible
  - "#clock-cells"
  - clock-frequency
  - vdd-supply

additionalProperties: false

examples:
  - |
    clock-1000000000 {
        compatible = "gated-fixed-clock";
        #clock-cells = <0>;
        clock-frequency = <1000000000>;
        vdd-supply = <&reg_vdd>;
    };
...
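For context, a consumer treats this node like any other fixed clock; the GPIO and regulator gating is handled inside the provider. A minimal sketch, assuming a hypothetical consumer device whose `clocks` property points at the node above (nothing here is taken from the patch itself):

/* Hedged sketch: consumer side of a gated-fixed-clock. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *refclk;
	int ret;

	refclk = devm_clk_get(&pdev->dev, NULL);	/* first "clocks" entry */
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	/* prepare+enable is expected to power vdd-supply and assert
	 * enable-gpios in the provider before the rate is usable */
	ret = clk_prepare_enable(refclk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "reference rate: %lu Hz\n", clk_get_rate(refclk));
	return 0;
}
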

Documentation/devicetree/bindings/leds/st,led1202.yaml (new file, 132 lines)
@@ -0,0 +1,132 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/leds/st,led1202.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: ST LED1202 LED controllers

maintainers:
  - Vicentiu Galanopulo <vicentiu.galanopulo@remote-tech.co.uk>

description: |
  The LED1202 is a 12-channel low quiescent current LED controller
  programmable via I2C; The output current can be adjusted separately
  for each channel by 8-bit analog and 12-bit digital dimming control.
  Datasheet available at
  https://www.st.com/en/power-management/led1202.html

properties:
  compatible:
    const: st,led1202

  reg:
    maxItems: 1

  "#address-cells":
    const: 1

  "#size-cells":
    const: 0

patternProperties:
  "^led@[0-9a-f]$":
    type: object
    $ref: common.yaml#
    unevaluatedProperties: false

    properties:
      reg:
        minimum: 0
        maximum: 11

    required:
      - reg

required:
  - compatible
  - reg
  - "#address-cells"
  - "#size-cells"

additionalProperties: false

examples:
  - |
    #include <dt-bindings/leds/common.h>

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        led-controller@58 {
            compatible = "st,led1202";
            reg = <0x58>;
            #address-cells = <1>;
            #size-cells = <0>;

            led@0 {
                reg = <0x0>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_RED>;
                function-enumerator = <1>;
            };

            led@1 {
                reg = <0x1>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_GREEN>;
                function-enumerator = <2>;
            };

            led@2 {
                reg = <0x2>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_BLUE>;
                function-enumerator = <3>;
            };

            led@3 {
                reg = <0x3>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_RED>;
                function-enumerator = <4>;
            };

            led@4 {
                reg = <0x4>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_GREEN>;
                function-enumerator = <5>;
            };

            led@5 {
                reg = <0x5>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_BLUE>;
                function-enumerator = <6>;
            };

            led@6 {
                reg = <0x6>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_RED>;
                function-enumerator = <7>;
            };

            led@7 {
                reg = <0x7>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_GREEN>;
                function-enumerator = <8>;
            };

            led@8 {
                reg = <0x8>;
                function = LED_FUNCTION_STATUS;
                color = <LED_COLOR_ID_BLUE>;
                function-enumerator = <9>;
            };
        };
    };
...

Documentation/devicetree/bindings/mtd/mtd-concat.txt (new file, 36 lines)
@@ -0,0 +1,36 @@
Virtual MTD concat device

Required properties:
- devices: list of phandles to mtd nodes that should be concatenated

Example:

&spi {
	flash0: flash@0 {
		...
	};
	flash1: flash@1 {
		...
	};
};

flash {
	compatible = "mtd-concat";

	devices = <&flash0 &flash1>;

	partitions {
		compatible = "fixed-partitions";

		partition@0 {
			label = "boot";
			reg = <0x0000000 0x0040000>;
			read-only;
		};

		partition@40000 {
			label = "firmware";
			reg = <0x0040000 0x1fc0000>;
		};
	}
}
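A rough sketch of how a driver could walk the "devices" phandle list of such a node; the helper and its error handling are illustrative only and not taken from the patch:

/* Illustrative only: count and visit the members of an mtd-concat node. */
#include <linux/of.h>

static int count_concat_devices(struct device_node *np)
{
	struct device_node *child;
	int i, n;

	n = of_count_phandle_with_args(np, "devices", NULL);
	if (n < 0)
		return n;

	for (i = 0; i < n; i++) {
		child = of_parse_phandle(np, "devices", i);
		if (!child)
			return -ENODEV;
		/* a real driver would resolve the mtd_info behind 'child' here
		 * and later hand the array to mtd_concat_create() */
		of_node_put(child);
	}
	return n;
}
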
@@ -0,0 +1,60 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/pcs/mediatek,usxgmii.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: MediaTek USXGMII PCS

maintainers:
  - Daniel Golle <daniel@makrotopia.org>

description:
  The MediaTek USXGMII PCS provides physical link control and status
  for USXGMII, 10GBase-R and 5GBase-R links on the SerDes interfaces
  provided by the PEXTP PHY.
  In order to also support legacy 2500Base-X, 1000Base-X and Cisco
  SGMII an existing mediatek,*-sgmiisys LynxI PCS is wrapped to
  provide those interface modes on the same SerDes interfaces shared
  with the USXGMII PCS.

properties:
  $nodename:
    pattern: "^pcs@[0-9a-f]+$"

  compatible:
    const: mediatek,mt7988-usxgmiisys

  reg:
    maxItems: 1

  clocks:
    items:
      - description: USXGMII top-level clock

  resets:
    items:
      - description: XFI reset

required:
  - compatible
  - reg
  - clocks
  - resets

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/mediatek,mt7988-clk.h>
    #define MT7988_TOPRGU_XFI0_GRST 12
    soc {
        #address-cells = <2>;
        #size-cells = <2>;
        usxgmiisys0: pcs@10080000 {
            compatible = "mediatek,mt7988-usxgmiisys";
            reg = <0 0x10080000 0 0x1000>;
            clocks = <&topckgen CLK_TOP_USXGMII_SBUS_0_SEL>;
            resets = <&watchdog MT7988_TOPRGU_XFI0_GRST>;
        };
    };

Documentation/devicetree/bindings/net/realtek,rtl8152.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/realtek,rtl8152.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Realtek RTL8152/RTL8153 series USB ethernet

maintainers:
  - David Bauer <mail@david-bauer.net>

properties:
  compatible:
    oneOf:
      - items:
          - enum:
              - realtek,rtl8152
              - realtek,rtl8153

  reg:
    description: The device number on the USB bus

  realtek,led-data:
    description: Value to be written to the LED configuration register.

required:
  - compatible
  - reg

examples:
  - |
    usb-eth@2 {
        compatible = "realtek,rtl8153";
        reg = <2>;
        realtek,led-data = <0x87>;
    };
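The binding carries a single vendor property; reading it from the USB device's DT node might look roughly like the sketch below. The property name comes from the schema above, everything else (function name, fallback handling) is an assumption for illustration:

#include <linux/of.h>
#include <linux/usb.h>

/* Sketch: fetch realtek,led-data for a USB NIC, if its DT node provides it. */
static u32 rtl_led_data_from_dt(struct usb_device *udev, u32 hw_default)
{
	u32 val;

	if (udev->dev.of_node &&
	    !of_property_read_u32(udev->dev.of_node, "realtek,led-data", &val))
		return val;

	return hw_default;	/* keep the hardware default otherwise */
}
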
@@ -0,0 +1,83 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/phy/airoha,an7581-usb-phy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Airoha AN7581 SoC USB PHY

maintainers:
  - Christian Marangi <ansuelsmth@gmail.com>

description: >
  The Airoha AN7581 SoC USB PHY describes the USB PHY for the USB controller.

  The Airoha AN7581 SoC supports a maximum of 2 USB ports. USB 2.0 mode is
  always supported. USB 3.0 mode is optional and depends on the Serdes
  mode currently configured on the system for the USB port.

  If the airoha,serdes-port property is not declared, it's assumed USB 3.0
  mode is not supported, as the Serdes mode can't be validated.

properties:
  compatible:
    const: airoha,an7581-usb-phy

  reg:
    maxItems: 1

  airoha,usb2-monitor-clk-sel:
    description: Selects which of the 4 available oscillators is used
      for USB 2.0 Slew Rate calibration.
    $ref: /schemas/types.yaml#/definitions/uint32
    enum: [0, 1, 2, 3]

  airoha,serdes-port:
    description: Selects which Serdes port is attached to the USB 3.0 port.
    $ref: /schemas/types.yaml#/definitions/uint32
    enum: [0, 1, 2, 3]

  airoha,scu:
    description: Phandle to the SCU node for USB 3.0 Serdes mode validation.
    $ref: /schemas/types.yaml#/definitions/phandle

  '#phy-cells':
    const: 1

required:
  - compatible
  - reg
  - airoha,usb2-monitor-clk-sel
  - '#phy-cells'

dependentRequired:
  airoha,serdes-port: [ 'airoha,scu' ]

additionalProperties: false

examples:
  - |
    #include <dt-bindings/phy/airoha,an7581-usb-phy.h>
    #include <dt-bindings/soc/airoha,scu-ssr.h>

    phy@1fac0000 {
        compatible = "airoha,an7581-usb-phy";
        reg = <0x1fac0000 0x10000>;

        airoha,usb2-monitor-clk-sel = <AIROHA_USB2_MONCLK_SEL1>;
        airoha,scu = <&scu>;
        airoha,serdes-port = <AIROHA_SCU_SERDES_USB1>;

        #phy-cells = <1>;
    };

    phy@1fae0000 {
        compatible = "airoha,an7581-usb-phy";
        reg = <0x1fae0000 0x10000>;

        airoha,usb2-monitor-clk-sel = <AIROHA_USB2_MONCLK_SEL2>;

        #phy-cells = <1>;
    };

Documentation/devicetree/bindings/phy/mediatek,xfi-tphy.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/phy/mediatek,xfi-tphy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: MediaTek XFI T-PHY

maintainers:
  - Daniel Golle <daniel@makrotopia.org>

description:
  The MediaTek XFI SerDes T-PHY provides the physical SerDes lanes
  used by the (10G/5G) USXGMII PCS and (1G/2.5G) LynxI PCS found in
  MediaTek's 10G-capable SoCs.

properties:
  $nodename:
    pattern: "^phy@[0-9a-f]+$"

  compatible:
    const: mediatek,mt7988-xfi-tphy

  reg:
    maxItems: 1

  clocks:
    items:
      - description: XFI PHY clock
      - description: XFI register clock

  clock-names:
    items:
      - const: xfipll
      - const: topxtal

  resets:
    items:
      - description: PEXTP reset

  mediatek,usxgmii-performance-errata:
    $ref: /schemas/types.yaml#/definitions/flag
    description:
      One instance of the T-PHY on MT7988 suffers from a performance
      problem in 10GBase-R mode which needs a work-around in the driver.
      The work-around is enabled using this flag.

  "#phy-cells":
    const: 0

required:
  - compatible
  - reg
  - clocks
  - clock-names
  - resets
  - "#phy-cells"

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/mediatek,mt7988-clk.h>
    soc {
        #address-cells = <2>;
        #size-cells = <2>;

        phy@11f20000 {
            compatible = "mediatek,mt7988-xfi-tphy";
            reg = <0 0x11f20000 0 0x10000>;
            clocks = <&xfi_pll CLK_XFIPLL_PLL_EN>,
                     <&topckgen CLK_TOP_XFI_PHY_0_XTAL_SEL>;
            clock-names = "xfipll", "topxtal";
            resets = <&watchdog 14>;
            mediatek,usxgmii-performance-errata;
            #phy-cells = <0>;
        };
    };

...

MAINTAINERS (29 lines changed)
@@ -737,6 +737,14 @@ S:	Maintained
F:	Documentation/devicetree/bindings/spi/airoha,en7581-snand.yaml
F:	drivers/spi/spi-airoha-snfi.c

AIROHA USB PHY DRIVER
M:	Christian Marangi <ansuelsmth@gmail.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/phy/airoha,an7581-usb-phy.yaml
F:	drivers/phy/airoha/phy-airoha-usb.c
F:	include/dt-bindings/phy/airoha,an7581-usb-phy.h

AIRSPY MEDIA DRIVER
L:	linux-media@vger.kernel.org
S:	Orphan

@@ -14419,7 +14427,9 @@ M:	Daniel Golle <daniel@makrotopia.org>
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/pcs/pcs-mtk-lynxi.c
F:	drivers/net/pcs/pcs-mtk-usxgmii.c
F:	include/linux/pcs/pcs-mtk-lynxi.h
F:	include/linux/pcs/pcs-mtk-usxgmii.h

MEDIATEK ETHERNET PHY DRIVERS
M:	Daniel Golle <daniel@makrotopia.org>

@@ -14427,8 +14437,10 @@ M:	Qingfang Deng <dqfext@gmail.com>
M:	SkyLake Huang <SkyLake.Huang@mediatek.com>
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/phy/mediatek-ge-soc.c
F:	drivers/net/phy/mediatek-ge.c
F:	drivers/net/phy/mediatek/mtk-ge-soc.c
F:	drivers/net/phy/mediatek/mtk-phy-lib.c
F:	drivers/net/phy/mediatek/mtk-ge.c
F:	drivers/net/phy/mediatek/mtk.h
F:	drivers/phy/mediatek/phy-mtk-xfi-tphy.c

MEDIATEK I2C CONTROLLER DRIVER

@@ -18187,6 +18199,13 @@ F:	drivers/pinctrl/
F:	include/dt-bindings/pinctrl/
F:	include/linux/pinctrl/

PIN CONTROLLER - AIROHA
M:	Lorenzo Bianconi <lorenzo@kernel.org>
L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	Documentation/devicetree/bindings/pinctrl/airoha,en7581-pinctrl.yaml
F:	drivers/pinctrl/mediatek/pinctrl-airoha.c

PIN CONTROLLER - AMD
M:	Basavaraj Natikar <Basavaraj.Natikar@amd.com>
M:	Shyam Sundar S K <Shyam-sundar.S-k@amd.com>

@@ -23666,6 +23685,12 @@ F:	Documentation/filesystems/ubifs-authentication.rst
F:	Documentation/filesystems/ubifs.rst
F:	fs/ubifs/

U-BOOT UIMAGE.FIT PARSER
M:	Daniel Golle <daniel@makrotopia.org>
L:	linux-block@vger.kernel.org
S:	Maintained
F:	drivers/block/fitblk.c

UBLK USERSPACE BLOCK DRIVER
M:	Ming Lei <ming.lei@redhat.com>
L:	linux-block@vger.kernel.org

Makefile (2 lines changed)
@@ -589,7 +589,7 @@ export RUSTC_BOOTSTRAP := 1
# Allows finding `.clippy.toml` in out-of-srctree builds.
export CLIPPY_CONF_DIR := $(srctree)

export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG
export ARCH SRCARCH SUBARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG
export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN
export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL

@@ -88,6 +88,16 @@
	DSP_EARLY_INIT
.endm

; Here "patch-dtb" will embed external .dtb
; Note "patch-dtb" searches for ASCII "OWRTDTB:" string
; and pastes .dtb right after it, hence the string precedes
; __image_dtb symbol.
.section .owrt, "aw",@progbits
.ascii "OWRTDTB:"
ENTRY(__image_dtb)
.fill 0x4000
END(__image_dtb)

.section .init.text, "ax",@progbits

;----------------------------------------------------------------

@@ -450,6 +450,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
/* We always pass 0 as magic from U-boot */
#define UBOOT_MAGIC_VALUE	0

extern struct boot_param_header __image_dtb;

void __init handle_uboot_args(void)
{
	bool use_embedded_dtb = true;

@@ -488,7 +490,7 @@ void __init handle_uboot_args(void)
ignore_uboot_args:

	if (use_embedded_dtb) {
		machine_desc = setup_machine_fdt(__dtb_start);
		machine_desc = setup_machine_fdt(&__image_dtb);
		if (!machine_desc)
			panic("Embedded DT invalid\n");
	}

@@ -203,7 +203,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
	char buf[TASK_COMM_LEN];

	/* handle user mode only and only if enabled by sysadmin */
	if (!user_mode(regs) || !unaligned_enabled)
	if (!unaligned_enabled)
		return 1;

	if (no_unaligned_warning) {

@@ -27,6 +27,19 @@ SECTIONS

	. = CONFIG_LINUX_LINK_BASE;

	/*
	 * In OpenWRT we want to patch built binary embedding .dtb of choice.
	 * This is implemented with "patch-dtb" utility which searches for
	 * "OWRTDTB:" string in first 16k of image and if it is found
	 * copies .dtb right after mentioned string.
	 *
	 * Note: "OWRTDTB:" won't be overwritten with .dtb, .dtb will follow it.
	 */
	.owrt : {
		*(.owrt)
		. = ALIGN(PAGE_SIZE);
	}

	_int_vec_base_lds = .;
	.vector : {
		*(.vector)

@@ -4,6 +4,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/en7523-clk.h>
#include <dt-bindings/reset/airoha,en7523-reset.h>

/ {
	interrupt-parent = <&gic>;
@@ -91,6 +92,7 @@
		reg = <0x1fa20000 0x400>,
		      <0x1fb00000 0x1000>;
		#clock-cells = <1>;
		#reset-cells = <1>;
	};

	gic: interrupt-controller@9000000 {

@@ -54,7 +54,7 @@
#define IDMAP_TEXT							\
	ALIGN_FUNCTION();						\
	__idmap_text_start = .;						\
	*(.idmap.text)							\
	KEEP(*(.idmap.text))						\
	__idmap_text_end = .;						\

#define ARM_DISCARD							\

@@ -114,12 +114,12 @@
	. = ALIGN(8);							\
	.ARM.unwind_idx : {						\
		__start_unwind_idx = .;					\
		*(.ARM.exidx*)						\
		KEEP(*(.ARM.exidx*))					\
		__stop_unwind_idx = .;					\
	}								\
	.ARM.unwind_tab : {						\
		__start_unwind_tab = .;					\
		*(.ARM.extab*)						\
		KEEP(*(.ARM.extab*))					\
		__stop_unwind_tab = .;					\
	}

@@ -131,7 +131,7 @@
	__vectors_lma = .;						\
	OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {		\
		.vectors {						\
			OVERLAY_KEEP(*(.vectors))			\
			KEEP(*(.vectors))				\
		}							\
		.vectors.bhb.loop8 {					\
			OVERLAY_KEEP(*(.vectors.bhb.loop8))		\
@@ -149,7 +149,7 @@
									\
	__stubs_lma = .;						\
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {		\
		*(.stubs)						\
		KEEP(*(.stubs))						\
	}								\
	ARM_LMA(__stubs, .stubs);					\
	. = __stubs_lma + SIZEOF(.stubs);				\

@@ -112,6 +112,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
			return -ENOEXEC;
		}

		if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
		    ELF_ST_BIND(sym->st_info) == STB_WEAK)
			continue;

		loc = dstsec->sh_addr + rel->r_offset;

		switch (ELF32_R_TYPE(rel->r_info)) {

@@ -104,13 +104,13 @@ SECTIONS
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		KEEP(*(.taglist.init))
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		KEEP(*(.alt.smp.init))
		__smpalt_end = .;
	}
#endif

@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>

@@ -1055,9 +1055,6 @@ config FW_ARC
config ARCH_MAY_HAVE_PC_FDC
	bool

config BOOT_RAW
	bool

config CEVT_BCM1480
	bool

@@ -2990,6 +2987,18 @@ choice
	bool "Extend builtin kernel arguments with bootloader arguments"
endchoice

config BOOT_RAW
	bool "Enable the kernel to be executed from the load address"
	default n
	help
	  Allow the kernel to be executed from the load address for
	  bootloaders which cannot read the ELF format. This places
	  a jump to start_kernel at the load address.

	  If unsure, say N.

endmenu

config LOCKDEP_SUPPORT

@@ -94,11 +94,21 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlinuz
# machines may also. Since BFD is incredibly buggy with respect to
# crossformat linking we rely on the elf2ecoff tool for format conversion.
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float -Wa,-msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
ifdef CONFIG_64BIT
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
else
ifdef CONFIG_DYNAMIC_FTRACE
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
else
KBUILD_AFLAGS_MODULE += -mno-long-calls
KBUILD_CFLAGS_MODULE += -mno-long-calls
endif
endif

ifeq ($(CONFIG_RELOCATABLE),y)
LDFLAGS_vmlinux += --emit-relocs
@@ -153,7 +163,7 @@ cflags-$(CONFIG_CPU_R4300)	+= $(call cc-option,-march=r4300,-march=mips3) -Wa,--
cflags-$(CONFIG_CPU_R4X00)	+= $(call cc-option,-march=r4600,-march=mips3) -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX)	+= $(call cc-option,-march=r4600,-march=mips3) -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R1)	+= -march=mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2)	+= -march=mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2)	+= -march=mips32r2 -mtune=34kc -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R5)	+= -march=mips32r5 -Wa,--trap -modd-spreg
cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap -modd-spreg
cflags-$(CONFIG_CPU_MIPS64_R1)	+= -march=mips64 -Wa,--trap

@@ -93,7 +93,9 @@ const char *get_system_type(void);

extern unsigned long mips_machtype;

#ifndef CONFIG_64BIT
extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max);
#endif

extern void prom_init(void);
extern void prom_free_prom_memory(void);

@@ -12,6 +12,11 @@ struct mod_arch_specific {
	const struct exception_table_entry *dbe_start;
	const struct exception_table_entry *dbe_end;
	struct mips_hi16 *r_mips_hi16_list;

	void *phys_plt_tbl;
	void *virt_plt_tbl;
	unsigned int phys_plt_offset;
	unsigned int virt_plt_offset;
};

typedef uint8_t Elf64_Byte;		/* Type for a 8-bit quantity. */

@@ -290,14 +290,46 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long lsize_2 = lsize * 2;				\
	unsigned long lsize_3 = lsize * 3;				\
	unsigned long lsize_4 = lsize * 4;				\
	unsigned long lsize_5 = lsize * 5;				\
	unsigned long lsize_6 = lsize * 6;				\
	unsigned long lsize_7 = lsize * 7;				\
	unsigned long lsize_8 = lsize * 8;				\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
	unsigned long aend = (end + lsize - 1) & ~(lsize - 1);		\
	int lines = (aend - addr) / lsize;				\
									\
	while (1) {							\
	while (lines >= 8) {						\
		prot##cache_op(hitop, addr);				\
		prot##cache_op(hitop, addr + lsize);			\
		prot##cache_op(hitop, addr + lsize_2);			\
		prot##cache_op(hitop, addr + lsize_3);			\
		prot##cache_op(hitop, addr + lsize_4);			\
		prot##cache_op(hitop, addr + lsize_5);			\
		prot##cache_op(hitop, addr + lsize_6);			\
		prot##cache_op(hitop, addr + lsize_7);			\
		addr += lsize_8;					\
		lines -= 8;						\
	}								\
									\
	if (lines & 0x4) {						\
		prot##cache_op(hitop, addr);				\
		prot##cache_op(hitop, addr + lsize);			\
		prot##cache_op(hitop, addr + lsize_2);			\
		prot##cache_op(hitop, addr + lsize_3);			\
		addr += lsize_4;					\
	}								\
									\
	if (lines & 0x2) {						\
		prot##cache_op(hitop, addr);				\
		prot##cache_op(hitop, addr + lsize);			\
		addr += lsize_2;					\
	}								\
									\
	if (lines & 0x1) {						\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

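The visible change above rounds aend up to the next cache line and turns the inclusive bound into a line count, so the flush loop can be unrolled 8/4/2/1 ways. A small worked example of the new bound computation, assuming a 32-byte line size (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long lsize = 32, start = 0x1004, end = 0x1042;
	unsigned long addr = start & ~(lsize - 1);		/* 0x1000 */
	unsigned long aend = (end + lsize - 1) & ~(lsize - 1);	/* 0x1060 */
	int lines = (aend - addr) / lsize;			/* 3 lines */

	/* three cache ops cover 0x1000..0x105f, which includes end = 0x1042 */
	printf("addr=%#lx aend=%#lx lines=%d\n", addr, aend, lines);
	return 0;
}
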
@@ -10,14 +10,11 @@
#include <linux/libfdt.h>
#include <linux/reboot.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
#include <linux/uaccess.h>
#include "machine_kexec.h"

static unsigned long reboot_code_buffer;

@@ -31,6 +28,101 @@ void (*_crash_smp_send_stop)(void) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;

static void machine_kexec_print_args(void)
{
	unsigned long argc = (int)kexec_args[0];
	int i;

	pr_info("kexec_args[0] (argc): %lu\n", argc);
	pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
	pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
	pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);

	for (i = 0; i < argc; i++) {
		pr_info("kexec_argv[%d] = %p, %s\n",
			i, kexec_argv[i], kexec_argv[i]);
	}
}

static void machine_kexec_init_argv(struct kimage *image)
{
	void __user *buf = NULL;
	size_t bufsz;
	size_t size;
	int i;

	bufsz = 0;
	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *seg;

		seg = &image->segment[i];
		if (seg->bufsz < 6)
			continue;

		if (strncmp((char *) seg->buf, "kexec ", 6))
			continue;

		buf = seg->buf;
		bufsz = seg->bufsz;
		break;
	}

	if (!buf)
		return;

	size = KEXEC_COMMAND_LINE_SIZE;
	size = min(size, bufsz);
	if (size < bufsz)
		pr_warn("kexec command line truncated to %zd bytes\n", size);

	/* Copy to kernel space */
	if (copy_from_user(kexec_argv_buf, buf, size))
		pr_warn("kexec command line copy to kernel space failed\n");

	kexec_argv_buf[size - 1] = 0;
}

static void machine_kexec_parse_argv(struct kimage *image)
{
	char *reboot_code_buffer;
	int reloc_delta;
	char *ptr;
	int argc;
	int i;

	ptr = kexec_argv_buf;
	argc = 0;

	/*
	 * convert command line string to array of parameters
	 * (as bootloader does).
	 */
	while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) {
		if (*ptr == ' ') {
			*ptr++ = '\0';
			continue;
		}

		kexec_argv[argc++] = ptr;
		ptr = strchr(ptr, ' ');
	}

	if (!argc)
		return;

	kexec_args[0] = argc;
	kexec_args[1] = (unsigned long)kexec_argv;
	kexec_args[2] = 0;
	kexec_args[3] = 0;

	reboot_code_buffer = page_address(image->control_code_page);
	reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel;

	kexec_args[1] += reloc_delta;
	for (i = 0; i < argc; i++)
		kexec_argv[i] += reloc_delta;
}

static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;
@@ -100,6 +192,18 @@ machine_kexec_prepare(struct kimage *kimage)
#endif

	kexec_image_info(kimage);
	/*
	 * Whenever arguments are passed in from kexec-tools, initialize them
	 * to the original firmware arguments to try to avoid boot failure.
	 */
	kexec_args[0] = fw_arg0;
	kexec_args[1] = fw_arg1;
	kexec_args[2] = fw_arg2;
	kexec_args[3] = fw_arg3;

	machine_kexec_init_argv(kimage);
	machine_kexec_parse_argv(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);
@@ -162,7 +266,7 @@ machine_crash_shutdown(struct pt_regs *regs)
void kexec_nonboot_cpu_jump(void)
{
	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
				 reboot_code_buffer + relocate_new_kernel_size);
				 reboot_code_buffer + KEXEC_RELOCATE_NEW_KERNEL_SIZE);

	relocated_kexec_smp_wait(NULL);
}
@@ -200,7 +304,7 @@ void kexec_reboot(void)
	 * machine_kexec() CPU.
	 */
	local_flush_icache_range(reboot_code_buffer,
				 reboot_code_buffer + relocate_new_kernel_size);
				 reboot_code_buffer + KEXEC_RELOCATE_NEW_KERNEL_SIZE);

	do_kexec = (void *)reboot_code_buffer;
	do_kexec();
@@ -213,10 +317,12 @@ machine_kexec(struct kimage *image)
	unsigned long *ptr;

	reboot_code_buffer =
	  (unsigned long)page_address(image->control_code_page);
		(unsigned long)page_address(image->control_code_page);
	pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);
	pr_info("kexec_start_address = %p\n", (void *)kexec_start_address);

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
@@ -224,9 +330,19 @@ machine_kexec(struct kimage *image)
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}
	pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page);

	memcpy((void*)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);
	pr_info("Where is memcpy: %p\n", memcpy);
	pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n",
		(void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end);
	pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE,
		(void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer);
	memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel,
	       KEXEC_RELOCATE_NEW_KERNEL_SIZE);

	pr_info("Before _print_args().\n");
	machine_kexec_print_args();
	pr_info("Before eval loop.\n");

	/*
	 * The generic kexec code builds a page list with physical
@@ -257,7 +373,7 @@ machine_kexec(struct kimage *image)
#ifdef CONFIG_SMP
	/* All secondary cpus now may jump to kexec_wait cycle */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
		(void *)(kexec_smp_wait - kexec_relocate_new_kernel);
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif

arch/mips/kernel/machine_kexec.h (new file, 20 lines)
@@ -0,0 +1,20 @@
#ifndef _MACHINE_KEXEC_H
#define _MACHINE_KEXEC_H

#ifndef __ASSEMBLY__
extern const unsigned char kexec_relocate_new_kernel[];
extern unsigned long kexec_relocate_new_kernel_end;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

extern char kexec_argv_buf[];
extern char *kexec_argv[];

#define KEXEC_RELOCATE_NEW_KERNEL_SIZE	((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel)
#endif /* !__ASSEMBLY__ */

#define KEXEC_COMMAND_LINE_SIZE		256
#define KEXEC_ARGV_SIZE			(KEXEC_COMMAND_LINE_SIZE / 16)
#define KEXEC_MAX_ARGC			(KEXEC_ARGV_SIZE / sizeof(long))

#endif
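Spelled out, the sizing above resolves to small numbers; purely as a sanity check, assuming a 32-bit build where sizeof(long) == 4:

/* KEXEC_COMMAND_LINE_SIZE = 256            -> bytes copied into kexec_argv_buf
 * KEXEC_ARGV_SIZE         = 256 / 16 = 16  -> bytes reserved for kexec_argv[]
 * KEXEC_MAX_ARGC          = 16 / 4   = 4   -> pointers that fit in that array
 * so machine_kexec_parse_argv() stops after at most four arguments, matching
 * the .skip reservations in relocate_kernel.S below.
 */
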
@@ -8,6 +8,7 @@

#undef DEBUG

#include <linux/execmem.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
@@ -19,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/jump_label.h>
#include <linux/vmalloc.h>
#include <asm/jump_label.h>

struct mips_hi16 {
@@ -30,14 +32,254 @@ struct mips_hi16 {
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);

/*
 * Get the potential max trampolines size required of the init and
 * non-init sections. Only used if we cannot find enough contiguous
 * physically mapped memory to put the module into.
 */
static unsigned int
get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
	     const char *secstrings, unsigned int symindex, bool is_init)
{
	unsigned long ret = 0;
	unsigned int i, j;
	Elf_Sym *syms;

	/* Everything marked ALLOC (this includes the exported symbols) */
	for (i = 1; i < hdr->e_shnum; ++i) {
		unsigned int info = sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_REL
		    && sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* Not a valid relocation section? */
		if (info >= hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(sechdrs[info].sh_flags & SHF_ALLOC))
			continue;

		/* If it's called *.init*, and we're not init, we're
		   not interested */
		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
		    != is_init)
			continue;

		syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
		if (sechdrs[i].sh_type == SHT_REL) {
			Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
			unsigned int size = sechdrs[i].sh_size / sizeof(*rel);

			for (j = 0; j < size; ++j) {
				Elf_Sym *sym;

				if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
					continue;

				sym = syms + ELF_MIPS_R_SYM(rel[j]);
				if (!is_init && sym->st_shndx != SHN_UNDEF)
					continue;

				ret += 4 * sizeof(int);
			}
		} else {
			Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
			unsigned int size = sechdrs[i].sh_size / sizeof(*rela);

			for (j = 0; j < size; ++j) {
				Elf_Sym *sym;

				if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
					continue;

				sym = syms + ELF_MIPS_R_SYM(rela[j]);
				if (!is_init && sym->st_shndx != SHN_UNDEF)
					continue;

				ret += 4 * sizeof(int);
			}
		}
	}

	return ret;
}

#ifndef MODULES_VADDR
static void *alloc_phys(unsigned long size)
{
	unsigned order;
	struct page *page;
	struct page *p;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
			   __GFP_THISNODE, order);
	if (!page)
		return NULL;

	split_page(page, order);

	/* mark all pages except for the last one */
	for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p)
		set_bit(PG_owner_priv_1, &p->flags);

	for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
		__free_page(p);

	return page_address(page);
}

static void free_phys(void *ptr)
{
	struct page *page;
	bool free;

	page = virt_to_page(ptr);
	do {
		free = test_and_clear_bit(PG_owner_priv_1, &page->flags);
		__free_page(page);
		page++;
	} while (free);
}

void *arch_execmem_alloc(enum execmem_type type,
			 size_t size)
{
	void *ptr;

	ptr = alloc_phys(size);

	/* If we failed to allocate physically contiguous memory,
	 * fall back to regular vmalloc. The module loader code will
	 * create jump tables to handle long jumps */
	if (!ptr)
		return vmalloc(size);

	return ptr;
}
#endif

static inline bool is_phys_addr(void *ptr)
{
#ifdef CONFIG_64BIT
	return (KSEGX((unsigned long)ptr) == CKSEG0);
#else
	return (KSEGX(ptr) == KSEG0);
#endif
}

#ifndef MODULES_VADDR
/* Free memory returned from module_alloc */
void arch_execmem_free(void *ptr)
{
	if (is_phys_addr(ptr))
		free_phys(ptr);
	else
		vfree(ptr);
}
#endif

static void *__module_alloc(int size, bool phys)
{
	void *ptr;

	if (phys)
		ptr = kmalloc(size, GFP_KERNEL);
	else
		ptr = vmalloc(size);
	return ptr;
}

static void __module_free(void *ptr)
{
	if (is_phys_addr(ptr))
		kfree(ptr);
	else
		vfree(ptr);
}

int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned int symindex = 0;
	unsigned int core_size, init_size;
	int i;

	mod->arch.phys_plt_offset = 0;
	mod->arch.virt_plt_offset = 0;
	mod->arch.phys_plt_tbl = NULL;
	mod->arch.virt_plt_tbl = NULL;

	if (IS_ENABLED(CONFIG_64BIT))
		return 0;

	for (i = 1; i < hdr->e_shnum; i++)
		if (sechdrs[i].sh_type == SHT_SYMTAB)
			symindex = i;

	core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
	init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);

	if ((core_size + init_size) == 0)
		return 0;

	mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
	if (!mod->arch.phys_plt_tbl)
		return -ENOMEM;

	mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
	if (!mod->arch.virt_plt_tbl) {
		__module_free(mod->arch.phys_plt_tbl);
		mod->arch.phys_plt_tbl = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
{
	*location = base + v;
}

static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
				 void *start, Elf_Addr v)
{
	unsigned *tramp = start + *plt_offset;
	*plt_offset += 4 * sizeof(int);

	/* adjust carry for addiu */
	if (v & 0x00008000)
		v += 0x10000;

	tramp[0] = 0x3c190000 | (v >> 16);	/* lui t9, hi16 */
	tramp[1] = 0x27390000 | (v & 0xffff);	/* addiu t9, t9, lo16 */
	tramp[2] = 0x03200008;			/* jr t9 */
	tramp[3] = 0x00000000;			/* nop */

	return (Elf_Addr) tramp;
}

static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
{
	if (is_phys_addr(location))
		return add_plt_entry_to(&me->arch.phys_plt_offset,
					me->arch.phys_plt_tbl, v);
	else
		return add_plt_entry_to(&me->arch.virt_plt_offset,
					me->arch.virt_plt_tbl, v);

}

static int apply_r_mips_26(struct module *me, u32 *location, u32 base,
			   Elf_Addr v)
{
	u32 ofs = base & 0x03ffffff;

	if (v % 4) {
		pr_err("module %s: dangerous R_MIPS_26 relocation\n",
		       me->name);
@@ -45,13 +287,17 @@ static int apply_r_mips_26(struct module *me, u32 *location, u32 base,
	}

	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
		pr_err("module %s: relocation overflow\n",
		       me->name);
		return -ENOEXEC;
		v = add_plt_entry(me, location, v + (ofs << 2));
		if (!v) {
			pr_err("module %s: relocation overflow\n",
			       me->name);
			return -ENOEXEC;
		}
		ofs = 0;
	}

	*location = (*location & ~0x03ffffff) |
		    ((base + (v >> 2)) & 0x03ffffff);
		    ((ofs + (v >> 2)) & 0x03ffffff);

	return 0;
}
@@ -431,9 +677,36 @@ int module_finalize(const Elf_Ehdr *hdr,
		list_add(&me->arch.dbe_list, &dbe_list);
		spin_unlock_irq(&dbe_lock);
	}

	/* Get rid of the fixup trampoline if we're running the module
	 * from physically mapped address space */
	if (me->arch.phys_plt_offset == 0) {
		__module_free(me->arch.phys_plt_tbl);
		me->arch.phys_plt_tbl = NULL;
	}
	if (me->arch.virt_plt_offset == 0) {
		__module_free(me->arch.virt_plt_tbl);
		me->arch.virt_plt_tbl = NULL;
	}

	return 0;
}

void module_arch_freeing_init(struct module *mod)
{
	if (mod->state == MODULE_STATE_LIVE)
		return;

	if (mod->arch.phys_plt_tbl) {
		__module_free(mod->arch.phys_plt_tbl);
		mod->arch.phys_plt_tbl = NULL;
	}
	if (mod->arch.virt_plt_tbl) {
		__module_free(mod->arch.virt_plt_tbl);
		mod->arch.virt_plt_tbl = NULL;
	}
}

void module_arch_cleanup(struct module *mod)
{
	spin_lock_irq(&dbe_lock);

@@ -395,6 +395,8 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		if (ip->i_format.simmediate > 0)
			return 0;
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}

@@ -10,10 +10,11 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
#include "machine_kexec.h"

#include <kernel-entry-init.h>

LEAF(relocate_new_kernel)
LEAF(kexec_relocate_new_kernel)
	PTR_L a0, arg0
	PTR_L a1, arg1
	PTR_L a2, arg2
@@ -97,7 +98,7 @@ done:
#endif
	/* jump to kexec_start_address */
	j		s1
	END(relocate_new_kernel)
	END(kexec_relocate_new_kernel)

#ifdef CONFIG_SMP
/*
@@ -176,8 +177,15 @@ EXPORT(kexec_indirection_page)
	PTR_WD		0
	.size		kexec_indirection_page, PTRSIZE

relocate_new_kernel_end:
kexec_argv_buf:
	EXPORT(kexec_argv_buf)
	.skip		KEXEC_COMMAND_LINE_SIZE
	.size		kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE

EXPORT(relocate_new_kernel_size)
	PTR_WD		relocate_new_kernel_end - relocate_new_kernel
	.size		relocate_new_kernel_size, PTRSIZE
kexec_argv:
	EXPORT(kexec_argv)
	.skip		KEXEC_ARGV_SIZE
	.size		kexec_argv, KEXEC_ARGV_SIZE

kexec_relocate_new_kernel_end:
	EXPORT(kexec_relocate_new_kernel_end)

@@ -86,21 +86,27 @@ static struct resource bss_resource = { .name = "Kernel bss", };
unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

#ifndef CONFIG_64BIT
static u32 detect_magic __initdata;
#define MIPS_MEM_TEST_PATTERN		0xaa5555aa

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	void *dm = (void *)KSEG1ADDR(&detect_magic);
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
		__raw_writel(MIPS_MEM_TEST_PATTERN, dm);
		if (__raw_readl(dm) == __raw_readl(dm + size)) {
			__raw_writel(~MIPS_MEM_TEST_PATTERN, dm);
			if (__raw_readl(dm) == __raw_readl(dm + size))
				break;
		}
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
@@ -111,6 +117,7 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add

	memblock_add(start, size);
}
#endif /* CONFIG_64BIT */

/*
 * Manage initrd

@@ -403,6 +403,7 @@ static inline void local_r4k___flush_cache_all(void * args)

	default:
		r4k_blast_dcache();
		mb(); /* cache instructions may be reordered */
		r4k_blast_icache();
		break;
	}
@@ -483,8 +484,10 @@ static inline void local_r4k_flush_cache_range(void * args)
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_blast_dcache();
	/* If executable, blast stale lines from icache */
	if (exec)
	if (exec) {
		mb(); /* cache instructions may be reordered */
		r4k_blast_icache();
	}
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
@@ -586,8 +589,13 @@ static inline void local_r4k_flush_cache_page(void *args)
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
		if (exec)
			mb(); /* cache instructions may be reordered */

		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page(addr);
			mb(); /* cache instructions may be reordered */
		}
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
@@ -654,6 +662,7 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
		else
			blast_dcache_range(start, end);
		}
		mb(); /* cache instructions may be reordered */
	}

	if (type == R4K_INDEX ||

@@ -254,7 +254,7 @@ config PPC
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_LZMA			if DEFAULT_UIMAGE
	select HAVE_KERNEL_LZO			if DEFAULT_UIMAGE
	select HAVE_KERNEL_XZ			if PPC_BOOK3S || 44x
	select HAVE_KERNEL_XZ			if PPC_BOOK3S || 44x || PPC_85xx
	select HAVE_KPROBES
	select HAVE_KPROBES_ON_FTRACE
	select HAVE_KRETPROBES

@@ -209,6 +209,12 @@ config BLK_INLINE_ENCRYPTION_FALLBACK
	  by falling back to the kernel crypto API when inline
	  encryption hardware is not present.

config BLOCK_NOTIFIERS
	bool "Enable support for notifications in block layer"
	help
	  Enable this option to provide notifiers for other subsystems
	  upon addition or removal of block devices.

source "block/partitions/Kconfig"

config BLK_MQ_PCI

@@ -38,3 +38,4 @@ obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= blk-crypto.o blk-crypto-profile.o \
					   blk-crypto-sysfs.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)	+= blk-crypto-fallback.o
obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED)	+= holder.o
obj-$(CONFIG_BLOCK_NOTIFIERS)		+= blk-notify.o

block/blk-notify.c (new file, 107 lines)
@@ -0,0 +1,107 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Notifiers for addition and removal of block devices
 *
 * Copyright (c) 2024 Daniel Golle <daniel@makrotopia.org>
 */

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

#include "blk.h"

struct blk_device_list {
	struct device *dev;
	struct list_head list;
};

static RAW_NOTIFIER_HEAD(blk_notifier_list);
static DEFINE_MUTEX(blk_notifier_lock);
static LIST_HEAD(blk_devices);

struct blk_notify_event {
	struct delayed_work work;
	struct device *dev;
};

void blk_register_notify(struct notifier_block *nb)
{
	struct blk_device_list *existing_blkdev;

	mutex_lock(&blk_notifier_lock);
	raw_notifier_chain_register(&blk_notifier_list, nb);

	list_for_each_entry(existing_blkdev, &blk_devices, list)
		nb->notifier_call(nb, BLK_DEVICE_ADD, existing_blkdev->dev);

	mutex_unlock(&blk_notifier_lock);
}
EXPORT_SYMBOL_GPL(blk_register_notify);

void blk_unregister_notify(struct notifier_block *nb)
{
	mutex_lock(&blk_notifier_lock);
	raw_notifier_chain_unregister(&blk_notifier_list, nb);
	mutex_unlock(&blk_notifier_lock);
}
EXPORT_SYMBOL_GPL(blk_unregister_notify);

static void blk_notify_work(struct work_struct *work)
{
	struct blk_notify_event *ev =
		container_of(work, struct blk_notify_event, work.work);

	raw_notifier_call_chain(&blk_notifier_list, BLK_DEVICE_ADD, ev->dev);
	kfree(ev);
}

static int blk_call_notifier_add(struct device *dev)
{
	struct blk_device_list *new_blkdev;
	struct blk_notify_event *ev;

	new_blkdev = kmalloc(sizeof (*new_blkdev), GFP_KERNEL);
	if (!new_blkdev)
		return -ENOMEM;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	INIT_DEFERRABLE_WORK(&ev->work, blk_notify_work);
	ev->dev = dev;
	new_blkdev->dev = dev;
	mutex_lock(&blk_notifier_lock);
	list_add_tail(&new_blkdev->list, &blk_devices);
	schedule_delayed_work(&ev->work, msecs_to_jiffies(500));
	mutex_unlock(&blk_notifier_lock);

	return 0;
}

static void blk_call_notifier_remove(struct device *dev)
{
	struct blk_device_list *old_blkdev, *tmp;

	mutex_lock(&blk_notifier_lock);
	list_for_each_entry_safe(old_blkdev, tmp, &blk_devices, list) {
		if (old_blkdev->dev != dev)
			continue;

		list_del(&old_blkdev->list);
		kfree(old_blkdev);
	}
	raw_notifier_call_chain(&blk_notifier_list, BLK_DEVICE_REMOVE, dev);
	mutex_unlock(&blk_notifier_lock);
}

static struct class_interface blk_notifications_bus_interface __refdata = {
	.class = &block_class,
	.add_dev = &blk_call_notifier_add,
	.remove_dev = &blk_call_notifier_remove,
};

static int __init blk_notifications_init(void)
{
	return class_interface_register(&blk_notifications_bus_interface);
}
device_initcall(blk_notifications_init);
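A consumer of this would follow the usual notifier pattern; a hedged sketch (the callback body and module boilerplate are illustrative, and it assumes the BLK_DEVICE_ADD/BLK_DEVICE_REMOVE actions and blk_register_notify() declarations exported elsewhere in this series):

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/notifier.h>

/* Sketch of a BLOCK_NOTIFIERS consumer. */
static int demo_blk_event(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct device *dev = data;

	pr_info("block device %s: %s\n", dev_name(dev),
		action == BLK_DEVICE_ADD ? "added" : "removed");
	return NOTIFY_OK;
}

static struct notifier_block demo_blk_nb = {
	.notifier_call = demo_blk_event,
};

static int __init demo_init(void)
{
	blk_register_notify(&demo_blk_nb);
	return 0;
}

static void __exit demo_exit(void)
{
	blk_unregister_notify(&demo_blk_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
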
@@ -556,6 +556,7 @@ void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);

@@ -383,16 +383,18 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
}

/**
 * device_add_disk - add disk information to kernel list
 * add_disk_fwnode - add disk information to kernel list with fwnode
 * @parent: parent device for the disk
 * @disk: per-device partitioning information
 * @groups: Additional per-device sysfs groups
 * @fwnode: attached disk fwnode
 *
 * This function registers the partitioning information in @disk
 * with the kernel.
 * with the kernel. Also attach a fwnode to the disk device.
 */
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups)
int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups,
				 struct fwnode_handle *fwnode)

{
	struct device *ddev = disk_to_dev(disk);
@@ -452,6 +454,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
	ddev->parent = parent;
	ddev->groups = groups;
	dev_set_name(ddev, "%s", disk->disk_name);
	if (fwnode)
		device_set_node(ddev, fwnode);
	if (!(disk->flags & GENHD_FL_HIDDEN))
		ddev->devt = MKDEV(disk->major, disk->first_minor);
	ret = device_add(ddev);
@@ -553,6 +557,22 @@ out_exit_elevator:
	elevator_exit(disk->queue);
	return ret;
}
EXPORT_SYMBOL_GPL(add_disk_fwnode);

/**
 * device_add_disk - add disk information to kernel list
 * @parent: parent device for the disk
 * @disk: per-device partitioning information
 * @groups: Additional per-device sysfs groups
 *
 * This function registers the partitioning information in @disk
 * with the kernel.
 */
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups)
{
	return add_disk_fwnode(parent, disk, groups, NULL);
}
EXPORT_SYMBOL(device_add_disk);

static void blk_report_disk_dead(struct gendisk *disk, bool surprise)

@@ -270,4 +270,13 @@ config CMDLINE_PARTITION
	  Say Y here if you want to read the partition table from bootargs.
	  The format for the command line is just like mtdparts.

config OF_PARTITION
	bool "Device Tree partition support" if PARTITION_ADVANCED
	depends on OF
	help
	  Say Y here if you want to enable support for partition table
	  defined in Device Tree. (mainly for eMMC)
	  The format for the device tree node is just like MTD fixed-partition
	  schema.

endmenu

@@ -12,6 +12,7 @@ obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
obj-$(CONFIG_OF_PARTITION) += of.o
obj-$(CONFIG_OSF_PARTITION) += osf.o
obj-$(CONFIG_SGI_PARTITION) += sgi.o
obj-$(CONFIG_SUN_PARTITION) += sun.o

@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/of.h>
#include "../blk.h"

/*
@@ -16,6 +17,9 @@ struct parsed_partitions {
		int flags;
		bool has_info;
		struct partition_meta_info info;
#ifdef CONFIG_OF
		struct device_node *np;
#endif
	} *parts;
	int next;
	int limit;
@@ -34,18 +38,28 @@ static inline void put_dev_sector(Sector p)
}

static inline void
put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
of_put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size,
		 struct device_node *np)
{
	if (n < p->limit) {
		char tmp[1 + BDEVNAME_SIZE + 10 + 1];

		p->parts[n].from = from;
		p->parts[n].size = size;
#ifdef CONFIG_OF
		p->parts[n].np = np;
#endif
		snprintf(tmp, sizeof(tmp), " %s%d", p->name, n);
		strlcat(p->pp_buf, tmp, PAGE_SIZE);
	}
}

static inline void
put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
{
	of_put_partition(p, n, from, size, NULL);
}

/* detection routines go here in alphabetical order: */
int adfspart_check_ADFS(struct parsed_partitions *state);
int adfspart_check_CUMANA(struct parsed_partitions *state);
@@ -62,6 +76,7 @@ int karma_partition(struct parsed_partitions *state);
int ldm_partition(struct parsed_partitions *state);
int mac_partition(struct parsed_partitions *state);
int msdos_partition(struct parsed_partitions *state);
int of_partition(struct parsed_partitions *state);
int osf_partition(struct parsed_partitions *state);
int sgi_partition(struct parsed_partitions *state);
int sun_partition(struct parsed_partitions *state);

@@ -237,6 +237,9 @@ static int add_part(int slot, struct cmdline_subpart *subpart,
|
||||
put_partition(state, slot, subpart->from >> 9,
|
||||
subpart->size >> 9);
|
||||
|
||||
if (subpart->flags & PF_RDONLY)
|
||||
state->parts[slot].flags |= ADDPART_FLAG_READONLY;
|
||||
|
||||
info = &state->parts[slot].info;
|
||||
|
||||
strscpy(info->volname, subpart->name, sizeof(info->volname));
|
||||
|
||||
@@ -9,7 +9,10 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/raid/detect.h>
|
||||
#include <linux/property.h>
|
||||
|
||||
#include "check.h"
|
||||
|
||||
static int (*const check_part[])(struct parsed_partitions *) = {
|
||||
@@ -43,6 +46,9 @@ static int (*const check_part[])(struct parsed_partitions *) = {
|
||||
#ifdef CONFIG_CMDLINE_PARTITION
|
||||
cmdline_partition,
|
||||
#endif
|
||||
#ifdef CONFIG_OF_PARTITION
|
||||
of_partition, /* cmdline have priority to OF */
|
||||
#endif
|
||||
#ifdef CONFIG_EFI_PARTITION
|
||||
efi_partition, /* this must come before msdos */
|
||||
#endif
|
||||
@@ -281,13 +287,82 @@ static ssize_t whole_disk_show(struct device *dev,
|
||||
}
|
||||
static const DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
|
||||
|
||||
static bool part_meta_match(const char *attr, const char *member, size_t length)
|
||||
{
|
||||
/* check if length of attr exceeds specified maximum length */
|
||||
if (strnlen(attr, length) == length)
|
||||
return false;
|
||||
|
||||
/* return true if strings match */
|
||||
return !strncmp(attr, member, length);
|
||||
}
|
||||
|
||||
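To make the check above concrete, here is a small userspace re-implementation of the same rule; the buffer size and strings are made-up values, not taken from the kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same rule as part_meta_match(): an attribute only matches when it is
 * NUL-terminated within @length bytes and compares equal to @member. */
static bool meta_match(const char *attr, const char *member, size_t length)
{
	if (strnlen(attr, length) == length)
		return false;
	return !strncmp(attr, member, length);
}

int main(void)
{
	char volname[8] = "boot";	/* stands in for a volname buffer */

	printf("%d\n", meta_match("boot", volname, sizeof(volname)));	      /* 1 */
	printf("%d\n", meta_match("bootloader1", volname, sizeof(volname))); /* 0: no NUL in 8 bytes */
	return 0;
}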
static struct fwnode_handle *find_partition_fwnode(struct block_device *bdev)
|
||||
{
|
||||
struct fwnode_handle *fw_parts, *fw_part;
|
||||
struct device *ddev = disk_to_dev(bdev->bd_disk);
|
||||
const char *partname, *uuid;
|
||||
u32 partno;
|
||||
bool got_uuid, got_partname, got_partno;
|
||||
|
||||
fw_parts = device_get_named_child_node(ddev, "partitions");
|
||||
if (!fw_parts)
|
||||
return NULL;
|
||||
|
||||
fwnode_for_each_child_node(fw_parts, fw_part) {
|
||||
got_uuid = false;
|
||||
got_partname = false;
|
||||
got_partno = false;
|
||||
/*
|
||||
* If 'uuid' is defined in the partitions firmware node, require the
* partition meta info to be present and the specified uuid to match.
|
||||
*/
|
||||
got_uuid = !fwnode_property_read_string(fw_part, "uuid", &uuid);
|
||||
if (got_uuid && (!bdev->bd_meta_info ||
|
||||
!part_meta_match(uuid, bdev->bd_meta_info->uuid,
|
||||
PARTITION_META_INFO_UUIDLTH)))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* If 'partname' is defined in the partitions firmware node, require the
* partition meta info to be present and the specified volname to match.
|
||||
*/
|
||||
got_partname = !fwnode_property_read_string(fw_part, "partname",
|
||||
&partname);
|
||||
if (got_partname && (!bdev->bd_meta_info ||
|
||||
!part_meta_match(partname,
|
||||
bdev->bd_meta_info->volname,
|
||||
PARTITION_META_INFO_VOLNAMELTH)))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* If 'partno' is defined in the partitions firmware node, the
* specified partno needs to match.
|
||||
*/
|
||||
got_partno = !fwnode_property_read_u32(fw_part, "partno", &partno);
|
||||
if (got_partno && bdev_partno(bdev) != partno)
|
||||
continue;
|
||||
|
||||
/* Skip if no matching criterion is present in the firmware node */
|
||||
if (!got_uuid && !got_partname && !got_partno)
|
||||
continue;
|
||||
|
||||
return fw_part;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Must be called either with open_mutex held, before a disk can be opened or
|
||||
* after all disk users are gone.
|
||||
*/
|
||||
static struct block_device *add_partition(struct gendisk *disk, int partno,
|
||||
sector_t start, sector_t len, int flags,
|
||||
struct partition_meta_info *info)
|
||||
struct partition_meta_info *info,
|
||||
struct device_node *np)
|
||||
{
|
||||
dev_t devt = MKDEV(0, 0);
|
||||
struct device *ddev = disk_to_dev(disk);
|
||||
@@ -336,6 +411,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
|
||||
pdev->class = &block_class;
|
||||
pdev->type = &part_type;
|
||||
pdev->parent = ddev;
|
||||
device_set_node(pdev, of_fwnode_handle(np));
|
||||
|
||||
/* in consecutive minor range? */
|
||||
if (bdev_partno(bdev) < disk->minors) {
|
||||
@@ -355,6 +431,9 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
if (!pdev->fwnode && !pdev->of_node)
|
||||
device_set_node(pdev, find_partition_fwnode(bdev));
|
||||
|
||||
/* delay uevent until 'holders' subdir is created */
|
||||
dev_set_uevent_suppress(pdev, 1);
|
||||
err = device_add(pdev);
|
||||
@@ -373,6 +452,9 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
|
||||
goto out_del;
|
||||
}
|
||||
|
||||
if (flags & ADDPART_FLAG_READONLY)
|
||||
bdev_set_flag(bdev, BD_READ_ONLY);
|
||||
|
||||
/* everything is up and running, commence */
|
||||
err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
|
||||
if (err)
|
||||
@@ -439,7 +521,7 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
|
||||
}
|
||||
|
||||
part = add_partition(disk, partno, start, length,
|
||||
ADDPART_FLAG_NONE, NULL);
|
||||
ADDPART_FLAG_NONE, NULL, NULL);
|
||||
ret = PTR_ERR_OR_ZERO(part);
|
||||
out:
|
||||
mutex_unlock(&disk->open_mutex);
|
||||
@@ -553,8 +635,13 @@ static bool blk_add_partition(struct gendisk *disk,
|
||||
size = get_capacity(disk) - from;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
part = add_partition(disk, p, from, size, state->parts[p].flags,
|
||||
&state->parts[p].info);
|
||||
&state->parts[p].info, state->parts[p].np);
|
||||
#else
|
||||
part = add_partition(disk, p, from, size, state->parts[p].flags,
|
||||
&state->parts[p].info, NULL);
|
||||
#endif
|
||||
if (IS_ERR(part)) {
|
||||
if (PTR_ERR(part) != -ENXIO) {
|
||||
printk(KERN_ERR " %s: p%d could not be added: %pe\n",
|
||||
|
||||
@@ -86,6 +86,7 @@
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/math64.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/slab.h>
|
||||
#include "check.h"
|
||||
#include "efi.h"
|
||||
@@ -95,15 +96,13 @@
|
||||
* the partition tables happens after init too.
|
||||
*/
|
||||
static int force_gpt;
|
||||
static int __init
|
||||
force_gpt_fn(char *str)
|
||||
static int __init force_gpt_fn(char *str)
|
||||
{
|
||||
force_gpt = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("gpt", force_gpt_fn);
|
||||
|
||||
|
||||
/**
|
||||
* efi_crc32() - EFI version of crc32 function
|
||||
* @buf: buffer to calculate crc32 of
|
||||
@@ -116,8 +115,7 @@ __setup("gpt", force_gpt_fn);
|
||||
* Note, the EFI Specification, v1.02, has a reference to
|
||||
* Dr. Dobbs Journal, May 1994 (actually it's in May 1992).
|
||||
*/
|
||||
static inline u32
|
||||
efi_crc32(const void *buf, unsigned long len)
|
||||
static inline u32 efi_crc32(const void *buf, unsigned long len)
|
||||
{
|
||||
return (crc32(~0L, buf, len) ^ ~0L);
|
||||
}
|
||||
@@ -134,7 +132,8 @@ efi_crc32(const void *buf, unsigned long len)
|
||||
static u64 last_lba(struct gendisk *disk)
|
||||
{
|
||||
return div_u64(bdev_nr_bytes(disk->part0),
|
||||
queue_logical_block_size(disk->queue)) - 1ULL;
|
||||
queue_logical_block_size(disk->queue)) -
|
||||
1ULL;
|
||||
}
|
||||
|
||||
static inline int pmbr_part_valid(gpt_mbr_record *part)
|
||||
@@ -195,7 +194,7 @@ static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
|
||||
check_hybrid:
|
||||
for (i = 0; i < 4; i++)
|
||||
if ((mbr->partition_record[i].os_type !=
|
||||
EFI_PMBR_OSTYPE_EFI_GPT) &&
|
||||
EFI_PMBR_OSTYPE_EFI_GPT) &&
|
||||
(mbr->partition_record[i].os_type != 0x00))
|
||||
ret = GPT_MBR_HYBRID;
|
||||
|
||||
@@ -213,10 +212,11 @@ check_hybrid:
|
||||
*/
|
||||
if (ret == GPT_MBR_PROTECTIVE) {
|
||||
sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
|
||||
if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
|
||||
pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
|
||||
sz, min_t(uint32_t,
|
||||
total_sectors - 1, 0xFFFFFFFF));
|
||||
if (sz != (uint32_t)total_sectors - 1 && sz != 0xFFFFFFFF)
|
||||
pr_debug(
|
||||
"GPT: mbr size in lba (%u) different than whole disk (%u).\n",
|
||||
sz,
|
||||
min_t(uint32_t, total_sectors - 1, 0xFFFFFFFF));
|
||||
}
|
||||
done:
|
||||
return ret;
|
||||
@@ -232,15 +232,14 @@ done:
|
||||
* Description: Reads @count bytes from @state->disk into @buffer.
|
||||
* Returns number of bytes read on success, 0 on error.
|
||||
*/
|
||||
static size_t read_lba(struct parsed_partitions *state,
|
||||
u64 lba, u8 *buffer, size_t count)
|
||||
static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
|
||||
size_t count)
|
||||
{
|
||||
size_t totalreadcount = 0;
|
||||
sector_t n = lba *
|
||||
(queue_logical_block_size(state->disk->queue) / 512);
|
||||
sector_t n = lba * (queue_logical_block_size(state->disk->queue) / 512);
|
||||
|
||||
if (!buffer || lba > last_lba(state->disk))
|
||||
return 0;
|
||||
return 0;
|
||||
|
||||
while (count) {
|
||||
int copied = 512;
|
||||
@@ -253,7 +252,7 @@ static size_t read_lba(struct parsed_partitions *state,
|
||||
memcpy(buffer, data, copied);
|
||||
put_dev_sector(sect);
|
||||
buffer += copied;
|
||||
totalreadcount +=copied;
|
||||
totalreadcount += copied;
|
||||
count -= copied;
|
||||
}
|
||||
return totalreadcount;
|
||||
@@ -278,17 +277,17 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
|
||||
return NULL;
|
||||
|
||||
count = (size_t)le32_to_cpu(gpt->num_partition_entries) *
|
||||
le32_to_cpu(gpt->sizeof_partition_entry);
|
||||
le32_to_cpu(gpt->sizeof_partition_entry);
|
||||
if (!count)
|
||||
return NULL;
|
||||
pte = kmalloc(count, GFP_KERNEL);
|
||||
if (!pte)
|
||||
return NULL;
|
||||
|
||||
if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
|
||||
(u8 *) pte, count) < count) {
|
||||
if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), (u8 *)pte,
|
||||
count) < count) {
|
||||
kfree(pte);
|
||||
pte=NULL;
|
||||
pte = NULL;
|
||||
return NULL;
|
||||
}
|
||||
return pte;
|
||||
@@ -313,9 +312,9 @@ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
|
||||
if (!gpt)
|
||||
return NULL;
|
||||
|
||||
if (read_lba(state, lba, (u8 *) gpt, ssz) < ssz) {
|
||||
if (read_lba(state, lba, (u8 *)gpt, ssz) < ssz) {
|
||||
kfree(gpt);
|
||||
gpt=NULL;
|
||||
gpt = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -354,8 +353,9 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
|
||||
/* Check the GUID Partition Table header size is too big */
|
||||
if (le32_to_cpu((*gpt)->header_size) >
|
||||
queue_logical_block_size(state->disk->queue)) {
|
||||
pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
|
||||
queue_logical_block_size(state->disk->queue)) {
|
||||
pr_debug(
|
||||
"GUID Partition Table Header size is too large: %u > %u\n",
|
||||
le32_to_cpu((*gpt)->header_size),
|
||||
queue_logical_block_size(state->disk->queue));
|
||||
goto fail;
|
||||
@@ -363,16 +363,17 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
|
||||
/* Check the GUID Partition Table header size is too small */
|
||||
if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
|
||||
pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
|
||||
le32_to_cpu((*gpt)->header_size),
|
||||
sizeof(gpt_header));
|
||||
pr_debug(
|
||||
"GUID Partition Table Header size is too small: %u < %zu\n",
|
||||
le32_to_cpu((*gpt)->header_size), sizeof(gpt_header));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Check the GUID Partition Table CRC */
|
||||
origcrc = le32_to_cpu((*gpt)->header_crc32);
|
||||
(*gpt)->header_crc32 = 0;
|
||||
crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size));
|
||||
crc = efi_crc32((const unsigned char *)(*gpt),
|
||||
le32_to_cpu((*gpt)->header_size));
|
||||
|
||||
if (crc != origcrc) {
|
||||
pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n",
|
||||
@@ -396,20 +397,25 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
lastlba = last_lba(state->disk);
|
||||
if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
|
||||
pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
|
||||
(unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(
|
||||
(*gpt)->first_usable_lba),
|
||||
(unsigned long long)lastlba);
|
||||
goto fail;
|
||||
}
|
||||
if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) {
|
||||
pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
|
||||
(unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(
|
||||
(*gpt)->last_usable_lba),
|
||||
(unsigned long long)lastlba);
|
||||
goto fail;
|
||||
}
|
||||
if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) {
|
||||
if (le64_to_cpu((*gpt)->last_usable_lba) <
|
||||
le64_to_cpu((*gpt)->first_usable_lba)) {
|
||||
pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
|
||||
(unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
|
||||
(unsigned long long)le64_to_cpu((*gpt)->first_usable_lba));
|
||||
(unsigned long long)le64_to_cpu(
|
||||
(*gpt)->last_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(
|
||||
(*gpt)->first_usable_lba));
|
||||
goto fail;
|
||||
}
|
||||
/* Check that sizeof_partition_entry has the correct value */
|
||||
@@ -420,10 +426,11 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
|
||||
/* Sanity check partition table size */
|
||||
pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) *
|
||||
le32_to_cpu((*gpt)->sizeof_partition_entry);
|
||||
le32_to_cpu((*gpt)->sizeof_partition_entry);
|
||||
if (pt_size > KMALLOC_MAX_SIZE) {
|
||||
pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n",
|
||||
(unsigned long long)pt_size, KMALLOC_MAX_SIZE);
|
||||
pr_debug(
|
||||
"GUID Partition Table is too large: %llu > %lu bytes\n",
|
||||
(unsigned long long)pt_size, KMALLOC_MAX_SIZE);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@@ -431,7 +438,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
goto fail;
|
||||
|
||||
/* Check the GUID Partition Entry Array CRC */
|
||||
crc = efi_crc32((const unsigned char *) (*ptes), pt_size);
|
||||
crc = efi_crc32((const unsigned char *)(*ptes), pt_size);
|
||||
|
||||
if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
|
||||
pr_debug("GUID Partition Entry Array CRC check failed.\n");
|
||||
@@ -441,10 +448,10 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
/* We're done, all's well */
|
||||
return 1;
|
||||
|
||||
fail_ptes:
|
||||
fail_ptes:
|
||||
kfree(*ptes);
|
||||
*ptes = NULL;
|
||||
fail:
|
||||
fail:
|
||||
kfree(*gpt);
|
||||
*gpt = NULL;
|
||||
return 0;
|
||||
@@ -457,12 +464,11 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
|
||||
*
|
||||
* Description: returns 1 if valid, 0 on error.
|
||||
*/
|
||||
static inline int
|
||||
is_pte_valid(const gpt_entry *pte, const u64 lastlba)
|
||||
static inline int is_pte_valid(const gpt_entry *pte, const u64 lastlba)
|
||||
{
|
||||
if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
|
||||
le64_to_cpu(pte->starting_lba) > lastlba ||
|
||||
le64_to_cpu(pte->ending_lba) > lastlba)
|
||||
le64_to_cpu(pte->starting_lba) > lastlba ||
|
||||
le64_to_cpu(pte->ending_lba) > lastlba)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
@@ -477,8 +483,7 @@ is_pte_valid(const gpt_entry *pte, const u64 lastlba)
|
||||
* and prints warnings on discrepancies.
|
||||
*
|
||||
*/
|
||||
static void
|
||||
compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
|
||||
static void compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
|
||||
{
|
||||
int error_found = 0;
|
||||
if (!pgpt || !agpt)
|
||||
@@ -486,31 +491,32 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
|
||||
if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
|
||||
pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n");
|
||||
pr_warn("GPT:%lld != %lld\n",
|
||||
(unsigned long long)le64_to_cpu(pgpt->my_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->alternate_lba));
|
||||
(unsigned long long)le64_to_cpu(pgpt->my_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->alternate_lba));
|
||||
error_found++;
|
||||
}
|
||||
if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
|
||||
pr_warn("GPT:Primary header alternate_lba != Alt. header my_lba\n");
|
||||
pr_warn("GPT:%lld != %lld\n",
|
||||
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->my_lba));
|
||||
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->my_lba));
|
||||
error_found++;
|
||||
}
|
||||
if (le64_to_cpu(pgpt->first_usable_lba) !=
|
||||
le64_to_cpu(agpt->first_usable_lba)) {
|
||||
le64_to_cpu(agpt->first_usable_lba)) {
|
||||
pr_warn("GPT:first_usable_lbas don't match.\n");
|
||||
pr_warn("GPT:%lld != %lld\n",
|
||||
(unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->first_usable_lba));
|
||||
(unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(
|
||||
agpt->first_usable_lba));
|
||||
error_found++;
|
||||
}
|
||||
if (le64_to_cpu(pgpt->last_usable_lba) !=
|
||||
le64_to_cpu(agpt->last_usable_lba)) {
|
||||
le64_to_cpu(agpt->last_usable_lba)) {
|
||||
pr_warn("GPT:last_usable_lbas don't match.\n");
|
||||
pr_warn("GPT:%lld != %lld\n",
|
||||
(unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->last_usable_lba));
|
||||
(unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
|
||||
(unsigned long long)le64_to_cpu(agpt->last_usable_lba));
|
||||
error_found++;
|
||||
}
|
||||
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
|
||||
@@ -518,27 +524,27 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
|
||||
error_found++;
|
||||
}
|
||||
if (le32_to_cpu(pgpt->num_partition_entries) !=
|
||||
le32_to_cpu(agpt->num_partition_entries)) {
|
||||
le32_to_cpu(agpt->num_partition_entries)) {
|
||||
pr_warn("GPT:num_partition_entries don't match: "
|
||||
"0x%x != 0x%x\n",
|
||||
le32_to_cpu(pgpt->num_partition_entries),
|
||||
le32_to_cpu(agpt->num_partition_entries));
|
||||
"0x%x != 0x%x\n",
|
||||
le32_to_cpu(pgpt->num_partition_entries),
|
||||
le32_to_cpu(agpt->num_partition_entries));
|
||||
error_found++;
|
||||
}
|
||||
if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
|
||||
le32_to_cpu(agpt->sizeof_partition_entry)) {
|
||||
le32_to_cpu(agpt->sizeof_partition_entry)) {
|
||||
pr_warn("GPT:sizeof_partition_entry values don't match: "
|
||||
"0x%x != 0x%x\n",
|
||||
le32_to_cpu(pgpt->sizeof_partition_entry),
|
||||
le32_to_cpu(agpt->sizeof_partition_entry));
|
||||
"0x%x != 0x%x\n",
|
||||
le32_to_cpu(pgpt->sizeof_partition_entry),
|
||||
le32_to_cpu(agpt->sizeof_partition_entry));
|
||||
error_found++;
|
||||
}
|
||||
if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
|
||||
le32_to_cpu(agpt->partition_entry_array_crc32)) {
|
||||
le32_to_cpu(agpt->partition_entry_array_crc32)) {
|
||||
pr_warn("GPT:partition_entry_array_crc32 values don't match: "
|
||||
"0x%x != 0x%x\n",
|
||||
le32_to_cpu(pgpt->partition_entry_array_crc32),
|
||||
le32_to_cpu(agpt->partition_entry_array_crc32));
|
||||
"0x%x != 0x%x\n",
|
||||
le32_to_cpu(pgpt->partition_entry_array_crc32),
|
||||
le32_to_cpu(agpt->partition_entry_array_crc32));
|
||||
error_found++;
|
||||
}
|
||||
if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
|
||||
@@ -594,7 +600,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
|
||||
return 0;
|
||||
|
||||
lastlba = last_lba(state->disk);
|
||||
if (!force_gpt) {
|
||||
if (!force_gpt) {
|
||||
/* This will be added to the EFI Spec. per Intel after v1.02. */
|
||||
legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
|
||||
if (!legacymbr)
|
||||
@@ -608,18 +614,17 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
|
||||
goto fail;
|
||||
|
||||
pr_debug("Device has a %s MBR\n",
|
||||
good_pmbr == GPT_MBR_PROTECTIVE ?
|
||||
"protective" : "hybrid");
|
||||
good_pmbr == GPT_MBR_PROTECTIVE ? "protective" :
|
||||
"hybrid");
|
||||
}
|
||||
|
||||
good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
|
||||
&pgpt, &pptes);
|
||||
if (good_pgpt)
|
||||
good_agpt = is_gpt_valid(state,
|
||||
le64_to_cpu(pgpt->alternate_lba),
|
||||
&agpt, &aptes);
|
||||
if (!good_agpt && force_gpt)
|
||||
good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
|
||||
good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA, &pgpt,
|
||||
&pptes);
|
||||
if (good_pgpt)
|
||||
good_agpt = is_gpt_valid(
|
||||
state, le64_to_cpu(pgpt->alternate_lba), &agpt, &aptes);
|
||||
if (!good_agpt && force_gpt)
|
||||
good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
|
||||
|
||||
if (!good_agpt && force_gpt && fops->alternative_gpt_sector) {
|
||||
sector_t agpt_sector;
|
||||
@@ -627,43 +632,42 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
|
||||
|
||||
err = fops->alternative_gpt_sector(disk, &agpt_sector);
|
||||
if (!err)
|
||||
good_agpt = is_gpt_valid(state, agpt_sector,
|
||||
&agpt, &aptes);
|
||||
good_agpt =
|
||||
is_gpt_valid(state, agpt_sector, &agpt, &aptes);
|
||||
}
|
||||
|
||||
/* The obviously unsuccessful case */
|
||||
if (!good_pgpt && !good_agpt)
|
||||
goto fail;
|
||||
/* The obviously unsuccessful case */
|
||||
if (!good_pgpt && !good_agpt)
|
||||
goto fail;
|
||||
|
||||
compare_gpts(pgpt, agpt, lastlba);
|
||||
compare_gpts(pgpt, agpt, lastlba);
|
||||
|
||||
/* The good cases */
|
||||
if (good_pgpt) {
|
||||
*gpt = pgpt;
|
||||
*ptes = pptes;
|
||||
kfree(agpt);
|
||||
kfree(aptes);
|
||||
/* The good cases */
|
||||
if (good_pgpt) {
|
||||
*gpt = pgpt;
|
||||
*ptes = pptes;
|
||||
kfree(agpt);
|
||||
kfree(aptes);
|
||||
if (!good_agpt)
|
||||
pr_warn("Alternate GPT is invalid, using primary GPT.\n");
|
||||
return 1;
|
||||
}
|
||||
else if (good_agpt) {
|
||||
*gpt = agpt;
|
||||
*ptes = aptes;
|
||||
kfree(pgpt);
|
||||
kfree(pptes);
|
||||
pr_warn("Alternate GPT is invalid, using primary GPT.\n");
|
||||
return 1;
|
||||
} else if (good_agpt) {
|
||||
*gpt = agpt;
|
||||
*ptes = aptes;
|
||||
kfree(pgpt);
|
||||
kfree(pptes);
|
||||
pr_warn("Primary GPT is invalid, using alternate GPT.\n");
|
||||
return 1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
fail:
|
||||
kfree(pgpt);
|
||||
kfree(agpt);
|
||||
kfree(pptes);
|
||||
kfree(aptes);
|
||||
*gpt = NULL;
|
||||
*ptes = NULL;
|
||||
return 0;
|
||||
fail:
|
||||
kfree(pgpt);
|
||||
kfree(agpt);
|
||||
kfree(pptes);
|
||||
kfree(aptes);
|
||||
*gpt = NULL;
|
||||
*ptes = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -691,6 +695,34 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
|
||||
}
|
||||
}
|
||||
|
||||
static struct device_node *find_partition_of_node(struct device_node *partitions_np,
|
||||
gpt_entry *ptes)
|
||||
{
|
||||
char volname[64], partuuid[UUID_STRING_LEN + 1];
|
||||
const char *uuid, *name;
|
||||
|
||||
if (!partitions_np ||
|
||||
!of_device_is_compatible(partitions_np, "gpt-partitions"))
|
||||
return NULL;
|
||||
|
||||
efi_guid_to_str(&ptes->unique_partition_guid, partuuid);
|
||||
utf16_le_to_7bit(ptes->partition_name, ARRAY_SIZE(volname) - 1, volname);
|
||||
|
||||
for_each_available_child_of_node_scoped(partitions_np, np) {
|
||||
if (!of_property_read_string(np, "uuid", &uuid) &&
|
||||
strncmp(uuid, partuuid, ARRAY_SIZE(partuuid)))
|
||||
continue;
|
||||
|
||||
if (!of_property_read_string(np, "partname", &name) &&
|
||||
strncmp(name, volname, ARRAY_SIZE(volname)))
|
||||
continue;
|
||||
|
||||
return np;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* efi_partition - scan for GPT partitions
|
||||
* @state: disk parsed partitions
|
||||
@@ -716,6 +748,8 @@ int efi_partition(struct parsed_partitions *state)
|
||||
gpt_entry *ptes = NULL;
|
||||
u32 i;
|
||||
unsigned ssz = queue_logical_block_size(state->disk->queue) / 512;
|
||||
struct device *ddev = disk_to_dev(state->disk);
|
||||
struct device_node *partitions_np = of_node_get(ddev->of_node);
|
||||
|
||||
if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
|
||||
kfree(gpt);
|
||||
@@ -725,7 +759,9 @@ int efi_partition(struct parsed_partitions *state)
|
||||
|
||||
pr_debug("GUID Partition Table is valid! Yea!\n");
|
||||
|
||||
for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
|
||||
for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) &&
|
||||
i < state->limit - 1;
|
||||
i++) {
|
||||
struct partition_meta_info *info;
|
||||
unsigned label_max;
|
||||
u64 start = le64_to_cpu(ptes[i].starting_lba);
|
||||
@@ -735,10 +771,12 @@ int efi_partition(struct parsed_partitions *state)
|
||||
if (!is_pte_valid(&ptes[i], last_lba(state->disk)))
|
||||
continue;
|
||||
|
||||
put_partition(state, i+1, start * ssz, size * ssz);
|
||||
of_put_partition(state, i + 1, start * ssz, size * ssz,
|
||||
find_partition_of_node(partitions_np, &ptes[i]));
|
||||
|
||||
/* If this is a RAID volume, tell md */
|
||||
if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID))
|
||||
if (!efi_guidcmp(ptes[i].partition_type_guid,
|
||||
PARTITION_LINUX_RAID_GUID))
|
||||
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
|
||||
|
||||
info = &state->parts[i + 1].info;
|
||||
@@ -747,7 +785,8 @@ int efi_partition(struct parsed_partitions *state)
|
||||
/* Naively convert UTF16-LE to 7 bits. */
|
||||
label_max = min(ARRAY_SIZE(info->volname) - 1,
|
||||
ARRAY_SIZE(ptes[i].partition_name));
|
||||
utf16_le_to_7bit(ptes[i].partition_name, label_max, info->volname);
|
||||
utf16_le_to_7bit(ptes[i].partition_name, label_max,
|
||||
info->volname);
|
||||
state->parts[i + 1].has_info = true;
|
||||
}
|
||||
kfree(ptes);
|
||||
|
||||
110
block/partitions/of.c
Normal file
@@ -0,0 +1,110 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/string.h>
|
||||
#include "check.h"
|
||||
|
||||
static int validate_of_partition(struct device_node *np, int slot)
|
||||
{
|
||||
u64 offset, size;
|
||||
int len;
|
||||
|
||||
const __be32 *reg = of_get_property(np, "reg", &len);
|
||||
int a_cells = of_n_addr_cells(np);
|
||||
int s_cells = of_n_size_cells(np);
|
||||
|
||||
/* Make sure reg len match the expected addr and size cells */
|
||||
if (len / sizeof(*reg) != a_cells + s_cells)
|
||||
return -EINVAL;
|
||||
|
||||
/* Validate offset conversion from bytes to sectors */
|
||||
offset = of_read_number(reg, a_cells);
|
||||
if (offset % SECTOR_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
/* Validate size conversion from bytes to sectors */
|
||||
size = of_read_number(reg + a_cells, s_cells);
|
||||
if (!size || size % SECTOR_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
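As a worked example of the byte-to-sector conversion that validate_of_partition() checks and add_of_partition() below performs (the reg values are hypothetical, not taken from any real device tree):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512

int main(void)
{
	/* hypothetical node: reg = <0x0 0x08000000 0x0 0x02000000> with
	 * #address-cells = <2> and #size-cells = <2> */
	uint64_t offset_bytes = 0x08000000ULL;	/* 128 MiB offset */
	uint64_t size_bytes   = 0x02000000ULL;	/* 32 MiB partition */

	if (offset_bytes % SECTOR_SIZE || size_bytes % SECTOR_SIZE)
		return 1;	/* validate_of_partition() would reject this */

	/* prints "start sector 262144, length 65536 sectors" */
	printf("start sector %llu, length %llu sectors\n",
	       (unsigned long long)(offset_bytes / SECTOR_SIZE),
	       (unsigned long long)(size_bytes / SECTOR_SIZE));
	return 0;
}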
static void add_of_partition(struct parsed_partitions *state, int slot,
|
||||
struct device_node *np)
|
||||
{
|
||||
struct partition_meta_info *info;
|
||||
char tmp[sizeof(info->volname) + 4];
|
||||
const char *partname;
|
||||
int len;
|
||||
|
||||
const __be32 *reg = of_get_property(np, "reg", &len);
|
||||
int a_cells = of_n_addr_cells(np);
|
||||
int s_cells = of_n_size_cells(np);
|
||||
|
||||
/* Convert bytes to sector size */
|
||||
u64 offset = of_read_number(reg, a_cells) / SECTOR_SIZE;
|
||||
u64 size = of_read_number(reg + a_cells, s_cells) / SECTOR_SIZE;
|
||||
|
||||
of_put_partition(state, slot, offset, size, np);
|
||||
|
||||
if (of_property_read_bool(np, "read-only"))
|
||||
state->parts[slot].flags |= ADDPART_FLAG_READONLY;
|
||||
|
||||
/*
|
||||
* Follow MTD label logic, search for label property,
|
||||
* fallback to node name if not found.
|
||||
*/
|
||||
info = &state->parts[slot].info;
|
||||
partname = of_get_property(np, "label", &len);
|
||||
if (!partname)
|
||||
partname = of_get_property(np, "name", &len);
|
||||
strscpy(info->volname, partname, sizeof(info->volname));
|
||||
|
||||
snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
|
||||
strlcat(state->pp_buf, tmp, PAGE_SIZE);
|
||||
}
|
||||
|
||||
int of_partition(struct parsed_partitions *state)
|
||||
{
|
||||
struct device *ddev = disk_to_dev(state->disk);
|
||||
struct device_node *np;
|
||||
int slot;
|
||||
|
||||
struct device_node *partitions_np = of_node_get(ddev->of_node);
|
||||
|
||||
if (!partitions_np ||
|
||||
!of_device_is_compatible(partitions_np, "fixed-partitions"))
|
||||
return 0;
|
||||
|
||||
slot = 1;
|
||||
/* Validate partition offset and size */
|
||||
for_each_child_of_node(partitions_np, np) {
|
||||
if (validate_of_partition(np, slot)) {
|
||||
of_node_put(np);
|
||||
of_node_put(partitions_np);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
slot++;
|
||||
}
|
||||
|
||||
slot = 1;
|
||||
for_each_child_of_node(partitions_np, np) {
|
||||
if (slot >= state->limit) {
|
||||
of_node_put(np);
|
||||
break;
|
||||
}
|
||||
|
||||
add_of_partition(state, slot, np);
|
||||
|
||||
slot++;
|
||||
}
|
||||
|
||||
strlcat(state->pp_buf, "\n", PAGE_SIZE);
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -55,7 +55,7 @@ config CRYPTO_FIPS_VERSION
|
||||
By default the KERNELRELEASE value is used.
|
||||
|
||||
config CRYPTO_ALGAPI
|
||||
tristate
|
||||
tristate "ALGAPI"
|
||||
select CRYPTO_ALGAPI2
|
||||
help
|
||||
This option provides the API for cryptographic algorithms.
|
||||
@@ -64,7 +64,7 @@ config CRYPTO_ALGAPI2
|
||||
tristate
|
||||
|
||||
config CRYPTO_AEAD
|
||||
tristate
|
||||
tristate "AEAD"
|
||||
select CRYPTO_AEAD2
|
||||
select CRYPTO_ALGAPI
|
||||
|
||||
@@ -82,7 +82,7 @@ config CRYPTO_SIG2
|
||||
select CRYPTO_ALGAPI2
|
||||
|
||||
config CRYPTO_SKCIPHER
|
||||
tristate
|
||||
tristate "SKCIPHER"
|
||||
select CRYPTO_SKCIPHER2
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_ECB
|
||||
@@ -92,7 +92,7 @@ config CRYPTO_SKCIPHER2
|
||||
select CRYPTO_ALGAPI2
|
||||
|
||||
config CRYPTO_HASH
|
||||
tristate
|
||||
tristate "HASH"
|
||||
select CRYPTO_HASH2
|
||||
select CRYPTO_ALGAPI
|
||||
|
||||
@@ -101,7 +101,7 @@ config CRYPTO_HASH2
|
||||
select CRYPTO_ALGAPI2
|
||||
|
||||
config CRYPTO_RNG
|
||||
tristate
|
||||
tristate "RNG"
|
||||
select CRYPTO_RNG2
|
||||
select CRYPTO_ALGAPI
|
||||
|
||||
@@ -149,15 +149,15 @@ config CRYPTO_MANAGER
|
||||
cbc(aes).
|
||||
|
||||
config CRYPTO_MANAGER2
|
||||
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
|
||||
select CRYPTO_ACOMP2
|
||||
select CRYPTO_AEAD2
|
||||
select CRYPTO_AKCIPHER2
|
||||
select CRYPTO_SIG2
|
||||
select CRYPTO_HASH2
|
||||
select CRYPTO_KPP2
|
||||
select CRYPTO_RNG2
|
||||
select CRYPTO_SKCIPHER2
|
||||
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y && !CRYPTO_MANAGER_DISABLE_TESTS)
|
||||
select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_AEAD2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_SIG2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_HASH2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_RNG2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
select CRYPTO_SKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
|
||||
config CRYPTO_USER
|
||||
tristate "Userspace cryptographic algorithm configuration"
|
||||
|
||||
@@ -203,6 +203,10 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
|
||||
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
|
||||
param->type = alg->cra_flags;
|
||||
|
||||
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
|
||||
param->type |= CRYPTO_ALG_TESTED;
|
||||
#endif
|
||||
|
||||
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
|
||||
if (IS_ERR(thread))
|
||||
goto err_free_param;
|
||||
|
||||
@@ -67,6 +67,22 @@ config ATA_FORCE
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config ARCH_WANT_LIBATA_LEDS
|
||||
bool
|
||||
|
||||
config ATA_LEDS
|
||||
bool "support ATA port LED triggers"
|
||||
depends on ARCH_WANT_LIBATA_LEDS
|
||||
select NEW_LEDS
|
||||
select LEDS_CLASS
|
||||
select LEDS_TRIGGERS
|
||||
default y
|
||||
help
|
||||
This option adds an LED trigger for each registered ATA port.
It is used to drive disk activity LEDs connected via GPIO.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config ATA_ACPI
|
||||
bool "ATA ACPI Support"
|
||||
depends on ACPI
|
||||
|
||||
@@ -703,6 +703,17 @@ static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
|
||||
qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ATA_LEDS
|
||||
#define LIBATA_BLINK_DELAY 20 /* ms */
|
||||
static inline void ata_led_act(struct ata_port *ap)
|
||||
{
|
||||
if (unlikely(!ap->ledtrig))
|
||||
return;
|
||||
|
||||
led_trigger_blink_oneshot(ap->ledtrig, LIBATA_BLINK_DELAY, LIBATA_BLINK_DELAY, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* ata_build_rw_tf - Build ATA taskfile for given read/write request
|
||||
* @qc: Metadata associated with the taskfile to build
|
||||
@@ -4767,6 +4778,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
|
||||
link->active_tag = ATA_TAG_POISON;
|
||||
ap->nr_active_links--;
|
||||
}
|
||||
#ifdef CONFIG_ATA_LEDS
|
||||
ata_led_act(ap);
|
||||
#endif
|
||||
|
||||
/* clear exclusive status */
|
||||
if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
|
||||
@@ -5488,6 +5502,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
|
||||
#ifdef ATA_IRQ_TRAP
|
||||
ap->stats.unhandled_irq = 1;
|
||||
ap->stats.idle_irq = 1;
|
||||
#endif
|
||||
#ifdef CONFIG_ATA_LEDS
|
||||
ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
|
||||
#endif
|
||||
ata_sff_port_init(ap);
|
||||
|
||||
@@ -5505,6 +5522,12 @@ void ata_port_free(struct ata_port *ap)
|
||||
kfree(ap->pmp_link);
|
||||
kfree(ap->slave_link);
|
||||
ida_free(&ata_ida, ap->print_id);
|
||||
#ifdef CONFIG_ATA_LEDS
|
||||
if (ap->ledtrig) {
|
||||
led_trigger_unregister(ap->ledtrig);
|
||||
kfree(ap->ledtrig);
|
||||
}
|
||||
#endif
|
||||
kfree(ap);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_port_free);
|
||||
@@ -5909,7 +5932,23 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
#ifdef CONFIG_ATA_LEDS
|
||||
for (i = 0; i < host->n_ports; i++) {
|
||||
if (unlikely(!host->ports[i]->ledtrig))
|
||||
continue;
|
||||
|
||||
snprintf(host->ports[i]->ledtrig_name,
|
||||
sizeof(host->ports[i]->ledtrig_name), "ata%u",
|
||||
host->ports[i]->print_id);
|
||||
|
||||
host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
|
||||
|
||||
if (led_trigger_register(host->ports[i]->ledtrig)) {
|
||||
kfree(host->ports[i]->ledtrig);
|
||||
host->ports[i]->ledtrig = NULL;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
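For illustration, a userspace sketch that binds an existing LED to the per-port trigger registered above; the LED class device name is hypothetical, and the trigger suffix follows the port's print_id (for example "ata1" for the first registered port):

#include <stdio.h>

int main(void)
{
	/* hypothetical LED class device; the trigger name comes from
	 * ata_host_register() above ("ata%u" with the port's print_id) */
	FILE *f = fopen("/sys/class/leds/example:green:disk/trigger", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("ata1", f);
	fclose(f);
	return 0;
}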
/* Create associated sysfs transport objects */
|
||||
for (i = 0; i < host->n_ports; i++) {
|
||||
rc = ata_tport_add(host->dev,host->ports[i]);
|
||||
|
||||
@@ -198,7 +198,7 @@ config SOC_BUS
|
||||
source "drivers/base/regmap/Kconfig"
|
||||
|
||||
config DMA_SHARED_BUFFER
|
||||
bool
|
||||
tristate
|
||||
default n
|
||||
select IRQ_WORK
|
||||
help
|
||||
|
||||
@@ -1653,7 +1653,7 @@ static void device_links_purge(struct device *dev)
|
||||
#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
|
||||
DL_FLAG_PM_RUNTIME)
|
||||
|
||||
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
|
||||
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
|
||||
static int __init fw_devlink_setup(char *arg)
|
||||
{
|
||||
if (!arg)
|
||||
|
||||
@@ -4,8 +4,7 @@
|
||||
# subsystems should select the appropriate symbols.
|
||||
|
||||
config REGMAP
|
||||
bool
|
||||
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
|
||||
tristate
|
||||
select IRQ_DOMAIN if REGMAP_IRQ
|
||||
select MDIO_BUS if REGMAP_MDIO
|
||||
help
|
||||
@@ -19,7 +18,7 @@ config REGMAP
|
||||
|
||||
config REGMAP_KUNIT
|
||||
tristate "KUnit tests for regmap"
|
||||
depends on KUNIT && REGMAP
|
||||
depends on KUNIT
|
||||
default KUNIT_ALL_TESTS
|
||||
select REGMAP_RAM
|
||||
|
||||
@@ -34,60 +33,76 @@ config REGMAP_BUILD
|
||||
normally enabled.
|
||||
|
||||
config REGMAP_AC97
|
||||
select REGMAP
|
||||
tristate
|
||||
|
||||
config REGMAP_I2C
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on I2C
|
||||
|
||||
config REGMAP_SLIMBUS
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on SLIMBUS
|
||||
|
||||
config REGMAP_SPI
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on SPI
|
||||
|
||||
config REGMAP_SPMI
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on SPMI
|
||||
|
||||
config REGMAP_W1
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on W1
|
||||
|
||||
config REGMAP_MDIO
|
||||
select REGMAP
|
||||
tristate
|
||||
|
||||
config REGMAP_MMIO
|
||||
select REGMAP
|
||||
tristate
|
||||
|
||||
config REGMAP_IRQ
|
||||
select REGMAP
|
||||
bool
|
||||
|
||||
config REGMAP_RAM
|
||||
select REGMAP
|
||||
tristate
|
||||
|
||||
config REGMAP_SOUNDWIRE
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on SOUNDWIRE
|
||||
|
||||
config REGMAP_SOUNDWIRE_MBQ
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on SOUNDWIRE
|
||||
|
||||
config REGMAP_SCCB
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on I2C
|
||||
|
||||
config REGMAP_I3C
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on I3C
|
||||
|
||||
config REGMAP_SPI_AVMM
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on SPI
|
||||
|
||||
config REGMAP_FSI
|
||||
select REGMAP
|
||||
tristate
|
||||
depends on FSI
|
||||
|
||||
@@ -2,9 +2,11 @@
|
||||
# For include/trace/define_trace.h to include trace.h
|
||||
CFLAGS_regmap.o := -I$(src)
|
||||
|
||||
obj-$(CONFIG_REGMAP) += regmap.o regcache.o
|
||||
obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o regcache-maple.o
|
||||
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
|
||||
regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-flat.o regcache-maple.o
|
||||
ifdef CONFIG_DEBUG_FS
|
||||
regmap-core-objs += regmap-debugfs.o
|
||||
endif
|
||||
obj-$(CONFIG_REGMAP) += regmap-core.o
|
||||
obj-$(CONFIG_REGMAP_KUNIT) += regmap-kunit.o
|
||||
obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
|
||||
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/property.h>
|
||||
@@ -3521,3 +3522,5 @@ static int __init regmap_initcall(void)
|
||||
return 0;
|
||||
}
|
||||
postcore_initcall(regmap_initcall);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
@@ -16,6 +16,11 @@ if BCMA
|
||||
# Support for Block-I/O. SELECT this from the driver that needs it.
|
||||
config BCMA_BLOCKIO
|
||||
bool
|
||||
default y
|
||||
|
||||
config BCMA_FALLBACK_SPROM
|
||||
bool
|
||||
default y
|
||||
|
||||
config BCMA_HOST_PCI_POSSIBLE
|
||||
bool
|
||||
|
||||
@@ -11,6 +11,7 @@ bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
|
||||
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
|
||||
bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
|
||||
bcma-$(CONFIG_BCMA_DRIVER_GPIO) += driver_gpio.o
|
||||
bcma-$(CONFIG_BCMA_FALLBACK_SPROM) += fallback-sprom.o
|
||||
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
|
||||
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
|
||||
obj-$(CONFIG_BCMA) += bcma.o
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
|
||||
#include <linux/bcma/bcma.h>
|
||||
#include <linux/delay.h>
|
||||
#include "fallback-sprom.h"
|
||||
|
||||
#define bcma_err(bus, fmt, ...) \
|
||||
dev_err((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
|
||||
|
||||
@@ -191,6 +191,8 @@ int __init bcma_host_soc_init(struct bcma_soc *soc)
|
||||
struct bcma_bus *bus = &soc->bus;
|
||||
int err;
|
||||
|
||||
bus->dev = soc->dev;
|
||||
|
||||
/* Scan bus and initialize it */
|
||||
err = bcma_bus_early_register(bus);
|
||||
if (err)
|
||||
|
||||
@@ -237,13 +237,17 @@ EXPORT_SYMBOL(bcma_core_irq);
|
||||
|
||||
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
|
||||
{
|
||||
device_initialize(&core->dev);
|
||||
struct device *dev = &core->dev;
|
||||
|
||||
device_initialize(dev);
|
||||
core->dev.release = bcma_release_core_dev;
|
||||
core->dev.bus = &bcma_bus_type;
|
||||
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
|
||||
dev_set_name(dev, "bcma%d:%d", bus->num, core->core_index);
|
||||
core->dev.parent = bus->dev;
|
||||
if (bus->dev)
|
||||
if (bus->dev) {
|
||||
bcma_of_fill_device(bus->dev, core);
|
||||
dma_coerce_mask_and_coherent(dev, bus->dev->coherent_dma_mask);
|
||||
}
|
||||
|
||||
switch (bus->hosttype) {
|
||||
case BCMA_HOSTTYPE_PCI:
|
||||
@@ -667,6 +671,14 @@ static int __init bcma_modinit(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
#ifdef CONFIG_BCMA_FALLBACK_SPROM
|
||||
err = bcma_fbs_register();
|
||||
if (err) {
|
||||
pr_err("Fallback SPROM initialization failed\n");
|
||||
err = 0;
|
||||
}
|
||||
#endif /* CONFIG_BCMA_FALLBACK_SPROM */
|
||||
|
||||
err = bcma_init_bus_register();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -51,21 +51,26 @@ static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!get_fallback_sprom) {
|
||||
err = -ENOENT;
|
||||
goto fail;
|
||||
}
|
||||
if (get_fallback_sprom)
|
||||
err = get_fallback_sprom(bus, out);
|
||||
|
||||
err = get_fallback_sprom(bus, out);
|
||||
if (err)
|
||||
goto fail;
|
||||
#ifdef CONFIG_BCMA_FALLBACK_SPROM
|
||||
if (!get_fallback_sprom || err)
|
||||
err = bcma_get_fallback_sprom(bus, out);
|
||||
#else
|
||||
if (!get_fallback_sprom)
|
||||
err = -ENOENT;
|
||||
#endif /* CONFIG_BCMA_FALLBACK_SPROM */
|
||||
|
||||
if (err) {
|
||||
bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
|
||||
bus->sprom.revision);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
|
||||
@@ -363,6 +363,18 @@ config BLK_DEV_RUST_NULL
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config UIMAGE_FIT_BLK
|
||||
bool "uImage.FIT block driver"
|
||||
help
|
||||
This driver allows using filesystems contained in uImage.FIT images
|
||||
by mapping them as block devices.
|
||||
|
||||
It currently cannot be built as a module because the required libfdt
symbols are not exported.

Say Y if you want to mount filesystems from sub-images of a uImage.FIT
stored in a block device partition, mtdblock or ubiblock device.
|
||||
|
||||
config BLK_DEV_RBD
|
||||
tristate "Rados block device (RBD)"
|
||||
depends on INET && BLOCK
|
||||
|
||||
@@ -42,4 +42,6 @@ obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
|
||||
|
||||
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
|
||||
|
||||
obj-$(CONFIG_UIMAGE_FIT_BLK) += fitblk.o
|
||||
|
||||
swim_mod-y := swim.o swim_asm.o
|
||||
|
||||
660
drivers/block/fitblk.c
Normal file
@@ -0,0 +1,660 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* uImage.FIT virtual block device driver.
|
||||
*
|
||||
* Copyright (C) 2023 Daniel Golle
|
||||
* Copyright (C) 2007 Nick Piggin
|
||||
* Copyright (C) 2007 Novell Inc.
|
||||
*
|
||||
* Initially derived from drivers/block/brd.c which is in parts derived from
|
||||
* drivers/block/rd.c, and drivers/block/loop.c, copyright of their respective
|
||||
* owners.
|
||||
*
|
||||
* uImage.FIT headers extracted from Das U-Boot
|
||||
* (C) Copyright 2008 Semihalf
|
||||
* (C) Copyright 2000-2005
|
||||
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/blkpg.h>
|
||||
#include <linux/blk-mq.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/task_work.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/libfdt.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/root_dev.h>
|
||||
#include <uapi/linux/fitblk.h>
|
||||
|
||||
#define FIT_DEVICE_PREFIX "fit"
|
||||
|
||||
/* maximum number of pages used for the uImage.FIT index structure */
|
||||
#define FIT_MAX_PAGES 1024
|
||||
|
||||
/* minimum free sectors to map as read-write "remainder" volume */
|
||||
#define MIN_FREE_SECT 16
|
||||
|
||||
/* maximum number of mapped loadables */
|
||||
#define MAX_FIT_LOADABLES 16
|
||||
|
||||
/* constants for uImage.FIT structure traversal */
|
||||
#define FIT_IMAGES_PATH "/images"
|
||||
#define FIT_CONFS_PATH "/configurations"
|
||||
|
||||
/* hash/signature/key node */
|
||||
#define FIT_HASH_NODENAME "hash"
|
||||
#define FIT_ALGO_PROP "algo"
|
||||
#define FIT_VALUE_PROP "value"
|
||||
#define FIT_IGNORE_PROP "uboot-ignore"
|
||||
#define FIT_SIG_NODENAME "signature"
|
||||
#define FIT_KEY_REQUIRED "required"
|
||||
#define FIT_KEY_HINT "key-name-hint"
|
||||
|
||||
/* cipher node */
|
||||
#define FIT_CIPHER_NODENAME "cipher"
|
||||
#define FIT_ALGO_PROP "algo"
|
||||
|
||||
/* image node */
|
||||
#define FIT_DATA_PROP "data"
|
||||
#define FIT_DATA_POSITION_PROP "data-position"
|
||||
#define FIT_DATA_OFFSET_PROP "data-offset"
|
||||
#define FIT_DATA_SIZE_PROP "data-size"
|
||||
#define FIT_TIMESTAMP_PROP "timestamp"
|
||||
#define FIT_DESC_PROP "description"
|
||||
#define FIT_ARCH_PROP "arch"
|
||||
#define FIT_TYPE_PROP "type"
|
||||
#define FIT_OS_PROP "os"
|
||||
#define FIT_COMP_PROP "compression"
|
||||
#define FIT_ENTRY_PROP "entry"
|
||||
#define FIT_LOAD_PROP "load"
|
||||
|
||||
/* configuration node */
|
||||
#define FIT_KERNEL_PROP "kernel"
|
||||
#define FIT_FILESYSTEM_PROP "filesystem"
|
||||
#define FIT_RAMDISK_PROP "ramdisk"
|
||||
#define FIT_FDT_PROP "fdt"
|
||||
#define FIT_LOADABLE_PROP "loadables"
|
||||
#define FIT_DEFAULT_PROP "default"
|
||||
#define FIT_SETUP_PROP "setup"
|
||||
#define FIT_FPGA_PROP "fpga"
|
||||
#define FIT_FIRMWARE_PROP "firmware"
|
||||
#define FIT_STANDALONE_PROP "standalone"
|
||||
|
||||
/* fitblk driver data */
|
||||
static const char *_fitblk_claim_ptr = "I belong to fitblk";
|
||||
static const char *ubootver;
|
||||
struct device_node *rootdisk;
|
||||
static struct platform_device *pdev;
|
||||
static LIST_HEAD(fitblk_devices);
|
||||
static DEFINE_MUTEX(devices_mutex);
|
||||
refcount_t num_devs;
|
||||
|
||||
struct fitblk {
|
||||
struct platform_device *pdev;
|
||||
struct file *bdev_file;
|
||||
sector_t start_sect;
|
||||
struct gendisk *disk;
|
||||
struct work_struct remove_work;
|
||||
struct list_head list;
|
||||
bool dead;
|
||||
};
|
||||
|
||||
static int fitblk_open(struct gendisk *disk, fmode_t mode)
|
||||
{
|
||||
struct fitblk *fitblk = disk->private_data;
|
||||
|
||||
if (fitblk->dead)
|
||||
return -ENOENT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fitblk_release(struct gendisk *disk)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static void fitblk_submit_bio(struct bio *orig_bio)
|
||||
{
|
||||
struct bio *bio = orig_bio;
|
||||
struct fitblk *fitblk = bio->bi_bdev->bd_disk->private_data;
|
||||
|
||||
if (fitblk->dead)
|
||||
return;
|
||||
|
||||
/* mangle bio and re-submit */
|
||||
while (bio) {
|
||||
bio->bi_iter.bi_sector += fitblk->start_sect;
|
||||
bio->bi_bdev = file_bdev(fitblk->bdev_file);
|
||||
bio = bio->bi_next;
|
||||
}
|
||||
submit_bio(orig_bio);
|
||||
}
|
||||
|
||||
static void fitblk_remove(struct fitblk *fitblk)
|
||||
{
|
||||
blk_mark_disk_dead(fitblk->disk);
|
||||
mutex_lock(&devices_mutex);
|
||||
fitblk->dead = true;
|
||||
list_del(&fitblk->list);
|
||||
mutex_unlock(&devices_mutex);
|
||||
|
||||
schedule_work(&fitblk->remove_work);
|
||||
}
|
||||
|
||||
static int fitblk_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct fitblk *fitblk = bdev->bd_disk->private_data;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
if (fitblk->dead)
|
||||
return -ENOENT;
|
||||
|
||||
switch (cmd) {
|
||||
case FITBLK_RELEASE:
|
||||
fitblk_remove(fitblk);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct block_device_operations fitblk_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.ioctl = fitblk_ioctl,
|
||||
.open = fitblk_open,
|
||||
.release = fitblk_release,
|
||||
.submit_bio = fitblk_submit_bio,
|
||||
};
|
||||
|
||||
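A minimal userspace sketch of the release path exposed by fitblk_ioctl() above; the device node name is an assumption based on FIT_DEVICE_PREFIX, and FITBLK_RELEASE comes from the new uapi/linux/fitblk.h header, which is not shown in this diff:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fitblk.h>	/* FITBLK_RELEASE */

int main(void)
{
	int fd = open("/dev/fit0", O_RDONLY);	/* hypothetical sub-image device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* asks the driver to tear this mapping down; requires CAP_SYS_ADMIN */
	if (ioctl(fd, FITBLK_RELEASE) < 0)
		perror("FITBLK_RELEASE");
	close(fd);
	return 0;
}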
static void fitblk_purge(struct work_struct *work)
|
||||
{
|
||||
struct fitblk *fitblk = container_of(work, struct fitblk, remove_work);
|
||||
|
||||
del_gendisk(fitblk->disk);
|
||||
refcount_dec(&num_devs);
|
||||
platform_device_del(fitblk->pdev);
|
||||
platform_device_put(fitblk->pdev);
|
||||
|
||||
if (refcount_dec_if_one(&num_devs)) {
|
||||
sysfs_remove_link(&pdev->dev.kobj, "lower_dev");
|
||||
fput(fitblk->bdev_file);
|
||||
}
|
||||
|
||||
kfree(fitblk);
|
||||
}
|
||||
|
||||
static int add_fit_subimage_device(struct file *bdev_file,
|
||||
unsigned int slot, sector_t start_sect,
|
||||
sector_t nr_sect, bool readonly)
|
||||
{
|
||||
struct block_device *bdev = file_bdev(bdev_file);
|
||||
struct fitblk *fitblk;
|
||||
struct gendisk *disk;
|
||||
int err;
|
||||
|
||||
mutex_lock(&devices_mutex);
|
||||
if (!refcount_inc_not_zero(&num_devs))
|
||||
return -EBADF;
|
||||
|
||||
fitblk = kzalloc(sizeof(struct fitblk), GFP_KERNEL);
|
||||
if (!fitblk) {
|
||||
err = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
fitblk->bdev_file = bdev_file;
|
||||
fitblk->start_sect = start_sect;
|
||||
INIT_WORK(&fitblk->remove_work, fitblk_purge);
|
||||
|
||||
disk = blk_alloc_disk(&bdev->bd_disk->queue->limits, NUMA_NO_NODE);
|
||||
if (!disk) {
|
||||
err = -ENOMEM;
|
||||
goto out_free_fitblk;
|
||||
}
|
||||
|
||||
disk->first_minor = 0;
|
||||
disk->flags = bdev->bd_disk->flags | GENHD_FL_NO_PART;
|
||||
disk->fops = &fitblk_fops;
|
||||
disk->private_data = fitblk;
|
||||
if (readonly) {
|
||||
set_disk_ro(disk, 1);
|
||||
snprintf(disk->disk_name, sizeof(disk->disk_name), FIT_DEVICE_PREFIX "%u", slot);
|
||||
} else {
|
||||
strcpy(disk->disk_name, FIT_DEVICE_PREFIX "rw");
|
||||
}
|
||||
|
||||
set_capacity(disk, nr_sect);
|
||||
disk->queue->queue_flags = bdev->bd_disk->queue->queue_flags;
|
||||
|
||||
fitblk->disk = disk;
|
||||
fitblk->pdev = platform_device_alloc(disk->disk_name, PLATFORM_DEVID_NONE);
|
||||
if (!fitblk->pdev) {
|
||||
err = -ENOMEM;
|
||||
goto out_cleanup_disk;
|
||||
}
|
||||
|
||||
fitblk->pdev->dev.parent = &pdev->dev;
|
||||
err = platform_device_add(fitblk->pdev);
|
||||
if (err)
|
||||
goto out_put_pdev;
|
||||
|
||||
err = device_add_disk(&fitblk->pdev->dev, disk, NULL);
|
||||
if (err)
|
||||
goto out_del_pdev;
|
||||
|
||||
if (!ROOT_DEV)
|
||||
ROOT_DEV = disk->part0->bd_dev;
|
||||
|
||||
list_add_tail(&fitblk->list, &fitblk_devices);
|
||||
|
||||
mutex_unlock(&devices_mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
out_del_pdev:
|
||||
platform_device_del(fitblk->pdev);
|
||||
out_put_pdev:
|
||||
platform_device_put(fitblk->pdev);
|
||||
out_cleanup_disk:
|
||||
put_disk(disk);
|
||||
out_free_fitblk:
|
||||
kfree(fitblk);
|
||||
out_unlock:
|
||||
refcount_dec(&num_devs);
|
||||
mutex_unlock(&devices_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void fitblk_mark_dead(struct block_device *bdev, bool surprise)
|
||||
{
|
||||
struct list_head *n, *tmp;
|
||||
struct fitblk *fitblk;
|
||||
|
||||
mutex_lock(&devices_mutex);
|
||||
list_for_each_safe(n, tmp, &fitblk_devices) {
|
||||
fitblk = list_entry(n, struct fitblk, list);
|
||||
if (file_bdev(fitblk->bdev_file) != bdev)
|
||||
continue;
|
||||
|
||||
fitblk->dead = true;
|
||||
list_del(&fitblk->list);
|
||||
/* removal needs to be deferred to avoid deadlock */
|
||||
schedule_work(&fitblk->remove_work);
|
||||
}
|
||||
mutex_unlock(&devices_mutex);
|
||||
}
|
||||
|
||||
static const struct blk_holder_ops fitblk_hops = {
|
||||
.mark_dead = fitblk_mark_dead,
|
||||
};
|
||||
|
||||
static int parse_fit_on_dev(struct device *dev)
|
||||
{
|
||||
struct file *bdev_file;
|
||||
struct block_device *bdev;
|
||||
struct address_space *mapping;
|
||||
struct folio *folio;
|
||||
pgoff_t f_index = 0;
|
||||
size_t bytes_left, bytes_to_copy;
|
||||
void *pre_fit, *fit, *fit_c;
|
||||
u64 dsize, dsectors, imgmaxsect = 0;
|
||||
u32 size, image_pos, image_len;
|
||||
const __be32 *image_offset_be, *image_len_be, *image_pos_be;
|
||||
int ret = 0, node, images, config;
|
||||
const char *image_name, *image_type, *image_description,
|
||||
*config_default, *config_description, *config_loadables;
|
||||
u32 image_name_len, image_type_len, image_description_len,
|
||||
bootconf_len, config_default_len, config_description_len,
|
||||
config_loadables_len;
|
||||
sector_t start_sect, nr_sects;
|
||||
struct device_node *np = NULL;
|
||||
const char *bootconf_c;
|
||||
const char *loadable;
|
||||
char *bootconf = NULL, *bootconf_term;
|
||||
bool found;
|
||||
int loadables_rem_len, loadable_len;
|
||||
u16 loadcnt;
|
||||
unsigned int slot = 0;
|
||||
|
||||
/* Exclusive open the block device to receive holder notifications */
|
||||
bdev_file = bdev_file_open_by_dev(dev->devt,
|
||||
BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES,
|
||||
&_fitblk_claim_ptr, &fitblk_hops);
|
||||
if (!bdev_file)
|
||||
return -ENODEV;
|
||||
|
||||
if (IS_ERR(bdev_file))
|
||||
return PTR_ERR(bdev_file);
|
||||
|
||||
bdev = file_bdev(bdev_file);
|
||||
mapping = bdev_file->f_mapping;
|
||||
|
||||
/* map first page */
|
||||
folio = read_mapping_folio(mapping, f_index++, NULL);
|
||||
if (IS_ERR(folio)) {
|
||||
ret = PTR_ERR(folio);
|
||||
goto out_blkdev;
|
||||
}
|
||||
pre_fit = folio_address(folio) + offset_in_folio(folio, 0);
|
||||
|
||||
/* uImage.FIT is based on flattened device tree structure */
|
||||
if (fdt_check_header(pre_fit)) {
|
||||
ret = -EINVAL;
|
||||
folio_put(folio);
|
||||
goto out_blkdev;
|
||||
}
|
||||
|
||||
size = fdt_totalsize(pre_fit);
|
||||
|
||||
if (size > PAGE_SIZE * FIT_MAX_PAGES) {
|
||||
ret = -EOPNOTSUPP;
|
||||
folio_put(folio);
|
||||
goto out_blkdev;
|
||||
}
|
||||
|
||||
/* acquire disk size */
|
||||
dsectors = bdev_nr_sectors(bdev);
|
||||
dsize = dsectors << SECTOR_SHIFT;
|
||||
|
||||
/* abort if FIT structure is larger than disk or partition size */
|
||||
if (size >= dsize) {
|
||||
ret = -EFBIG;
|
||||
folio_put(folio);
|
||||
goto out_blkdev;
|
||||
}
|
||||
|
||||
fit = kmalloc(size, GFP_KERNEL);
|
||||
if (!fit) {
|
||||
ret = -ENOMEM;
|
||||
folio_put(folio);
|
||||
goto out_blkdev;
|
||||
}
|
||||
|
||||
bytes_left = size;
|
||||
fit_c = fit;
|
||||
while (bytes_left > 0) {
|
||||
bytes_to_copy = min_t(size_t, bytes_left,
|
||||
folio_size(folio) - offset_in_folio(folio, 0));
|
||||
memcpy(fit_c, pre_fit, bytes_to_copy);
|
||||
fit_c += bytes_to_copy;
|
||||
bytes_left -= bytes_to_copy;
|
||||
if (bytes_left) {
|
||||
folio_put(folio);
|
||||
folio = read_mapping_folio(mapping, f_index++, NULL);
|
||||
if (IS_ERR(folio)) {
|
||||
ret = PTR_ERR(folio);
|
||||
goto out_blkdev;
|
||||
}
|
||||
pre_fit = folio_address(folio) + offset_in_folio(folio, 0);
|
||||
}
|
||||
}
|
||||
folio_put(folio);
|
||||
|
||||
/* set boot config node name U-Boot may have added to the device tree */
|
||||
np = of_find_node_by_path("/chosen");
|
||||
if (np) {
|
||||
bootconf_c = of_get_property(np, "u-boot,bootconf", &bootconf_len);
|
||||
if (bootconf_c && bootconf_len)
|
||||
bootconf = kmemdup_nul(bootconf_c, bootconf_len, GFP_KERNEL);
|
||||
}
|
||||
|
||||
if (bootconf) {
|
||||
bootconf_term = strchr(bootconf, '#');
|
||||
if (bootconf_term)
|
||||
*bootconf_term = '\0';
|
||||
}
|
||||
|
||||
/* find configuration path in uImage.FIT */
|
||||
config = fdt_path_offset(fit, FIT_CONFS_PATH);
|
||||
if (config < 0) {
|
||||
pr_err("FIT: Cannot find %s node: %d\n",
|
||||
FIT_CONFS_PATH, config);
|
||||
ret = -ENOENT;
|
||||
goto out_bootconf;
|
||||
}
|
||||
|
||||
/* get default configuration node name */
|
||||
config_default =
|
||||
fdt_getprop(fit, config, FIT_DEFAULT_PROP, &config_default_len);
|
||||
|
||||
/* make sure we got either default or selected boot config node name */
|
||||
if (!config_default && !bootconf) {
|
||||
pr_err("FIT: Cannot find default configuration\n");
|
||||
ret = -ENOENT;
|
||||
goto out_bootconf;
|
||||
}
|
||||
|
||||
/* find selected boot config node, fallback on default config node */
|
||||
node = fdt_subnode_offset(fit, config, bootconf ?: config_default);
|
||||
if (node < 0) {
|
||||
pr_err("FIT: Cannot find %s node: %d\n",
|
||||
bootconf ?: config_default, node);
|
||||
ret = -ENOENT;
|
||||
goto out_bootconf;
|
||||
}
|
||||
|
||||
pr_info("FIT: Detected U-Boot %s\n", ubootver);
|
||||
|
||||
/* get selected configuration data */
|
||||
config_description =
|
||||
fdt_getprop(fit, node, FIT_DESC_PROP, &config_description_len);
|
||||
config_loadables = fdt_getprop(fit, node, FIT_LOADABLE_PROP,
|
||||
&config_loadables_len);
|
||||
|
||||
pr_info("FIT: %s configuration: \"%.*s\"%s%.*s%s\n",
|
||||
bootconf ? "Selected" : "Default",
|
||||
bootconf ? bootconf_len : config_default_len,
|
||||
bootconf ?: config_default,
|
||||
config_description ? " (" : "",
|
||||
config_description ? config_description_len : 0,
|
||||
config_description ?: "",
|
||||
config_description ? ")" : "");
|
||||
|
||||
if (!config_loadables || !config_loadables_len) {
|
||||
pr_err("FIT: No loadables configured in \"%s\"\n",
|
||||
bootconf ?: config_default);
|
||||
ret = -ENOENT;
|
||||
goto out_bootconf;
|
||||
}
|
||||
|
||||
/* get images path in uImage.FIT */
|
||||
images = fdt_path_offset(fit, FIT_IMAGES_PATH);
|
||||
if (images < 0) {
|
||||
pr_err("FIT: Cannot find %s node: %d\n", FIT_IMAGES_PATH, images);
|
||||
ret = -EINVAL;
|
||||
goto out_bootconf;
|
||||
}
|
||||
|
||||
/* iterate over images in uImage.FIT */
|
||||
fdt_for_each_subnode(node, fit, images) {
|
||||
image_name = fdt_get_name(fit, node, &image_name_len);
|
||||
image_type = fdt_getprop(fit, node, FIT_TYPE_PROP, &image_type_len);
|
||||
image_offset_be = fdt_getprop(fit, node, FIT_DATA_OFFSET_PROP, NULL);
|
||||
image_pos_be = fdt_getprop(fit, node, FIT_DATA_POSITION_PROP, NULL);
|
||||
image_len_be = fdt_getprop(fit, node, FIT_DATA_SIZE_PROP, NULL);
|
||||
|
||||
if (!image_name || !image_type || !image_len_be ||
|
||||
!image_name_len || !image_type_len)
|
||||
continue;
|
||||
|
||||
image_len = be32_to_cpu(*image_len_be);
|
||||
if (!image_len)
|
||||
continue;
|
||||
|
||||
if (image_offset_be)
|
||||
image_pos = be32_to_cpu(*image_offset_be) + size;
|
||||
else if (image_pos_be)
|
||||
image_pos = be32_to_cpu(*image_pos_be);
|
||||
else
|
||||
continue;
|
||||
|
||||
image_description = fdt_getprop(fit, node, FIT_DESC_PROP,
|
||||
&image_description_len);
|
||||
|
||||
pr_info("FIT: %16s sub-image 0x%08x..0x%08x \"%.*s\"%s%.*s%s\n",
|
||||
image_type, image_pos, image_pos + image_len - 1,
|
||||
image_name_len, image_name, image_description ? " (" : "",
|
||||
image_description ? image_description_len : 0,
|
||||
image_description ?: "", image_description ? ") " : "");
|
||||
|
||||
/* only 'filesystem' images should be mapped as partitions */
|
||||
if (strncmp(image_type, FIT_FILESYSTEM_PROP, image_type_len))
|
||||
continue;
|
||||
|
||||
/* check if sub-image is part of configured loadables */
|
||||
found = false;
|
||||
loadable = config_loadables;
|
||||
loadables_rem_len = config_loadables_len;
|
||||
for (loadcnt = 0; loadables_rem_len > 1 &&
|
||||
loadcnt < MAX_FIT_LOADABLES; ++loadcnt) {
|
||||
loadable_len =
|
||||
strnlen(loadable, loadables_rem_len - 1) + 1;
|
||||
loadables_rem_len -= loadable_len;
|
||||
if (!strncmp(image_name, loadable, loadable_len)) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
loadable += loadable_len;
|
||||
}
|
||||
if (!found)
|
||||
continue;
|
||||
|
||||
if (image_pos % (1 << PAGE_SHIFT)) {
|
||||
dev_err(dev, "FIT: image %.*s start not aligned to page boundaries, skipping\n",
|
||||
image_name_len, image_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (image_len % (1 << PAGE_SHIFT)) {
|
||||
dev_err(dev, "FIT: sub-image %.*s end not aligned to page boundaries, skipping\n",
|
||||
image_name_len, image_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
start_sect = image_pos >> SECTOR_SHIFT;
|
||||
nr_sects = image_len >> SECTOR_SHIFT;
|
||||
imgmaxsect = max_t(sector_t, imgmaxsect, start_sect + nr_sects);
|
||||
|
||||
if (start_sect + nr_sects > dsectors) {
|
||||
dev_err(dev, "FIT: sub-image %.*s disk access beyond EOD\n",
|
||||
image_name_len, image_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!slot) {
|
||||
ret = sysfs_create_link_nowarn(&pdev->dev.kobj, bdev_kobj(bdev), "lower_dev");
|
||||
if (ret && ret != -EEXIST)
|
||||
goto out_bootconf;
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
add_fit_subimage_device(bdev_file, slot++, start_sect, nr_sects, true);
|
||||
}
|
||||
|
||||
if (!slot)
|
||||
goto out_bootconf;
|
||||
|
||||
dev_info(dev, "mapped %u uImage.FIT filesystem sub-image%s as /dev/fit%s%u%s\n",
|
||||
slot, (slot > 1)?"s":"", (slot > 1)?"[0...":"", slot - 1,
|
||||
(slot > 1)?"]":"");
|
||||
|
||||
/* in case uImage.FIT is stored in a partition, map the remaining space */
|
||||
if (!bdev_read_only(bdev) && bdev_is_partition(bdev) &&
|
||||
(imgmaxsect + MIN_FREE_SECT) < dsectors) {
|
||||
add_fit_subimage_device(bdev_file, slot++, imgmaxsect,
|
||||
dsectors - imgmaxsect, false);
|
||||
dev_info(dev, "mapped remaining space as /dev/fitrw\n");
|
||||
}
|
||||
|
||||
out_bootconf:
|
||||
kfree(bootconf);
|
||||
kfree(fit);
|
||||
out_blkdev:
|
||||
if (!slot)
|
||||
fput(bdev_file);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int fitblk_match_of_node(struct device *dev, const void *np)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = device_match_of_node(dev, np);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* To match ubiblock and mtdblock devices by their parent ubi
|
||||
* or mtd device, also consider block device parent
|
||||
*/
|
||||
if (!dev->parent)
|
||||
return 0;
|
||||
|
||||
return device_match_of_node(dev->parent, np);
|
||||
}
|
||||
|
||||
static int fitblk_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev;
|
||||
|
||||
dev = class_find_device(&block_class, NULL, rootdisk, fitblk_match_of_node);
|
||||
if (!dev)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
return parse_fit_on_dev(dev);
|
||||
}
|
||||
|
||||
static struct platform_driver fitblk_driver = {
|
||||
.probe = fitblk_probe,
|
||||
.driver = {
|
||||
.name = "fitblk",
|
||||
},
|
||||
};
|
||||
|
||||
static int __init fitblk_init(void)
|
||||
{
|
||||
/* detect U-Boot firmware */
|
||||
ubootver = of_get_property(of_chosen, "u-boot,version", NULL);
|
||||
if (!ubootver)
|
||||
return 0;
|
||||
|
||||
/* parse 'rootdisk' property phandle */
|
||||
rootdisk = of_parse_phandle(of_chosen, "rootdisk", 0);
|
||||
if (!rootdisk)
|
||||
return 0;
|
||||
|
||||
if (platform_driver_register(&fitblk_driver))
|
||||
return -ENODEV;
|
||||
|
||||
refcount_set(&num_devs, 1);
|
||||
pdev = platform_device_register_simple("fitblk", -1, NULL, 0);
|
||||
if (IS_ERR(pdev))
|
||||
return PTR_ERR(pdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
device_initcall(fitblk_init);
|
||||
@@ -916,6 +916,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
|
||||
switch (event) {
|
||||
case MHI_EE_SBL:
|
||||
st = DEV_ST_TRANSITION_SBL;
|
||||
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_SBL_MODE);
|
||||
break;
|
||||
case MHI_EE_WFW:
|
||||
case MHI_EE_AMSS:
|
||||
|
||||
@@ -62,6 +62,19 @@ config HW_RANDOM_AMD

	  If unsure, say Y.

config HW_RANDOM_AIROHA
	tristate "Airoha True HW Random Number Generator support"
	depends on ARCH_AIROHA || COMPILE_TEST
	default HW_RANDOM
	help
	  This driver provides kernel-side support for the True Random Number
	  Generator hardware found on the Airoha SoC.

	  To compile this driver as a module, choose M here: the
	  module will be called airoha-trng.

	  If unsure, say Y.
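A minimal userspace sketch (illustrative, not part of this series): once the driver is bound, entropy from the Airoha TRNG is consumed through the generic /dev/hwrng character device exposed by rng-core. The program below is a made-up example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	unsigned int i;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	/* a short read is treated as an error here for simplicity */
	if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("read");
		close(fd);
		return 1;
	}
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}
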
config HW_RANDOM_ATMEL
	tristate "Atmel Random Number Generator support"
	depends on (ARCH_AT91 || COMPILE_TEST)

@@ -8,6 +8,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o

243
drivers/char/hw_random/airoha-trng.c
Normal file
@@ -0,0 +1,243 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2024 Christian Marangi */
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#define TRNG_IP_RDY 0x800
|
||||
#define CNT_TRANS GENMASK(15, 8)
|
||||
#define SAMPLE_RDY BIT(0)
|
||||
#define TRNG_NS_SEK_AND_DAT_EN 0x804
|
||||
#define RNG_EN BIT(31) /* referenced as ring_en */
|
||||
#define RAW_DATA_EN BIT(16)
|
||||
#define TRNG_HEALTH_TEST_SW_RST 0x808
|
||||
#define SW_RST BIT(0) /* Active High */
|
||||
#define TRNG_INTR_EN 0x818
|
||||
#define INTR_MASK BIT(16)
|
||||
#define CONTINUOUS_HEALTH_INITR_EN BIT(2)
|
||||
#define SW_STARTUP_INITR_EN BIT(1)
|
||||
#define RST_STARTUP_INITR_EN BIT(0)
|
||||
/* Notice that Health Tests are done only out of Reset and with RNG_EN */
|
||||
#define TRNG_HEALTH_TEST_STATUS 0x824
|
||||
#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
|
||||
#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
|
||||
#define SW_STARTUP_TEST_DONE BIT(21)
|
||||
#define SW_STARTUP_AP_TEST_FAIL BIT(20)
|
||||
#define SW_STARTUP_RC_TEST_FAIL BIT(19)
|
||||
#define RST_STARTUP_TEST_DONE BIT(18)
|
||||
#define RST_STARTUP_AP_TEST_FAIL BIT(17)
|
||||
#define RST_STARTUP_RC_TEST_FAIL BIT(16)
|
||||
#define RAW_DATA_VALID BIT(7)
|
||||
|
||||
#define TRNG_RAW_DATA_OUT 0x828
|
||||
|
||||
#define TRNG_CNT_TRANS_VALID 0x80
|
||||
#define BUSY_LOOP_SLEEP 10
|
||||
#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000)
|
||||
|
||||
struct airoha_trng {
|
||||
void __iomem *base;
|
||||
struct hwrng rng;
|
||||
struct device *dev;
|
||||
|
||||
struct completion rng_op_done;
|
||||
};
|
||||
|
||||
static int airoha_trng_irq_mask(struct airoha_trng *trng)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = readl(trng->base + TRNG_INTR_EN);
|
||||
val |= INTR_MASK;
|
||||
writel(val, trng->base + TRNG_INTR_EN);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int airoha_trng_irq_unmask(struct airoha_trng *trng)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = readl(trng->base + TRNG_INTR_EN);
|
||||
val &= ~INTR_MASK;
|
||||
writel(val, trng->base + TRNG_INTR_EN);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int airoha_trng_init(struct hwrng *rng)
|
||||
{
|
||||
struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
|
||||
val |= RNG_EN;
|
||||
writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
|
||||
|
||||
/* Set out of SW Reset */
|
||||
airoha_trng_irq_unmask(trng);
|
||||
writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
|
||||
|
||||
ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
|
||||
if (ret <= 0) {
|
||||
dev_err(trng->dev, "Timeout waiting for Health Check\n");
|
||||
airoha_trng_irq_mask(trng);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Check if Health Test Failed */
|
||||
val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
|
||||
if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
|
||||
dev_err(trng->dev, "Health Check fail: %s test fail\n",
|
||||
val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Check if IP is ready */
|
||||
ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
|
||||
val & SAMPLE_RDY, 10, 1000);
|
||||
if (ret < 0) {
|
||||
dev_err(trng->dev, "Timeout waiting for IP ready");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* CNT_TRANS must be 0x80 for IP to be considered ready */
|
||||
ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
|
||||
FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
|
||||
10, 1000);
|
||||
if (ret < 0) {
|
||||
dev_err(trng->dev, "Timeout waiting for IP ready");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void airoha_trng_cleanup(struct hwrng *rng)
|
||||
{
|
||||
struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
|
||||
u32 val;
|
||||
|
||||
val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
|
||||
val &= ~RNG_EN;
|
||||
writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
|
||||
|
||||
/* Put it in SW Reset */
|
||||
writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
|
||||
}
|
||||
|
||||
static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
|
||||
{
|
||||
struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
|
||||
u32 *data = buf;
|
||||
u32 status;
|
||||
int ret;
|
||||
|
||||
ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
|
||||
status & RAW_DATA_VALID, 10, 1000);
|
||||
if (ret < 0) {
|
||||
dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
*data = readl(trng->base + TRNG_RAW_DATA_OUT);
|
||||
|
||||
return 4;
|
||||
}
|
||||
|
||||
static irqreturn_t airoha_trng_irq(int irq, void *priv)
|
||||
{
|
||||
struct airoha_trng *trng = (struct airoha_trng *)priv;
|
||||
|
||||
airoha_trng_irq_mask(trng);
|
||||
/* Just complete the task, we will read the value later */
|
||||
complete(&trng->rng_op_done);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int airoha_trng_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct airoha_trng *trng;
|
||||
int irq, ret;
|
||||
u32 val;
|
||||
|
||||
trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
|
||||
if (!trng)
|
||||
return -ENOMEM;
|
||||
|
||||
trng->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(trng->base))
|
||||
return PTR_ERR(trng->base);
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
airoha_trng_irq_mask(trng);
|
||||
ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
|
||||
pdev->name, (void *)trng);
|
||||
if (ret) {
|
||||
dev_err(dev, "Can't get interrupt working.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
init_completion(&trng->rng_op_done);
|
||||
|
||||
/* Enable interrupt for SW reset Health Check */
|
||||
val = readl(trng->base + TRNG_INTR_EN);
|
||||
val |= RST_STARTUP_INITR_EN;
|
||||
writel(val, trng->base + TRNG_INTR_EN);
|
||||
|
||||
/* Set output to raw data */
|
||||
val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
|
||||
val |= RAW_DATA_EN;
|
||||
writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
|
||||
|
||||
/* Put it in SW Reset */
|
||||
writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
|
||||
|
||||
trng->dev = dev;
|
||||
trng->rng.name = pdev->name;
|
||||
trng->rng.init = airoha_trng_init;
|
||||
trng->rng.cleanup = airoha_trng_cleanup;
|
||||
trng->rng.read = airoha_trng_read;
|
||||
|
||||
ret = devm_hwrng_register(dev, &trng->rng);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to register rng device: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id airoha_trng_of_match[] = {
|
||||
{ .compatible = "airoha,en7581-trng", },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
|
||||
|
||||
static struct platform_driver airoha_trng_driver = {
|
||||
.driver = {
|
||||
.name = "airoha-trng",
|
||||
.of_match_table = airoha_trng_of_match,
|
||||
},
|
||||
.probe = airoha_trng_probe,
|
||||
};
|
||||
|
||||
module_platform_driver(airoha_trng_driver);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
|
||||
MODULE_DESCRIPTION("Airoha True Random Number Generator driver");
|
||||
@@ -169,6 +169,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
|
||||
priv->rng.init = bcm2835_rng_init;
|
||||
priv->rng.read = bcm2835_rng_read;
|
||||
priv->rng.cleanup = bcm2835_rng_cleanup;
|
||||
priv->rng.quality = 1000;
|
||||
|
||||
if (dev_of_node(dev)) {
|
||||
rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
|
||||
|
||||
@@ -218,8 +218,8 @@ static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
|
||||
clk_bulk_put_all(devres->num_clks, devres->clks);
|
||||
}
|
||||
|
||||
int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
|
||||
struct clk_bulk_data **clks)
|
||||
int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
|
||||
struct clk_bulk_data **clks)
|
||||
{
|
||||
struct clk_bulk_devres *devres;
|
||||
int ret;
|
||||
@@ -244,11 +244,12 @@ int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
|
||||
} else {
|
||||
clk_bulk_put_all(devres->num_clks, devres->clks);
|
||||
devres_free(devres);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return devres->num_clks;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
|
||||
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);
|
||||
|
||||
static int devm_clk_match(struct device *dev, void *res, void *data)
|
||||
{
|
||||
|
||||
File diff suppressed because it is too large
@@ -17,13 +17,15 @@
|
||||
#include <linux/device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
|
||||
/**
|
||||
* DOC: basic gpio gated clock which can be enabled and disabled
|
||||
* with gpio output
|
||||
* Traits of this clock:
|
||||
* prepare - clk_(un)prepare only ensures parent is (un)prepared
|
||||
* enable - clk_enable and clk_disable are functional & control gpio
|
||||
* prepare - clk_(un)prepare are functional and control a gpio that can sleep
|
||||
* enable - clk_enable and clk_disable are functional & control
|
||||
* non-sleeping gpio
|
||||
* rate - inherits rate from parent. No clk_set_rate support
|
||||
* parent - fixed parent. No clk_set_parent support
|
||||
*/
|
||||
@@ -199,7 +201,6 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
|
||||
struct gpio_desc *gpiod;
|
||||
struct clk_hw *hw;
|
||||
bool is_mux;
|
||||
int ret;
|
||||
|
||||
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
|
||||
|
||||
@@ -211,17 +212,9 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
|
||||
|
||||
gpio_name = is_mux ? "select" : "enable";
|
||||
gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
|
||||
if (IS_ERR(gpiod)) {
|
||||
ret = PTR_ERR(gpiod);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n",
|
||||
node, __func__);
|
||||
else
|
||||
pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n",
|
||||
node, __func__,
|
||||
gpio_name);
|
||||
return ret;
|
||||
}
|
||||
if (IS_ERR(gpiod))
|
||||
return dev_err_probe(dev, PTR_ERR(gpiod),
|
||||
"Can't get '%s' named GPIO property\n", gpio_name);
|
||||
|
||||
if (is_mux)
|
||||
hw = clk_hw_register_gpio_mux(dev, gpiod);
|
||||
@@ -247,3 +240,187 @@ static struct platform_driver gpio_clk_driver = {
|
||||
},
|
||||
};
|
||||
builtin_platform_driver(gpio_clk_driver);
|
||||
|
||||
/**
 * DOC: gated fixed clock, controlled with a gpio output and a regulator
 * Traits of this clock:
 * prepare - clk_prepare and clk_unprepare are functional & control regulator
 *           optionally a gpio that can sleep
 * enable - clk_enable and clk_disable are functional & control gpio
 * rate - rate is fixed and set on clock registration
 * parent - fixed clock is a root clock and has no parent
 */
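/*
 * Illustrative consumer-side sketch (an assumption, not part of this
 * patch): a driver consuming a "gated-fixed-clock" only uses the generic
 * clk API (needs <linux/clk.h>); the regulator and GPIO handling described
 * above stays hidden behind clk_prepare_enable(). The "refclk" con_id and
 * the helper name are made-up example names.
 */
static int example_consumer_enable_refclk(struct device *dev)
{
	struct clk *refclk = devm_clk_get(dev, "refclk");

	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	/*
	 * prepare turns on the vdd-supply regulator (and, for a sleeping
	 * gpio, also the enable-gpio); enable toggles a non-sleeping
	 * enable-gpio.
	 */
	return clk_prepare_enable(refclk);
}
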
/**
|
||||
* struct clk_gated_fixed - Gateable fixed rate clock
|
||||
* @clk_gpio: instance of clk_gpio for gate-gpio
|
||||
* @supply: supply regulator
|
||||
* @rate: fixed rate
|
||||
*/
|
||||
struct clk_gated_fixed {
|
||||
struct clk_gpio clk_gpio;
|
||||
struct regulator *supply;
|
||||
unsigned long rate;
|
||||
};
|
||||
|
||||
#define to_clk_gated_fixed(_clk_gpio) container_of(_clk_gpio, struct clk_gated_fixed, clk_gpio)
|
||||
|
||||
static unsigned long clk_gated_fixed_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
return to_clk_gated_fixed(to_clk_gpio(hw))->rate;
|
||||
}
|
||||
|
||||
static int clk_gated_fixed_prepare(struct clk_hw *hw)
|
||||
{
|
||||
struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
|
||||
|
||||
if (!clk->supply)
|
||||
return 0;
|
||||
|
||||
return regulator_enable(clk->supply);
|
||||
}
|
||||
|
||||
static void clk_gated_fixed_unprepare(struct clk_hw *hw)
|
||||
{
|
||||
struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
|
||||
|
||||
if (!clk->supply)
|
||||
return;
|
||||
|
||||
regulator_disable(clk->supply);
|
||||
}
|
||||
|
||||
static int clk_gated_fixed_is_prepared(struct clk_hw *hw)
|
||||
{
|
||||
struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
|
||||
|
||||
if (!clk->supply)
|
||||
return true;
|
||||
|
||||
return regulator_is_enabled(clk->supply);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fixed gated clock with non-sleeping gpio.
|
||||
*
|
||||
* Prepare operation turns on the supply regulator
|
||||
* and the enable operation switches the enable-gpio.
|
||||
*/
|
||||
static const struct clk_ops clk_gated_fixed_ops = {
|
||||
.prepare = clk_gated_fixed_prepare,
|
||||
.unprepare = clk_gated_fixed_unprepare,
|
||||
.is_prepared = clk_gated_fixed_is_prepared,
|
||||
.enable = clk_gpio_gate_enable,
|
||||
.disable = clk_gpio_gate_disable,
|
||||
.is_enabled = clk_gpio_gate_is_enabled,
|
||||
.recalc_rate = clk_gated_fixed_recalc_rate,
|
||||
};
|
||||
|
||||
static int clk_sleeping_gated_fixed_prepare(struct clk_hw *hw)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = clk_gated_fixed_prepare(hw);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = clk_sleeping_gpio_gate_prepare(hw);
|
||||
if (ret)
|
||||
clk_gated_fixed_unprepare(hw);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void clk_sleeping_gated_fixed_unprepare(struct clk_hw *hw)
|
||||
{
|
||||
clk_gated_fixed_unprepare(hw);
|
||||
clk_sleeping_gpio_gate_unprepare(hw);
|
||||
}
|
||||
|
||||
/*
 * Fixed gated clock with a sleeping gpio.
 *
 * Enabling the supply regulator and switching the enable-gpio both
 * happen in the prepare step.
 * is_prepared only needs to check the gpio state, as toggling the
 * gpio is the last step when preparing.
 */
|
||||
static const struct clk_ops clk_sleeping_gated_fixed_ops = {
|
||||
.prepare = clk_sleeping_gated_fixed_prepare,
|
||||
.unprepare = clk_sleeping_gated_fixed_unprepare,
|
||||
.is_prepared = clk_sleeping_gpio_gate_is_prepared,
|
||||
.recalc_rate = clk_gated_fixed_recalc_rate,
|
||||
};
|
||||
|
||||
static int clk_gated_fixed_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct clk_gated_fixed *clk;
|
||||
const struct clk_ops *ops;
|
||||
const char *clk_name;
|
||||
u32 rate;
|
||||
int ret;
|
||||
|
||||
clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
|
||||
if (!clk)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = device_property_read_u32(dev, "clock-frequency", &rate);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "Failed to get clock-frequency\n");
|
||||
clk->rate = rate;
|
||||
|
||||
ret = device_property_read_string(dev, "clock-output-names", &clk_name);
|
||||
if (ret)
|
||||
clk_name = fwnode_get_name(dev->fwnode);
|
||||
|
||||
clk->supply = devm_regulator_get_optional(dev, "vdd");
|
||||
if (IS_ERR(clk->supply)) {
|
||||
if (PTR_ERR(clk->supply) != -ENODEV)
|
||||
return dev_err_probe(dev, PTR_ERR(clk->supply),
|
||||
"Failed to get regulator\n");
|
||||
clk->supply = NULL;
|
||||
}
|
||||
|
||||
clk->clk_gpio.gpiod = devm_gpiod_get_optional(dev, "enable",
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(clk->clk_gpio.gpiod))
|
||||
return dev_err_probe(dev, PTR_ERR(clk->clk_gpio.gpiod),
|
||||
"Failed to get gpio\n");
|
||||
|
||||
if (gpiod_cansleep(clk->clk_gpio.gpiod))
|
||||
ops = &clk_sleeping_gated_fixed_ops;
|
||||
else
|
||||
ops = &clk_gated_fixed_ops;
|
||||
|
||||
clk->clk_gpio.hw.init = CLK_HW_INIT_NO_PARENT(clk_name, ops, 0);
|
||||
|
||||
/* register the clock */
|
||||
ret = devm_clk_hw_register(dev, &clk->clk_gpio.hw);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret,
|
||||
"Failed to register clock\n");
|
||||
|
||||
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
|
||||
&clk->clk_gpio.hw);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret,
|
||||
"Failed to register clock provider\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id gated_fixed_clk_match_table[] = {
|
||||
{ .compatible = "gated-fixed-clock" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
static struct platform_driver gated_fixed_clk_driver = {
|
||||
.probe = clk_gated_fixed_probe,
|
||||
.driver = {
|
||||
.name = "gated-fixed-clk",
|
||||
.of_match_table = gated_fixed_clk_match_table,
|
||||
},
|
||||
};
|
||||
builtin_platform_driver(gated_fixed_clk_driver);
|
||||
|
||||
@@ -15,6 +15,14 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called sun50i-cpufreq-nvmem.
|
||||
|
||||
config ARM_AIROHA_SOC_CPUFREQ
|
||||
tristate "Airoha EN7581 SoC CPUFreq support"
|
||||
depends on ARCH_AIROHA || COMPILE_TEST
|
||||
select PM_OPP
|
||||
default ARCH_AIROHA
|
||||
help
|
||||
This adds the CPUFreq driver for Airoha EN7581 SoCs.
|
||||
|
||||
config ARM_APPLE_SOC_CPUFREQ
|
||||
tristate "Apple Silicon SoC CPUFreq support"
|
||||
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
|
||||
|
||||
@@ -52,6 +52,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
|
||||
|
||||
##################################################################################
|
||||
# ARM SoC drivers
|
||||
obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o
|
||||
obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
|
||||
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
|
||||
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
|
||||
|
||||
167
drivers/cpufreq/airoha-cpufreq.c
Normal file
@@ -0,0 +1,167 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "cpufreq-dt.h"
|
||||
|
||||
struct airoha_cpufreq_priv {
|
||||
int opp_token;
|
||||
struct device **virt_devs;
|
||||
struct platform_device *cpufreq_dt;
|
||||
};
|
||||
|
||||
static struct platform_device *cpufreq_pdev;
|
||||
|
||||
/* NOP function to disable OPP from setting clock */
|
||||
static int airoha_cpufreq_config_clks_nop(struct device *dev,
|
||||
struct opp_table *opp_table,
|
||||
struct dev_pm_opp *old_opp,
|
||||
struct dev_pm_opp *opp,
|
||||
void *data, bool scaling_down)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
|
||||
static const char * const airoha_cpufreq_pd_names[] = { "perf", NULL };
|
||||
|
||||
static int airoha_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct dev_pm_opp_config config = {
|
||||
.clk_names = airoha_cpufreq_clk_names,
|
||||
.config_clks = airoha_cpufreq_config_clks_nop,
|
||||
.genpd_names = airoha_cpufreq_pd_names,
|
||||
};
|
||||
struct platform_device *cpufreq_dt;
|
||||
struct airoha_cpufreq_priv *priv;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device **virt_devs = NULL;
|
||||
struct device *cpu_dev;
|
||||
int ret;
|
||||
|
||||
/* CPUs refer to the same OPP table */
|
||||
cpu_dev = get_cpu_device(0);
|
||||
if (!cpu_dev)
|
||||
return -ENODEV;
|
||||
|
||||
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Set OPP table conf with NOP config_clks */
|
||||
priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
|
||||
if (priv->opp_token < 0)
|
||||
return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
|
||||
|
||||
/* Set Attached PM for OPP ACTIVE */
|
||||
if (virt_devs) {
|
||||
const char * const *name = airoha_cpufreq_pd_names;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; *name; i++, name++) {
|
||||
ret = pm_runtime_resume_and_get(virt_devs[i]);
|
||||
if (ret) {
|
||||
dev_err(cpu_dev, "failed to resume %s: %d\n",
|
||||
*name, ret);
|
||||
|
||||
/* Rollback previous PM runtime calls */
|
||||
name = config.genpd_names;
|
||||
for (j = 0; *name && j < i; j++, name++)
|
||||
pm_runtime_put(virt_devs[j]);
|
||||
|
||||
goto err_register_cpufreq;
|
||||
}
|
||||
}
|
||||
priv->virt_devs = virt_devs;
|
||||
}
|
||||
|
||||
cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
|
||||
ret = PTR_ERR_OR_ZERO(cpufreq_dt);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
|
||||
goto err_register_cpufreq;
|
||||
}
|
||||
|
||||
priv->cpufreq_dt = cpufreq_dt;
|
||||
platform_set_drvdata(pdev, priv);
|
||||
|
||||
return 0;
|
||||
|
||||
err_register_cpufreq:
|
||||
dev_pm_opp_clear_config(priv->opp_token);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void airoha_cpufreq_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
|
||||
const char * const *name = airoha_cpufreq_pd_names;
|
||||
int i;
|
||||
|
||||
platform_device_unregister(priv->cpufreq_dt);
|
||||
|
||||
dev_pm_opp_clear_config(priv->opp_token);
|
||||
|
||||
for (i = 0; *name; i++, name++)
|
||||
pm_runtime_put(priv->virt_devs[i]);
|
||||
}
|
||||
|
||||
static struct platform_driver airoha_cpufreq_driver = {
|
||||
.probe = airoha_cpufreq_probe,
|
||||
.remove_new = airoha_cpufreq_remove,
|
||||
.driver = {
|
||||
.name = "airoha-cpufreq",
|
||||
},
|
||||
};
|
||||
|
||||
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
|
||||
{ .compatible = "airoha,an7583" },
|
||||
{ .compatible = "airoha,en7581" },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
|
||||
|
||||
static int __init airoha_cpufreq_init(void)
|
||||
{
|
||||
struct device_node *np = of_find_node_by_path("/");
|
||||
const struct of_device_id *match;
|
||||
int ret;
|
||||
|
||||
if (!np)
|
||||
return -ENODEV;
|
||||
|
||||
match = of_match_node(airoha_cpufreq_match_list, np);
|
||||
of_node_put(np);
|
||||
if (!match)
|
||||
return -ENODEV;
|
||||
|
||||
ret = platform_driver_register(&airoha_cpufreq_driver);
|
||||
if (unlikely(ret < 0))
|
||||
return ret;
|
||||
|
||||
cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
|
||||
-1, match, sizeof(*match));
|
||||
ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
|
||||
if (ret)
|
||||
platform_driver_unregister(&airoha_cpufreq_driver);
|
||||
|
||||
return ret;
|
||||
}
|
||||
module_init(airoha_cpufreq_init);
|
||||
|
||||
static void __exit airoha_cpufreq_exit(void)
|
||||
{
|
||||
platform_device_unregister(cpufreq_pdev);
|
||||
platform_driver_unregister(&airoha_cpufreq_driver);
|
||||
}
|
||||
module_exit(airoha_cpufreq_exit);
|
||||
|
||||
MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
|
||||
MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
|
||||
MODULE_LICENSE("GPL");
|
||||
@@ -103,6 +103,9 @@ static const struct of_device_id allowlist[] __initconst = {
|
||||
* platforms using "operating-points-v2" property.
|
||||
*/
|
||||
static const struct of_device_id blocklist[] __initconst = {
|
||||
{ .compatible = "airoha,an7583", },
|
||||
{ .compatible = "airoha,en7581", },
|
||||
|
||||
{ .compatible = "allwinner,sun50i-h6", },
|
||||
{ .compatible = "allwinner,sun50i-h616", },
|
||||
{ .compatible = "allwinner,sun50i-h618", },
|
||||
|
||||
@@ -851,5 +851,6 @@ config CRYPTO_DEV_SA2UL
|
||||
|
||||
source "drivers/crypto/aspeed/Kconfig"
|
||||
source "drivers/crypto/starfive/Kconfig"
|
||||
source "drivers/crypto/inside-secure/eip93/Kconfig"
|
||||
|
||||
endif # CRYPTO_HW
|
||||
|
||||
@@ -52,3 +52,4 @@ obj-y += hisilicon/
|
||||
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
|
||||
obj-y += intel/
|
||||
obj-y += starfive/
|
||||
obj-y += inside-secure/eip93/
|
||||
|
||||
20
drivers/crypto/inside-secure/eip93/Kconfig
Normal file
@@ -0,0 +1,20 @@
# SPDX-License-Identifier: GPL-2.0
config CRYPTO_DEV_EIP93
	tristate "Support for EIP93 crypto HW accelerators"
	depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST
	select CRYPTO_LIB_AES
	select CRYPTO_LIB_DES
	select CRYPTO_SKCIPHER
	select CRYPTO_AEAD
	select CRYPTO_AUTHENC
	select CRYPTO_MD5
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	help
	  The EIP93 has various crypto HW accelerators. Select this if
	  you want to use the EIP93 modules for any of the crypto algorithms.

	  If the IP supports it, this provides offload for AES in ECB, CBC and
	  CTR modes, as well as DES and 3DES in ECB and CBC modes.

	  AEAD authenc(hmac(x),cipher(y)) is also provided for the supported
	  algorithms.
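As a hedged illustration (not part of this series), in-kernel users reach the offloaded transforms through the regular crypto API by name, e.g. one of the authenc templates this driver registers; the helper name below is hypothetical.

#include <crypto/aead.h>
#include <linux/err.h>

/* falls back to a software implementation when the EIP93 driver is absent */
static struct crypto_aead *example_get_aead(void)
{
	return crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
}
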
5
drivers/crypto/inside-secure/eip93/Makefile
Normal file
@@ -0,0 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o

crypto-hw-eip93-y += eip93-main.o eip93-common.o
crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o
crypto-hw-eip93-y += eip93-hash.o
702
drivers/crypto/inside-secure/eip93/eip93-aead.c
Normal file
@@ -0,0 +1,702 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2019 - 2021
|
||||
*
|
||||
* Richard van Schagen <vschagen@icloud.com>
|
||||
* Christian Marangi <ansuelsmth@gmail.com>
|
||||
*/
|
||||
|
||||
#include <crypto/aead.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/authenc.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/hmac.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/md5.h>
|
||||
#include <crypto/null.h>
|
||||
#include <crypto/sha1.h>
|
||||
#include <crypto/sha2.h>
|
||||
|
||||
#include <crypto/internal/des.h>
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include "eip93-aead.h"
|
||||
#include "eip93-cipher.h"
|
||||
#include "eip93-common.h"
|
||||
#include "eip93-regs.h"
|
||||
|
||||
void eip93_aead_handle_result(struct crypto_async_request *async, int err)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
|
||||
struct eip93_device *mtk = ctx->mtk;
|
||||
struct aead_request *req = aead_request_cast(async);
|
||||
struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
|
||||
|
||||
eip93_unmap_dma(mtk, rctx, req->src, req->dst);
|
||||
eip93_handle_result(mtk, rctx, req->iv);
|
||||
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int eip93_aead_send_req(struct crypto_async_request *async)
|
||||
{
|
||||
struct aead_request *req = aead_request_cast(async);
|
||||
struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
|
||||
int err;
|
||||
|
||||
err = check_valid_request(rctx);
|
||||
if (err) {
|
||||
aead_request_complete(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
return eip93_send_req(async, req->iv, rctx);
|
||||
}
|
||||
|
||||
/* Crypto aead API functions */
|
||||
static int eip93_aead_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
|
||||
struct eip93_alg_template, alg.aead.base);
|
||||
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
sizeof(struct eip93_cipher_reqctx));
|
||||
|
||||
ctx->mtk = tmpl->mtk;
|
||||
ctx->flags = tmpl->flags;
|
||||
ctx->type = tmpl->type;
|
||||
ctx->set_assoc = true;
|
||||
|
||||
ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
|
||||
if (!ctx->sa_record)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void eip93_aead_cra_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
dma_unmap_single(ctx->mtk->dev, ctx->sa_record_base,
|
||||
sizeof(*ctx->sa_record), DMA_TO_DEVICE);
|
||||
kfree(ctx->sa_record);
|
||||
}
|
||||
|
||||
static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_authenc_keys keys;
|
||||
struct crypto_aes_ctx aes;
|
||||
struct sa_record *sa_record = ctx->sa_record;
|
||||
u32 nonce = 0;
|
||||
int ret;
|
||||
|
||||
if (crypto_authenc_extractkeys(&keys, key, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (IS_RFC3686(ctx->flags)) {
|
||||
if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
|
||||
memcpy(&nonce, keys.enckey + keys.enckeylen,
|
||||
CTR_RFC3686_NONCE_SIZE);
|
||||
}
|
||||
|
||||
switch ((ctx->flags & EIP93_ALG_MASK)) {
|
||||
case EIP93_ALG_DES:
|
||||
ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
|
||||
break;
|
||||
case EIP93_ALG_3DES:
|
||||
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
|
||||
break;
|
||||
case EIP93_ALG_AES:
|
||||
ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ctx->blksize = crypto_aead_blocksize(ctfm);
|
||||
/* Encryption key */
|
||||
eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags);
|
||||
sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
|
||||
sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
|
||||
EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH);
|
||||
sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
|
||||
sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
|
||||
ctx->authsize / sizeof(u32));
|
||||
|
||||
memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen);
|
||||
ctx->sa_nonce = nonce;
|
||||
sa_record->sa_nonce = nonce;
|
||||
|
||||
/* authentication key */
|
||||
ret = eip93_authenc_setkey(ctfm, sa_record, keys.authkey,
|
||||
keys.authkeylen);
|
||||
|
||||
ctx->set_assoc = true;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int eip93_aead_setauthsize(struct crypto_aead *ctfm,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
ctx->authsize = authsize;
|
||||
ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
|
||||
ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
|
||||
ctx->authsize / sizeof(u32));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx,
|
||||
struct aead_request *req)
|
||||
{
|
||||
struct sa_record *sa_record = ctx->sa_record;
|
||||
|
||||
sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET;
|
||||
sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET,
|
||||
req->assoclen / sizeof(u32));
|
||||
|
||||
ctx->assoclen = req->assoclen;
|
||||
}
|
||||
|
||||
static int eip93_aead_crypt(struct aead_request *req)
|
||||
{
|
||||
struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
|
||||
struct crypto_async_request *async = &req->base;
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
int ret;
|
||||
|
||||
ctx->sa_record_base = dma_map_single(ctx->mtk->dev, ctx->sa_record,
|
||||
sizeof(*ctx->sa_record), DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(ctx->mtk->dev, ctx->sa_record_base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
rctx->textsize = req->cryptlen;
|
||||
rctx->blksize = ctx->blksize;
|
||||
rctx->assoclen = req->assoclen;
|
||||
rctx->authsize = ctx->authsize;
|
||||
rctx->sg_src = req->src;
|
||||
rctx->sg_dst = req->dst;
|
||||
rctx->ivsize = crypto_aead_ivsize(aead);
|
||||
rctx->desc_flags = EIP93_DESC_AEAD;
|
||||
rctx->sa_record_base = ctx->sa_record_base;
|
||||
|
||||
if (IS_DECRYPT(rctx->flags))
|
||||
rctx->textsize -= rctx->authsize;
|
||||
|
||||
return eip93_aead_send_req(async);
|
||||
}
|
||||
|
||||
static int eip93_aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
|
||||
|
||||
rctx->flags = ctx->flags;
|
||||
rctx->flags |= EIP93_ENCRYPT;
|
||||
if (ctx->set_assoc) {
|
||||
eip93_aead_setassoc(ctx, req);
|
||||
ctx->set_assoc = false;
|
||||
}
|
||||
|
||||
if (req->assoclen != ctx->assoclen) {
|
||||
dev_err(ctx->mtk->dev, "Request AAD length error\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return eip93_aead_crypt(req);
|
||||
}
|
||||
|
||||
static int eip93_aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
|
||||
|
||||
ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
|
||||
ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD |
|
||||
EIP93_SA_CMD_COPY_DIGEST);
|
||||
|
||||
rctx->flags = ctx->flags;
|
||||
rctx->flags |= EIP93_DECRYPT;
|
||||
if (ctx->set_assoc) {
|
||||
eip93_aead_setassoc(ctx, req);
|
||||
ctx->set_assoc = false;
|
||||
}
|
||||
|
||||
if (req->assoclen != ctx->assoclen) {
|
||||
dev_err(ctx->mtk->dev, "Request AAD length error\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return eip93_aead_crypt(req);
|
||||
}
|
||||
|
||||
/* Available authenc algorithms in this module */
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(aes))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(md5-eip93), cbc(aes-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(aes))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha1-eip93),cbc(aes-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha224),cbc(aes))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha224-eip93),cbc(aes-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(aes))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha256-eip93),cbc(aes-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 |
|
||||
EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 |
|
||||
EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 |
|
||||
EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 |
|
||||
EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(md5-eip93),cbc(des-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha1-eip93),cbc(des-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha224),cbc(des))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha224-eip93),cbc(des-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(des))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha256-eip93),cbc(des-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(md5-eip93),cbc(des3_ede-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0x0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0x0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0x0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = {
|
||||
.type = EIP93_ALG_TYPE_AEAD,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES,
|
||||
.alg.aead = {
|
||||
.setkey = eip93_aead_setkey,
|
||||
.encrypt = eip93_aead_encrypt,
|
||||
.decrypt = eip93_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setauthsize = eip93_aead_setauthsize,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
|
||||
.cra_driver_name =
|
||||
"authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0x0,
|
||||
.cra_init = eip93_aead_cra_init,
|
||||
.cra_exit = eip93_aead_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
38 drivers/crypto/inside-secure/eip93/eip93-aead.h Normal file
@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
#ifndef _EIP93_AEAD_H_
#define _EIP93_AEAD_H_

extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes;
extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des;
extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede;
extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ecb_null;
extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null;

void eip93_aead_handle_result(struct crypto_async_request *async, int err);

#endif /* _EIP93_AEAD_H_ */
16 drivers/crypto/inside-secure/eip93/eip93-aes.h Normal file
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
#ifndef _EIP93_AES_H_
#define _EIP93_AES_H_

extern struct eip93_alg_template eip93_alg_ecb_aes;
extern struct eip93_alg_template eip93_alg_cbc_aes;
extern struct eip93_alg_template eip93_alg_ctr_aes;
extern struct eip93_alg_template eip93_alg_rfc3686_aes;

#endif /* _EIP93_AES_H_ */
407 drivers/crypto/inside-secure/eip93/eip93-cipher.c Normal file
@@ -0,0 +1,407 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/internal/des.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include "eip93-cipher.h"
|
||||
#include "eip93-common.h"
|
||||
#include "eip93-regs.h"
|
||||
|
||||
void eip93_skcipher_handle_result(struct crypto_async_request *async, int err)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
|
||||
struct eip93_device *mtk = ctx->mtk;
|
||||
struct skcipher_request *req = skcipher_request_cast(async);
|
||||
struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
|
||||
|
||||
eip93_unmap_dma(mtk, rctx, req->src, req->dst);
|
||||
eip93_handle_result(mtk, rctx, req->iv);
|
||||
|
||||
skcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int eip93_skcipher_send_req(struct crypto_async_request *async)
|
||||
{
|
||||
struct skcipher_request *req = skcipher_request_cast(async);
|
||||
struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
|
||||
int err;
|
||||
|
||||
err = check_valid_request(rctx);
|
||||
|
||||
if (err) {
|
||||
skcipher_request_complete(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
return eip93_send_req(async, req->iv, rctx);
|
||||
}
|
||||
|
||||
/* Crypto skcipher API functions */
|
||||
static int eip93_skcipher_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
|
||||
struct eip93_alg_template, alg.skcipher.base);
|
||||
|
||||
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
|
||||
sizeof(struct eip93_cipher_reqctx));
|
||||
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
ctx->mtk = tmpl->mtk;
|
||||
ctx->type = tmpl->type;
|
||||
|
||||
ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
|
||||
if (!ctx->sa_record)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
dma_unmap_single(ctx->mtk->dev, ctx->sa_record_base,
|
||||
sizeof(*ctx->sa_record), DMA_TO_DEVICE);
|
||||
kfree(ctx->sa_record);
|
||||
}
|
||||
|
||||
static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
|
||||
struct eip93_alg_template,
|
||||
alg.skcipher.base);
|
||||
struct sa_record *sa_record = ctx->sa_record;
|
||||
unsigned int keylen = len;
|
||||
u32 flags = tmpl->flags;
|
||||
u32 nonce = 0;
|
||||
int ret = 0;
|
||||
|
||||
if (!key || !keylen)
|
||||
return -EINVAL;
|
||||
|
||||
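	/*
	 * For RFC3686 the last CTR_RFC3686_NONCE_SIZE bytes of the key are
	 * the nonce, not cipher key material; strip it and keep it separately.
	 */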
if (IS_RFC3686(flags)) {
|
||||
if (len < CTR_RFC3686_NONCE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
keylen = len - CTR_RFC3686_NONCE_SIZE;
|
||||
memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE);
|
||||
}
|
||||
|
||||
if (flags & EIP93_ALG_DES) {
|
||||
ctx->blksize = DES_BLOCK_SIZE;
|
||||
ret = verify_skcipher_des_key(ctfm, key);
|
||||
}
|
||||
if (flags & EIP93_ALG_3DES) {
|
||||
ctx->blksize = DES3_EDE_BLOCK_SIZE;
|
||||
ret = verify_skcipher_des3_key(ctfm, key);
|
||||
}
|
||||
|
||||
if (flags & EIP93_ALG_AES) {
|
||||
struct crypto_aes_ctx aes;
|
||||
|
||||
ctx->blksize = AES_BLOCK_SIZE;
|
||||
ret = aes_expandkey(&aes, key, keylen);
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eip93_set_sa_record(sa_record, keylen, flags);
|
||||
|
||||
memcpy(sa_record->sa_key, key, keylen);
|
||||
ctx->sa_nonce = nonce;
|
||||
sa_record->sa_nonce = nonce;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int eip93_skcipher_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
|
||||
struct crypto_async_request *async = &req->base;
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
int ret;
|
||||
|
||||
if (!req->cryptlen)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* ECB and CBC algorithms require message lengths to be
|
||||
* multiples of block size.
|
||||
*/
|
||||
if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
|
||||
if (!IS_ALIGNED(req->cryptlen,
|
||||
crypto_skcipher_blocksize(skcipher)))
|
||||
return -EINVAL;
|
||||
|
||||
ctx->sa_record_base = dma_map_single(ctx->mtk->dev, ctx->sa_record,
|
||||
sizeof(*ctx->sa_record), DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(ctx->mtk->dev, ctx->sa_record_base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
rctx->assoclen = 0;
|
||||
rctx->textsize = req->cryptlen;
|
||||
rctx->authsize = 0;
|
||||
rctx->sg_src = req->src;
|
||||
rctx->sg_dst = req->dst;
|
||||
rctx->ivsize = crypto_skcipher_ivsize(skcipher);
|
||||
rctx->blksize = ctx->blksize;
|
||||
rctx->desc_flags = EIP93_DESC_SKCIPHER;
|
||||
rctx->sa_record_base = ctx->sa_record_base;
|
||||
|
||||
return eip93_skcipher_send_req(async);
|
||||
}
|
||||
|
||||
static int eip93_skcipher_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
|
||||
struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
|
||||
struct eip93_alg_template, alg.skcipher.base);
|
||||
|
||||
rctx->flags = tmpl->flags;
|
||||
rctx->flags |= EIP93_ENCRYPT;
|
||||
|
||||
return eip93_skcipher_crypt(req);
|
||||
}
|
||||
|
||||
static int eip93_skcipher_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
|
||||
struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
|
||||
struct eip93_alg_template, alg.skcipher.base);
|
||||
|
||||
ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
|
||||
|
||||
rctx->flags = tmpl->flags;
|
||||
rctx->flags |= EIP93_DECRYPT;
|
||||
|
||||
return eip93_skcipher_crypt(req);
|
||||
}
|
||||
|
||||
/* Available algorithms in this module */
|
||||
struct eip93_alg_template eip93_alg_ecb_aes = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_ECB | EIP93_ALG_AES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = 0,
|
||||
.base = {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb(aes-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0xf,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_cbc_aes = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_CBC | EIP93_ALG_AES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.base = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc(aes-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0xf,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_ctr_aes = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_CTR | EIP93_ALG_AES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.base = {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr(aes-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0xf,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_rfc3686_aes = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.base = {
|
||||
.cra_name = "rfc3686(ctr(aes))",
|
||||
.cra_driver_name = "rfc3686(ctr(aes-eip93))",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0xf,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_ecb_des = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_ECB | EIP93_ALG_DES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = 0,
|
||||
.base = {
|
||||
.cra_name = "ecb(des)",
|
||||
.cra_driver_name = "ebc(des-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_cbc_des = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_CBC | EIP93_ALG_DES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.base = {
|
||||
.cra_name = "cbc(des)",
|
||||
.cra_driver_name = "cbc(des-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_ecb_des3_ede = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_ECB | EIP93_ALG_3DES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = 0,
|
||||
.base = {
|
||||
.cra_name = "ecb(des3_ede)",
|
||||
.cra_driver_name = "ecb(des3_ede-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_cbc_des3_ede = {
|
||||
.type = EIP93_ALG_TYPE_SKCIPHER,
|
||||
.flags = EIP93_MODE_CBC | EIP93_ALG_3DES,
|
||||
.alg.skcipher = {
|
||||
.setkey = eip93_skcipher_setkey,
|
||||
.encrypt = eip93_skcipher_encrypt,
|
||||
.decrypt = eip93_skcipher_decrypt,
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.base = {
|
||||
.cra_name = "cbc(des3_ede)",
|
||||
.cra_driver_name = "cbc(des3_ede-eip93)",
|
||||
.cra_priority = EIP93_CRA_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_crypto_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_init = eip93_skcipher_cra_init,
|
||||
.cra_exit = eip93_skcipher_cra_exit,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
};
60 drivers/crypto/inside-secure/eip93/eip93-cipher.h Normal file
@@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
#ifndef _EIP93_CIPHER_H_
#define _EIP93_CIPHER_H_

#include "eip93-main.h"

struct eip93_crypto_ctx {
	struct eip93_device *mtk;
	u32 flags;
	struct sa_record *sa_record;
	u32 sa_nonce;
	int blksize;
	dma_addr_t sa_record_base;
	/* AEAD specific */
	unsigned int authsize;
	unsigned int assoclen;
	bool set_assoc;
	enum eip93_alg_type type;
};

struct eip93_cipher_reqctx {
	u16 desc_flags;
	u16 flags;
	unsigned int blksize;
	unsigned int ivsize;
	unsigned int textsize;
	unsigned int assoclen;
	unsigned int authsize;
	dma_addr_t sa_record_base;
	struct sa_state *sa_state;
	dma_addr_t sa_state_base;
	struct eip93_descriptor *cdesc;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;
	int src_nents;
	int dst_nents;
	struct sa_state *sa_state_ctr;
	dma_addr_t sa_state_ctr_base;
};

int check_valid_request(struct eip93_cipher_reqctx *rctx);

void eip93_unmap_dma(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
		     struct scatterlist *reqsrc, struct scatterlist *reqdst);

void eip93_skcipher_handle_result(struct crypto_async_request *async, int err);

int eip93_send_req(struct crypto_async_request *async,
		   const u8 *reqiv, struct eip93_cipher_reqctx *rctx);

void eip93_handle_result(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
			 u8 *reqiv);

#endif /* _EIP93_CIPHER_H_ */
824 drivers/crypto/inside-secure/eip93/eip93-common.c Normal file
@@ -0,0 +1,824 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/hmac.h>
|
||||
#include <crypto/sha1.h>
|
||||
#include <crypto/sha2.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include "eip93-cipher.h"
|
||||
#include "eip93-hash.h"
|
||||
#include "eip93-common.h"
|
||||
#include "eip93-main.h"
|
||||
#include "eip93-regs.h"
|
||||
|
||||
int eip93_parse_ctrl_stat_err(struct eip93_device *mtk, int err)
|
||||
{
|
||||
u32 ext_err;
|
||||
|
||||
if (!err)
|
||||
return 0;
|
||||
|
||||
switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
|
||||
case EIP93_PE_CTRL_PE_AUTH_ERR:
|
||||
case EIP93_PE_CTRL_PE_PAD_ERR:
|
||||
return -EBADMSG;
|
||||
/* let software handle anti-replay errors */
|
||||
case EIP93_PE_CTRL_PE_SEQNUM_ERR:
|
||||
return 0;
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR:
|
||||
break;
|
||||
default:
|
||||
dev_err(mtk->dev, "Unhandled error 0x%08x\n", err);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Parse additional ext errors */
|
||||
ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
|
||||
switch (ext_err) {
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
|
||||
return -EIO;
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
|
||||
return -EACCES;
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
|
||||
return -EINVAL;
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
|
||||
case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
|
||||
return -EBADMSG;
|
||||
default:
|
||||
dev_err(mtk->dev, "Unhandled ext error 0x%08x\n", ext_err);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static void *eip93_ring_next_wptr(struct eip93_device *mtk,
|
||||
struct eip93_desc_ring *ring)
|
||||
{
|
||||
void *ptr = ring->write;
|
||||
|
||||
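	/*
	 * The ring is treated as full when the write pointer would catch up
	 * with the read pointer (including the wrap-around case); report
	 * -ENOMEM so the caller can retry later.
	 */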
if ((ring->write == ring->read - ring->offset) ||
|
||||
(ring->read == ring->base && ring->write == ring->base_end))
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (ring->write == ring->base_end)
|
||||
ring->write = ring->base;
|
||||
else
|
||||
ring->write += ring->offset;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static void *eip93_ring_next_rptr(struct eip93_device *mtk,
|
||||
struct eip93_desc_ring *ring)
|
||||
{
|
||||
void *ptr = ring->read;
|
||||
|
||||
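	/* An empty ring has the read and write pointers at the same slot */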
if (ring->write == ring->read)
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
if (ring->read == ring->base_end)
|
||||
ring->read = ring->base;
|
||||
else
|
||||
ring->read += ring->offset;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
int eip93_put_descriptor(struct eip93_device *mtk,
|
||||
struct eip93_descriptor *desc)
|
||||
{
|
||||
struct eip93_descriptor *cdesc;
|
||||
struct eip93_descriptor *rdesc;
|
||||
|
||||
guard(spinlock_irqsave)(&mtk->ring->write_lock);
|
||||
|
||||
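	/*
	 * Claim matching slots in the result and command rings, then publish
	 * the command descriptor; both rings advance together so every
	 * command descriptor has a result slot reserved for it.
	 */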
rdesc = eip93_ring_next_wptr(mtk, &mtk->ring->rdr);
|
||||
|
||||
if (IS_ERR(rdesc))
|
||||
return -ENOENT;
|
||||
|
||||
cdesc = eip93_ring_next_wptr(mtk, &mtk->ring->cdr);
|
||||
if (IS_ERR(cdesc))
|
||||
return -ENOENT;
|
||||
|
||||
memset(rdesc, 0, sizeof(struct eip93_descriptor));
|
||||
|
||||
memcpy(cdesc, desc, sizeof(struct eip93_descriptor));
|
||||
|
||||
atomic_dec(&mtk->ring->free);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *eip93_get_descriptor(struct eip93_device *mtk)
|
||||
{
|
||||
struct eip93_descriptor *cdesc;
|
||||
void *ptr;
|
||||
|
||||
guard(spinlock_irqsave)(&mtk->ring->read_lock);
|
||||
|
||||
cdesc = eip93_ring_next_rptr(mtk, &mtk->ring->cdr);
|
||||
if (IS_ERR(cdesc))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
memset(cdesc, 0, sizeof(struct eip93_descriptor));
|
||||
|
||||
ptr = eip93_ring_next_rptr(mtk, &mtk->ring->rdr);
|
||||
if (IS_ERR(ptr))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
atomic_inc(&mtk->ring->free);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
|
||||
{
|
||||
if (!*sg || !len)
|
||||
return;
|
||||
|
||||
free_pages((unsigned long)sg_virt(*sg), get_order(len));
|
||||
kfree(*sg);
|
||||
*sg = NULL;
|
||||
}
|
||||
|
||||
static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
|
||||
const u32 len, const bool copy)
|
||||
{
|
||||
void *pages;
|
||||
|
||||
*dst = kmalloc(sizeof(**dst), GFP_KERNEL);
|
||||
if (!*dst)
|
||||
return -ENOMEM;
|
||||
|
||||
pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
|
||||
get_order(len));
|
||||
if (!pages) {
|
||||
kfree(*dst);
|
||||
*dst = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sg_init_table(*dst, 1);
|
||||
sg_set_buf(*dst, pages, len);
|
||||
|
||||
/* copy only as requested */
|
||||
if (copy)
|
||||
sg_copy_to_buffer(src, sg_nents(src), pages, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
|
||||
const int blksize)
|
||||
{
|
||||
int nents;
|
||||
|
||||
for (nents = 0; sg; sg = sg_next(sg), ++nents) {
|
||||
if (!IS_ALIGNED(sg->offset, 4))
|
||||
return false;
|
||||
|
||||
if (len <= sg->length) {
|
||||
if (!IS_ALIGNED(len, blksize))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!IS_ALIGNED(sg->length, blksize))
|
||||
return false;
|
||||
|
||||
len -= sg->length;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int check_valid_request(struct eip93_cipher_reqctx *rctx)
|
||||
{
|
||||
struct scatterlist *src = rctx->sg_src;
|
||||
struct scatterlist *dst = rctx->sg_dst;
|
||||
u32 src_nents, dst_nents;
|
||||
u32 textsize = rctx->textsize;
|
||||
u32 authsize = rctx->authsize;
|
||||
u32 blksize = rctx->blksize;
|
||||
u32 totlen_src = rctx->assoclen + rctx->textsize;
|
||||
u32 totlen_dst = rctx->assoclen + rctx->textsize;
|
||||
u32 copy_len;
|
||||
bool src_align, dst_align;
|
||||
int err = -EINVAL;
|
||||
|
||||
if (!IS_CTR(rctx->flags)) {
|
||||
if (!IS_ALIGNED(textsize, blksize))
|
||||
return err;
|
||||
}
|
||||
|
||||
if (authsize) {
|
||||
if (IS_ENCRYPT(rctx->flags))
|
||||
totlen_dst += authsize;
|
||||
else
|
||||
totlen_src += authsize;
|
||||
}
|
||||
|
||||
src_nents = sg_nents_for_len(src, totlen_src);
|
||||
dst_nents = sg_nents_for_len(dst, totlen_dst);
|
||||
|
||||
if (src == dst) {
|
||||
src_nents = max(src_nents, dst_nents);
|
||||
dst_nents = src_nents;
|
||||
if (unlikely((totlen_src || totlen_dst) && src_nents <= 0))
|
||||
return err;
|
||||
|
||||
} else {
|
||||
if (unlikely(totlen_src && src_nents <= 0))
|
||||
return err;
|
||||
|
||||
if (unlikely(totlen_dst && dst_nents <= 0))
|
||||
return err;
|
||||
}
|
||||
|
||||
if (authsize) {
|
||||
if (dst_nents == 1 && src_nents == 1) {
|
||||
src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
|
||||
if (src == dst)
|
||||
dst_align = src_align;
|
||||
else
|
||||
dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
|
||||
} else {
|
||||
src_align = false;
|
||||
dst_align = false;
|
||||
}
|
||||
} else {
|
||||
src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
|
||||
if (src == dst)
|
||||
dst_align = src_align;
|
||||
else
|
||||
dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
|
||||
}
|
||||
|
||||
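	/*
	 * Buffers the engine cannot consume in place (unaligned entries,
	 * lengths that are not block-sized, multi-entry AEAD lists) are
	 * bounced through one contiguous buffer: the source is copied in
	 * here, the destination is copied back in eip93_unmap_dma().
	 */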
copy_len = max(totlen_src, totlen_dst);
|
||||
if (!src_align) {
|
||||
err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!dst_align) {
|
||||
err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
|
||||
rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Set sa_record function:
 * even though sa_record is already zero-initialized, keep the explicit
 * " = 0" assignments for readability.
 */
void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
|
||||
const u32 flags)
|
||||
{
|
||||
/* Reset cmd word */
|
||||
sa_record->sa_cmd0_word = 0;
|
||||
sa_record->sa_cmd1_word = 0;
|
||||
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
|
||||
if (!IS_ECB(flags))
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;
|
||||
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;
|
||||
|
||||
switch ((flags & EIP93_ALG_MASK)) {
|
||||
case EIP93_ALG_AES:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
|
||||
sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
|
||||
keylen >> 3);
|
||||
break;
|
||||
case EIP93_ALG_3DES:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
|
||||
break;
|
||||
case EIP93_ALG_DES:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
|
||||
break;
|
||||
default:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
|
||||
}
|
||||
|
||||
switch ((flags & EIP93_HASH_MASK)) {
|
||||
case EIP93_HASH_SHA256:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
|
||||
break;
|
||||
case EIP93_HASH_SHA224:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
|
||||
break;
|
||||
case EIP93_HASH_SHA1:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
|
||||
break;
|
||||
case EIP93_HASH_MD5:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
|
||||
break;
|
||||
default:
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
|
||||
}
|
||||
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;
|
||||
|
||||
switch ((flags & EIP93_MODE_MASK)) {
|
||||
case EIP93_MODE_CBC:
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
|
||||
break;
|
||||
case EIP93_MODE_CTR:
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
|
||||
break;
|
||||
case EIP93_MODE_ECB:
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
|
||||
break;
|
||||
}
|
||||
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
|
||||
if (IS_HASH(flags)) {
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
|
||||
}
|
||||
|
||||
if (IS_HMAC(flags)) {
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
|
||||
sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
|
||||
}
|
||||
|
||||
sa_record->sa_spi = 0x0;
|
||||
sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
|
||||
sa_record->sa_seqmum_mask[1] = 0x0;
|
||||
}
|
||||
|
||||
/*
 * Poor man's scatter/gather function:
 * create a descriptor for every segment to avoid copying buffers.
 * For performance it is better to let the hardware perform the multiple
 * DMA transfers itself.
 */
static int eip93_scatter_combine(struct eip93_device *mtk,
|
||||
struct eip93_cipher_reqctx *rctx,
|
||||
u32 datalen, u32 split, int offsetin)
|
||||
{
|
||||
struct eip93_descriptor *cdesc = rctx->cdesc;
|
||||
struct scatterlist *sgsrc = rctx->sg_src;
|
||||
struct scatterlist *sgdst = rctx->sg_dst;
|
||||
unsigned int remainin = sg_dma_len(sgsrc);
|
||||
unsigned int remainout = sg_dma_len(sgdst);
|
||||
dma_addr_t saddr = sg_dma_address(sgsrc);
|
||||
dma_addr_t daddr = sg_dma_address(sgdst);
|
||||
dma_addr_t state_addr;
|
||||
u32 src_addr, dst_addr, len, n;
|
||||
bool nextin = false;
|
||||
bool nextout = false;
|
||||
int offsetout = 0;
|
||||
int ndesc_cdr = 0, err;
|
||||
|
||||
if (IS_ECB(rctx->flags))
|
||||
rctx->sa_state_base = 0;
|
||||
|
||||
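	/*
	 * split < datalen only for CTR requests whose 32-bit counter would
	 * overflow: the first split bytes use sa_state_ctr (original IV),
	 * the remainder uses sa_state (IV advanced past the wrap).
	 */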
if (split < datalen) {
|
||||
state_addr = rctx->sa_state_ctr_base;
|
||||
n = split;
|
||||
} else {
|
||||
state_addr = rctx->sa_state_base;
|
||||
n = datalen;
|
||||
}
|
||||
|
||||
do {
|
||||
if (nextin) {
|
||||
sgsrc = sg_next(sgsrc);
|
||||
remainin = sg_dma_len(sgsrc);
|
||||
if (remainin == 0)
|
||||
continue;
|
||||
|
||||
saddr = sg_dma_address(sgsrc);
|
||||
offsetin = 0;
|
||||
nextin = false;
|
||||
}
|
||||
|
||||
if (nextout) {
|
||||
sgdst = sg_next(sgdst);
|
||||
remainout = sg_dma_len(sgdst);
|
||||
if (remainout == 0)
|
||||
continue;
|
||||
|
||||
daddr = sg_dma_address(sgdst);
|
||||
offsetout = 0;
|
||||
nextout = false;
|
||||
}
|
||||
src_addr = saddr + offsetin;
|
||||
dst_addr = daddr + offsetout;
|
||||
|
||||
if (remainin == remainout) {
|
||||
len = remainin;
|
||||
if (len > n) {
|
||||
len = n;
|
||||
remainin -= n;
|
||||
remainout -= n;
|
||||
offsetin += n;
|
||||
offsetout += n;
|
||||
} else {
|
||||
nextin = true;
|
||||
nextout = true;
|
||||
}
|
||||
} else if (remainin < remainout) {
|
||||
len = remainin;
|
||||
if (len > n) {
|
||||
len = n;
|
||||
remainin -= n;
|
||||
remainout -= n;
|
||||
offsetin += n;
|
||||
offsetout += n;
|
||||
} else {
|
||||
offsetout += len;
|
||||
remainout -= len;
|
||||
nextin = true;
|
||||
}
|
||||
} else {
|
||||
len = remainout;
|
||||
if (len > n) {
|
||||
len = n;
|
||||
remainin -= n;
|
||||
remainout -= n;
|
||||
offsetin += n;
|
||||
offsetout += n;
|
||||
} else {
|
||||
offsetin += len;
|
||||
remainin -= len;
|
||||
nextout = true;
|
||||
}
|
||||
}
|
||||
n -= len;
|
||||
|
||||
cdesc->src_addr = src_addr;
|
||||
cdesc->dst_addr = dst_addr;
|
||||
cdesc->state_addr = state_addr;
|
||||
cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
|
||||
EIP93_PE_LENGTH_HOST_READY);
|
||||
cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);
|
||||
|
||||
if (n == 0) {
|
||||
n = datalen - split;
|
||||
split = datalen;
|
||||
state_addr = rctx->sa_state_base;
|
||||
}
|
||||
|
||||
if (n == 0)
|
||||
cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
|
||||
EIP93_DESC_LAST);
|
||||
|
||||
/*
|
||||
* Loop - Delay - No need to rollback
|
||||
* Maybe refine by slowing down at EIP93_RING_BUSY
|
||||
*/
|
||||
again:
|
||||
err = eip93_put_descriptor(mtk, cdesc);
|
||||
if (err) {
|
||||
usleep_range(EIP93_RING_BUSY_DELAY,
|
||||
EIP93_RING_BUSY_DELAY * 2);
|
||||
goto again;
|
||||
}
|
||||
/* Writing new descriptor count starts DMA action */
|
||||
writel(1, mtk->base + EIP93_REG_PE_CD_COUNT);
|
||||
|
||||
ndesc_cdr++;
|
||||
} while (n);
|
||||
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
int eip93_send_req(struct crypto_async_request *async,
|
||||
const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
|
||||
{
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
|
||||
struct eip93_device *mtk = ctx->mtk;
|
||||
struct scatterlist *src = rctx->sg_src;
|
||||
struct scatterlist *dst = rctx->sg_dst;
|
||||
struct sa_state *sa_state;
|
||||
struct eip93_descriptor cdesc;
|
||||
u32 flags = rctx->flags;
|
||||
int offsetin = 0, err;
|
||||
u32 datalen = rctx->assoclen + rctx->textsize;
|
||||
u32 split = datalen;
|
||||
u32 start, end, ctr, blocks;
|
||||
u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
|
||||
int crypto_async_idr;
|
||||
|
||||
rctx->sa_state_ctr = NULL;
|
||||
rctx->sa_state = NULL;
|
||||
|
||||
if (IS_ECB(flags))
|
||||
goto skip_iv;
|
||||
|
||||
memcpy(iv, reqiv, rctx->ivsize);
|
||||
|
||||
rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
|
||||
if (!rctx->sa_state)
|
||||
return -ENOMEM;
|
||||
|
||||
sa_state = rctx->sa_state;
|
||||
|
||||
memcpy(sa_state->state_iv, iv, rctx->ivsize);
|
||||
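	/*
	 * RFC3686 state IV layout: 32-bit nonce from setkey, 64-bit IV from
	 * the request, and a 32-bit block counter starting at 1.
	 */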
if (IS_RFC3686(flags)) {
|
||||
sa_state->state_iv[0] = ctx->sa_nonce;
|
||||
sa_state->state_iv[1] = iv[0];
|
||||
sa_state->state_iv[2] = iv[1];
|
||||
sa_state->state_iv[3] = cpu_to_be32(1);
|
||||
} else if (!IS_HMAC(flags) && IS_CTR(flags)) {
|
||||
/* Compute data length. */
|
||||
blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
|
||||
ctr = be32_to_cpu(iv[3]);
|
||||
/* Check 32bit counter overflow. */
|
||||
start = ctr;
|
||||
end = start + blocks - 1;
|
||||
if (end < start) {
|
||||
split = AES_BLOCK_SIZE * -start;
|
||||
/*
|
||||
* Increment the counter manually to cope with
|
||||
* the hardware counter overflow.
|
||||
*/
|
||||
iv[3] = 0xffffffff;
|
||||
crypto_inc((u8 *)iv, AES_BLOCK_SIZE);
|
||||
|
||||
rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
|
||||
GFP_KERNEL);
|
||||
if (!rctx->sa_state_ctr)
|
||||
goto free_sa_state;
|
||||
|
||||
memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
|
||||
memcpy(sa_state->state_iv, iv, rctx->ivsize);
|
||||
|
||||
rctx->sa_state_ctr_base = dma_map_single(mtk->dev, rctx->sa_state_ctr,
|
||||
sizeof(*rctx->sa_state_ctr),
|
||||
DMA_TO_DEVICE);
|
||||
err = dma_mapping_error(mtk->dev, rctx->sa_state_ctr_base);
|
||||
if (err)
|
||||
goto free_sa_state_ctr;
|
||||
}
|
||||
}
|
||||
|
||||
rctx->sa_state_base = dma_map_single(mtk->dev, rctx->sa_state,
|
||||
sizeof(*rctx->sa_state), DMA_TO_DEVICE);
|
||||
err = dma_mapping_error(mtk->dev, rctx->sa_state_base);
|
||||
if (err)
|
||||
goto free_sa_state_ctr_dma;
|
||||
|
||||
skip_iv:
|
||||
|
||||
cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
|
||||
EIP93_PE_CTRL_HOST_READY);
|
||||
cdesc.sa_addr = rctx->sa_record_base;
|
||||
cdesc.arc4_addr = 0;
|
||||
|
||||
scoped_guard(spinlock_bh, &mtk->ring->idr_lock)
|
||||
crypto_async_idr = idr_alloc(&mtk->ring->crypto_async_idr, async, 0,
|
||||
EIP93_RING_NUM - 1, GFP_ATOMIC);
|
||||
|
||||
cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
|
||||
FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);
|
||||
|
||||
rctx->cdesc = &cdesc;
|
||||
|
||||
/*
 * Map DMA_BIDIRECTIONAL to invalidate the cache for the destination
 * (implies __dma_cache_wback_inv).
 */
if (!dma_map_sg(mtk->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
|
||||
err = -ENOMEM;
|
||||
goto free_sa_state_ctr_dma;
|
||||
}
|
||||
|
||||
if (src != dst &&
|
||||
!dma_map_sg(mtk->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
|
||||
err = -ENOMEM;
|
||||
goto free_sg_dma;
|
||||
}
|
||||
|
||||
return eip93_scatter_combine(mtk, rctx, datalen, split, offsetin);
|
||||
|
||||
free_sg_dma:
|
||||
dma_unmap_sg(mtk->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
|
||||
free_sa_state_ctr_dma:
|
||||
if (rctx->sa_state_ctr)
|
||||
dma_unmap_single(mtk->dev, rctx->sa_state_ctr_base,
|
||||
sizeof(*rctx->sa_state_ctr),
|
||||
DMA_TO_DEVICE);
|
||||
free_sa_state_ctr:
|
||||
kfree(rctx->sa_state_ctr);
|
||||
if (rctx->sa_state)
|
||||
dma_unmap_single(mtk->dev, rctx->sa_state_base,
|
||||
sizeof(*rctx->sa_state),
|
||||
DMA_TO_DEVICE);
|
||||
free_sa_state:
|
||||
kfree(rctx->sa_state);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void eip93_unmap_dma(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
|
||||
struct scatterlist *reqsrc, struct scatterlist *reqdst)
|
||||
{
|
||||
u32 len = rctx->assoclen + rctx->textsize;
|
||||
u32 authsize = rctx->authsize;
|
||||
u32 flags = rctx->flags;
|
||||
u32 *otag;
|
||||
int i;
|
||||
|
||||
if (rctx->sg_src == rctx->sg_dst) {
|
||||
dma_unmap_sg(mtk->dev, rctx->sg_dst, rctx->dst_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
goto process_tag;
|
||||
}
|
||||
|
||||
dma_unmap_sg(mtk->dev, rctx->sg_src, rctx->src_nents,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (rctx->sg_src != reqsrc)
|
||||
eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);
|
||||
|
||||
dma_unmap_sg(mtk->dev, rctx->sg_dst, rctx->dst_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* SHA tags need conversion from net-to-host */
|
||||
process_tag:
|
||||
if (IS_DECRYPT(flags))
|
||||
authsize = 0;
|
||||
|
||||
if (authsize) {
|
||||
if (!IS_HASH_MD5(flags)) {
|
||||
otag = sg_virt(rctx->sg_dst) + len;
|
||||
for (i = 0; i < (authsize / 4); i++)
|
||||
otag[i] = be32_to_cpu(otag[i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (rctx->sg_dst != reqdst) {
|
||||
sg_copy_from_buffer(reqdst, sg_nents(reqdst),
|
||||
sg_virt(rctx->sg_dst), len + authsize);
|
||||
eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
|
||||
}
|
||||
}
|
||||
|
||||
void eip93_handle_result(struct eip93_device *mtk, struct eip93_cipher_reqctx *rctx,
|
||||
u8 *reqiv)
|
||||
{
|
||||
if (rctx->sa_state_ctr)
|
||||
dma_unmap_single(mtk->dev, rctx->sa_state_ctr_base,
|
||||
sizeof(*rctx->sa_state_ctr),
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (rctx->sa_state)
|
||||
dma_unmap_single(mtk->dev, rctx->sa_state_base,
|
||||
sizeof(*rctx->sa_state),
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (!IS_ECB(rctx->flags))
|
||||
memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);
|
||||
|
||||
kfree(rctx->sa_state_ctr);
|
||||
kfree(rctx->sa_state);
|
||||
}
|
||||
|
||||
/* Basically this sets the HMAC key */
int eip93_authenc_setkey(struct crypto_aead *aead, struct sa_record *sa,
|
||||
const u8 *authkey, unsigned int authkeylen)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
|
||||
struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_ahash *ahash_tfm;
|
||||
struct eip93_hash_reqctx *rctx;
|
||||
struct scatterlist sg[1];
|
||||
struct ahash_request *req;
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
const char *alg_name;
|
||||
u8 *ipad, *opad;
|
||||
int i, ret;
|
||||
|
||||
switch ((ctx->flags & EIP93_HASH_MASK)) {
|
||||
case EIP93_HASH_SHA256:
|
||||
alg_name = "sha256-eip93";
|
||||
break;
|
||||
case EIP93_HASH_SHA224:
|
||||
alg_name = "sha224-eip93";
|
||||
break;
|
||||
case EIP93_HASH_SHA1:
|
||||
alg_name = "sha1-eip93";
|
||||
break;
|
||||
case EIP93_HASH_MD5:
|
||||
alg_name = "md5-eip93";
|
||||
break;
|
||||
default: /* Impossible */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
|
||||
if (IS_ERR(ahash_tfm))
|
||||
return PTR_ERR(ahash_tfm);
|
||||
|
||||
req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
ret = -ENOMEM;
|
||||
goto err_ahash;
|
||||
}
|
||||
|
||||
ipad = kcalloc(2, SHA256_BLOCK_SIZE, GFP_KERNEL);
|
||||
if (!ipad) {
|
||||
ret = -ENOMEM;
|
||||
goto err_req;
|
||||
}
|
||||
opad = ipad + SHA256_BLOCK_SIZE;
|
||||
|
||||
rctx = ahash_request_ctx(req);
|
||||
crypto_init_wait(&wait);
|
||||
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_req_done, &wait);
|
||||
|
||||
/* Hash the key if > SHA256_BLOCK_SIZE */
|
||||
if (authkeylen > SHA256_BLOCK_SIZE) {
|
||||
sg_init_one(&sg[0], authkey, authkeylen);
|
||||
|
||||
ahash_request_set_crypt(req, sg, ipad, authkeylen);
|
||||
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
|
||||
|
||||
authkeylen = ctx->authsize;
|
||||
} else {
|
||||
memcpy(ipad, authkey, authkeylen);
|
||||
}
|
||||
|
||||
/* Copy to opad */
|
||||
memset(ipad + authkeylen, 0, SHA256_BLOCK_SIZE - authkeylen);
|
||||
memcpy(opad, ipad, SHA256_BLOCK_SIZE);
|
||||
|
||||
/* Pad with HMAC constants */
|
||||
for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
|
||||
ipad[i] ^= HMAC_IPAD_VALUE;
|
||||
opad[i] ^= HMAC_OPAD_VALUE;
|
||||
}
|
||||
|
||||
/* Disable HASH_FINALIZE for ipad and opad hash */
|
||||
rctx->no_finalize = true;
|
||||
|
||||
/* Hash ipad */
|
||||
sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
|
||||
ahash_request_set_crypt(req, sg, sa->sa_i_digest, SHA256_BLOCK_SIZE);
|
||||
ret = crypto_ahash_init(req);
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
/* Disable HASH_FINALIZE for ipad hash */
|
||||
rctx->no_finalize = true;
|
||||
|
||||
ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
/* Hash opad */
|
||||
sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
|
||||
ahash_request_set_crypt(req, sg, sa->sa_o_digest, SHA256_BLOCK_SIZE);
|
||||
ret = crypto_ahash_init(req);
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
/* Disable HASH_FINALIZE for opad hash */
|
||||
rctx->no_finalize = true;
|
||||
|
||||
ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
if (!IS_HASH_MD5(ctx->flags)) {
|
||||
for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
|
||||
u32 *ipad_hash = (u32 *)sa->sa_i_digest;
|
||||
u32 *opad_hash = (u32 *)sa->sa_o_digest;
|
||||
|
||||
ipad_hash[i] = cpu_to_be32(ipad_hash[i]);
|
||||
opad_hash[i] = cpu_to_be32(opad_hash[i]);
|
||||
}
|
||||
}
|
||||
|
||||
exit:
|
||||
kfree(ipad);
|
||||
err_req:
|
||||
ahash_request_free(req);
|
||||
err_ahash:
|
||||
crypto_free_ahash(ahash_tfm);
|
||||
|
||||
return ret;
|
||||
}
25 drivers/crypto/inside-secure/eip93/eip93-common.h Normal file
@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */

#ifndef _EIP93_COMMON_H_
#define _EIP93_COMMON_H_

#include "eip93-main.h"

void *eip93_get_descriptor(struct eip93_device *mtk);
int eip93_put_descriptor(struct eip93_device *mtk, struct eip93_descriptor *desc);

void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
			 const u32 flags);

int eip93_parse_ctrl_stat_err(struct eip93_device *mtk, int err);

int eip93_authenc_setkey(struct crypto_aead *aead, struct sa_record *sa,
			 const u8 *authkey, unsigned int authkeylen);

#endif /* _EIP93_COMMON_H_ */
16 drivers/crypto/inside-secure/eip93/eip93-des.h Normal file
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
#ifndef _EIP93_DES_H_
#define _EIP93_DES_H_

extern struct eip93_alg_template eip93_alg_ecb_des;
extern struct eip93_alg_template eip93_alg_cbc_des;
extern struct eip93_alg_template eip93_alg_ecb_des3_ede;
extern struct eip93_alg_template eip93_alg_cbc_des3_ede;

#endif /* _EIP93_DES_H_ */
909 drivers/crypto/inside-secure/eip93/eip93-hash.c Normal file
@@ -0,0 +1,909 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024
 *
 * Christian Marangi <ansuelsmth@gmail.com>
 */
|
||||
#include <crypto/sha1.h>
|
||||
#include <crypto/sha2.h>
|
||||
#include <crypto/md5.h>
|
||||
#include <crypto/hmac.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "eip93-cipher.h"
|
||||
#include "eip93-hash.h"
|
||||
#include "eip93-main.h"
|
||||
#include "eip93-common.h"
|
||||
#include "eip93-regs.h"
|
||||
|
||||
static void eip93_hash_free_data_blocks(struct ahash_request *req)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct mkt_hash_block *block;
|
||||
|
||||
list_for_each_entry(block, &rctx->blocks, list) {
|
||||
dma_unmap_single(rctx->mtk->dev, block->data_dma,
|
||||
SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
|
||||
kfree(block);
|
||||
}
|
||||
}
|
||||
|
||||
static void eip93_hash_free_sa_record(struct ahash_request *req)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
|
||||
if (IS_HMAC(ctx->flags)) {
|
||||
dma_unmap_single(rctx->mtk->dev, rctx->sa_record_hmac_base,
|
||||
sizeof(*rctx->sa_record_hmac), DMA_TO_DEVICE);
|
||||
kfree(rctx->sa_record_hmac);
|
||||
}
|
||||
|
||||
dma_unmap_single(rctx->mtk->dev, rctx->sa_record_base,
|
||||
sizeof(*rctx->sa_record), DMA_TO_DEVICE);
|
||||
kfree(rctx->sa_record);
|
||||
}
|
||||
|
||||
static void eip93_hash_free_sa_state(struct ahash_request *req)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
|
||||
dma_unmap_single(rctx->mtk->dev, rctx->sa_state_base,
|
||||
sizeof(*rctx->sa_state), DMA_TO_DEVICE);
|
||||
kfree(rctx->sa_state);
|
||||
}
|
||||
|
||||
static struct sa_state *eip93_hash_get_sa_state(struct ahash_request *req,
|
||||
dma_addr_t *sa_state_base)
|
||||
{
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct eip93_device *mtk = ctx->mtk;
|
||||
struct sa_state *sa_state;
|
||||
int ret;
|
||||
|
||||
sa_state = kzalloc(sizeof(*sa_state), GFP_KERNEL);
|
||||
if (!sa_state)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Init HASH constant */
|
||||
switch ((ctx->flags & EIP93_HASH_MASK)) {
|
||||
case EIP93_HASH_SHA256:
|
||||
u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
|
||||
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
|
||||
|
||||
memcpy(sa_state->state_i_digest, sha256_init, sizeof(sha256_init));
|
||||
break;
|
||||
case EIP93_HASH_SHA224:
|
||||
u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
|
||||
SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
|
||||
|
||||
memcpy(sa_state->state_i_digest, sha224_init, sizeof(sha224_init));
|
||||
break;
|
||||
case EIP93_HASH_SHA1:
|
||||
u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
|
||||
|
||||
memcpy(sa_state->state_i_digest, sha1_init, sizeof(sha1_init));
|
||||
break;
|
||||
case EIP93_HASH_MD5:
|
||||
u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
|
||||
|
||||
memcpy(sa_state->state_i_digest, md5_init, sizeof(md5_init));
|
||||
break;
|
||||
default: /* Impossible */
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
*sa_state_base = dma_map_single(mtk->dev, sa_state,
|
||||
sizeof(*sa_state), DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(mtk->dev, *sa_state_base);
|
||||
if (ret) {
|
||||
kfree(sa_state);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return sa_state;
|
||||
}
|
||||
|
||||
static int _eip93_hash_init(struct ahash_request *req, struct sa_state *sa_state,
|
||||
dma_addr_t sa_state_base)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct sa_record *sa_record, *sa_record_hmac;
|
||||
struct eip93_device *mtk = rctx->mtk;
|
||||
int digestsize;
|
||||
int ret;
|
||||
|
||||
sa_record = kzalloc(sizeof(*sa_record), GFP_KERNEL);
|
||||
if (!sa_record)
|
||||
return -ENOMEM;
|
||||
|
||||
if (IS_HMAC(ctx->flags)) {
|
||||
sa_record_hmac = kzalloc(sizeof(*sa_record_hmac), GFP_KERNEL);
|
||||
if (!sa_record_hmac) {
|
||||
ret = -ENOMEM;
|
||||
goto free_sa_record;
|
||||
}
|
||||
}
|
||||
|
||||
digestsize = crypto_ahash_digestsize(ahash);
|
||||
|
||||
eip93_set_sa_record(sa_record, 0, ctx->flags);
|
||||
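	/*
	 * Resume hashing from the digest kept in sa_state and save the
	 * updated digest back after each pass (HASH_FROM_STATE + SAVE_HASH),
	 * so the state carries over between update calls.
	 */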
sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
|
||||
sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
|
||||
sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
|
||||
sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
|
||||
EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
|
||||
sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
|
||||
sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
|
||||
digestsize / sizeof(u32));
|
||||
|
||||
/*
 * HMAC special handling
 * Enabling CMD_HMAC forces the inner hash to always be finalized.
 * This causes problems when handling messages larger than 64 bytes, as we
 * need to produce intermediate inner hashes while sending intermediate
 * 64-byte blocks.
 *
 * To handle this, enable CMD_HMAC only on the last block.
 * We make a duplicate of sa_record and, on the last descriptor, pass a
 * dedicated sa_record with CMD_HMAC enabled to make the EIP93 apply the
 * outer hash.
 */
if (IS_HMAC(ctx->flags)) {
|
||||
memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
|
||||
/* Copy pre-hashed opad for HMAC */
|
||||
memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);
|
||||
|
||||
/* Disable HMAC for hash normal sa_record */
|
||||
sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
|
||||
}
|
||||
|
||||
rctx->mtk = ctx->mtk;
|
||||
rctx->sa_record = sa_record;
|
||||
rctx->sa_record_base = dma_map_single(mtk->dev, rctx->sa_record,
|
||||
sizeof(*rctx->sa_record),
|
||||
DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(mtk->dev, rctx->sa_record_base);
|
||||
if (ret)
|
||||
goto free_sa_record;
|
||||
|
||||
if (IS_HMAC(ctx->flags)) {
|
||||
rctx->sa_record_hmac = sa_record_hmac;
|
||||
rctx->sa_record_hmac_base = dma_map_single(mtk->dev,
|
||||
rctx->sa_record_hmac,
|
||||
sizeof(*rctx->sa_record_hmac),
|
||||
DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(mtk->dev, rctx->sa_record_hmac_base);
|
||||
if (ret)
|
||||
goto free_sa_record_base;
|
||||
}
|
||||
|
||||
rctx->sa_state = sa_state;
|
||||
rctx->sa_state_base = sa_state_base;
|
||||
|
||||
rctx->len = 0;
|
||||
rctx->left_last = 0;
|
||||
rctx->no_finalize = false;
|
||||
INIT_LIST_HEAD(&rctx->blocks);
|
||||
|
||||
return 0;
|
||||
|
||||
free_sa_record_base:
|
||||
dma_unmap_single(mtk->dev, rctx->sa_record_base, sizeof(*rctx->sa_record),
|
||||
DMA_TO_DEVICE);
|
||||
free_sa_record:
|
||||
kfree(sa_record);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int eip93_hash_init(struct ahash_request *req)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct sa_state *sa_state;
|
||||
dma_addr_t sa_state_base;
|
||||
int ret;
|
||||
|
||||
sa_state = eip93_hash_get_sa_state(req, &sa_state_base);
|
||||
if (IS_ERR(sa_state))
|
||||
return PTR_ERR(sa_state);
|
||||
|
||||
ret = _eip93_hash_init(req, sa_state, sa_state_base);
|
||||
if (ret)
|
||||
eip93_hash_free_sa_state(req);
|
||||
|
||||
/* For HMAC setup the initial block for ipad */
|
||||
if (IS_HMAC(ctx->flags)) {
|
||||
struct mkt_hash_block *block;
|
||||
|
||||
block = kzalloc(sizeof(*block), GFP_KERNEL);
|
||||
if (!block) {
|
||||
eip93_hash_free_sa_record(req);
|
||||
eip93_hash_free_sa_state(req);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memcpy(block->data, ctx->ipad, SHA256_BLOCK_SIZE);
|
||||
|
||||
list_add(&block->list, &rctx->blocks);
|
||||
|
||||
rctx->len += SHA256_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void eip93_send_hash_req(struct crypto_async_request *async, dma_addr_t src_addr,
|
||||
u32 len, bool last)
|
||||
{
|
||||
struct ahash_request *req = ahash_request_cast(async);
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct eip93_device *mtk = rctx->mtk;
|
||||
struct eip93_descriptor cdesc = { };
|
||||
int ret;
|
||||
|
||||
cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
|
||||
EIP93_PE_CTRL_HOST_READY);
|
||||
cdesc.sa_addr = rctx->sa_record_base;
|
||||
cdesc.arc4_addr = 0;
|
||||
|
||||
cdesc.state_addr = rctx->sa_state_base;
|
||||
cdesc.src_addr = src_addr;
|
||||
cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
|
||||
EIP93_PE_LENGTH_HOST_READY);
|
||||
cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
|
||||
len);
|
||||
|
||||
cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);
|
||||
|
||||
if (last) {
|
||||
int crypto_async_idr;
|
||||
|
||||
/* For last block, pass sa_record with CMD_HMAC enabled */
|
||||
if (IS_HMAC(ctx->flags))
|
||||
cdesc.sa_addr = rctx->sa_record_hmac_base;
|
||||
|
||||
if (!rctx->no_finalize)
|
||||
cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
|
||||
|
||||
scoped_guard(spinlock_bh, &mtk->ring->idr_lock)
|
||||
crypto_async_idr = idr_alloc(&mtk->ring->crypto_async_idr, async, 0,
|
||||
EIP93_RING_NUM - 1, GFP_ATOMIC);
|
||||
|
||||
cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
|
||||
FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
|
||||
}
|
||||
|
||||
again:
|
||||
ret = eip93_put_descriptor(mtk, &cdesc);
|
||||
if (ret) {
|
||||
usleep_range(EIP93_RING_BUSY_DELAY,
|
||||
EIP93_RING_BUSY_DELAY * 2);
|
||||
goto again;
|
||||
}
|
||||
|
||||
/* Writing new descriptor count starts DMA action */
|
||||
writel(1, mtk->base + EIP93_REG_PE_CD_COUNT);
|
||||
}
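eip93_put_descriptor() can fail when the command ring is full, in which case the code above sleeps briefly and retries. A hedged sketch of the same retry-with-delay pattern in plain C; the helper name and the bounded retry count are made up for the example (the driver itself retries indefinitely):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Pretend ring with room for 4 entries; returns -EAGAIN when full */
static int slots_used;

static int put_descriptor(int value)
{
	(void)value;
	if (slots_used >= 4)
		return -EAGAIN;
	slots_used++;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		int tries = 0;

		/* retry with a short delay while the ring is busy */
		while (put_descriptor(i) && tries++ < 3)
			usleep(500);

		if (tries > 3)
			printf("descriptor %d dropped, ring still busy\n", i);
		else
			printf("descriptor %d queued\n", i);
	}
	return 0;
}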
|
||||
|
||||
static int eip93_hash_update(struct ahash_request *req)
|
||||
{
|
||||
struct crypto_async_request *async = &req->base;
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
unsigned int to_consume = req->nbytes;
|
||||
struct eip93_device *mtk = rctx->mtk;
|
||||
struct mkt_hash_block *block;
|
||||
int read = 0;
|
||||
int ret;
|
||||
|
||||
/* If the request is 0 length, do nothing */
|
||||
if (!to_consume)
|
||||
return 0;
|
||||
|
||||
/*
* Check if we are on a second iteration.
* 1. Try to fill the first block up to 64 bytes (if not already full)
* 2. Send the full block (if we have more data to consume)
*/
|
||||
if (rctx->len > 0) {
|
||||
int offset = SHA256_BLOCK_SIZE - rctx->left_last;
|
||||
|
||||
block = list_first_entry(&rctx->blocks,
|
||||
struct mkt_hash_block, list);
|
||||
|
||||
/* Fill the first block */
|
||||
if (rctx->left_last) {
|
||||
read += sg_pcopy_to_buffer(req->src, sg_nents(req->src),
|
||||
block->data + offset,
|
||||
min(to_consume, rctx->left_last),
|
||||
0);
|
||||
to_consume -= read;
|
||||
rctx->left_last -= read;
|
||||
}
|
||||
|
||||
/* Send descriptor if we have more data to consume */
|
||||
if (to_consume > 0) {
|
||||
block->data_dma = dma_map_single(mtk->dev, block->data,
|
||||
SHA256_BLOCK_SIZE,
|
||||
DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(mtk->dev, block->data_dma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eip93_send_hash_req(async, block->data_dma,
|
||||
SHA256_BLOCK_SIZE, false);
|
||||
}
|
||||
}
|
||||
|
||||
/*
* Consume the remaining data.
* 1. Loop until all the data is consumed, in blocks of 64 bytes
* 2. Send each full 64-byte block
* 3. Hold back the last block so a future update(), or final(), can
*    enable the HASH_FINALIZE bit.
*/
|
||||
while (to_consume > 0) {
|
||||
int to_read = min(to_consume, SHA256_BLOCK_SIZE);
|
||||
|
||||
block = kzalloc(sizeof(*block), GFP_KERNEL);
|
||||
if (!block)
|
||||
return -ENOMEM;
|
||||
|
||||
read += sg_pcopy_to_buffer(req->src, sg_nents(req->src),
|
||||
block->data, to_read,
|
||||
read);
|
||||
|
||||
list_add(&block->list, &rctx->blocks);
|
||||
|
||||
to_consume -= to_read;
|
||||
rctx->left_last = SHA256_BLOCK_SIZE - to_read;
|
||||
|
||||
/* Send descriptor if we have more data to consume */
|
||||
if (to_consume > 0) {
|
||||
block->data_dma = dma_map_single(mtk->dev, block->data,
|
||||
SHA256_BLOCK_SIZE,
|
||||
DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(mtk->dev, block->data_dma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eip93_send_hash_req(async, block->data_dma,
|
||||
SHA256_BLOCK_SIZE, false);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Update counter with processed bytes.
|
||||
* This is also used to check if we are at the second iteration
|
||||
* of an update().
|
||||
*/
|
||||
rctx->len += req->nbytes;
|
||||
|
||||
return 0;
|
||||
}
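update() accumulates input into 64-byte blocks: it first tops up the tail of the previous block and remembers in left_last how many bytes of the newest block are still unused. A small stand-alone sketch of the same accounting (the block size and field names come from the driver, everything else is illustrative):

#include <stdio.h>

#define BLOCK_SIZE 64

int main(void)
{
	unsigned long long len = 0;	/* total bytes seen so far */
	unsigned int left_last = 0;	/* free bytes in the newest block */
	unsigned int updates[] = { 10, 100, 20 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int to_consume = updates[i];

		/* First top up the partially filled block, if any */
		if (left_last) {
			unsigned int fill = to_consume < left_last ?
					    to_consume : left_last;
			left_last -= fill;
			to_consume -= fill;
		}

		/* Then start new blocks for whatever is left */
		while (to_consume) {
			unsigned int take = to_consume < BLOCK_SIZE ?
					    to_consume : BLOCK_SIZE;
			left_last = BLOCK_SIZE - take;
			to_consume -= take;
		}

		len += updates[i];
		printf("after update %u: len=%llu left_last=%u\n",
		       i, len, left_last);
	}
	return 0;
}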
|
||||
|
||||
void eip93_hash_handle_result(struct crypto_async_request *async, int err)
|
||||
{
|
||||
struct ahash_request *req = ahash_request_cast(async);
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct sa_state *sa_state = rctx->sa_state;
|
||||
int i;
|
||||
|
||||
/* Unmap and sync sa_state for host */
|
||||
dma_unmap_single(rctx->mtk->dev, rctx->sa_state_base,
|
||||
sizeof(*sa_state), DMA_FROM_DEVICE);
|
||||
|
||||
/*
* With no_finalize, assume a SHA256_DIGEST_SIZE buffer is passed.
* This handles SHA224, which has a 32-byte intermediate digest.
*/
|
||||
if (rctx->no_finalize)
|
||||
digestsize = SHA256_DIGEST_SIZE;
|
||||
|
||||
/* Bytes need to be swapped for req->result */
|
||||
if (!IS_HASH_MD5(ctx->flags)) {
|
||||
for (i = 0; i < digestsize / sizeof(u32); i++) {
|
||||
u32 *digest = (u32 *)sa_state->state_i_digest;
|
||||
|
||||
digest[i] = be32_to_cpu(digest[i]);
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(req->result, sa_state->state_i_digest, digestsize);
|
||||
|
||||
kfree(sa_state);
|
||||
eip93_hash_free_data_blocks(req);
|
||||
eip93_hash_free_sa_record(req);
|
||||
|
||||
ahash_request_complete(req, err);
|
||||
}
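The completion handler converts the big-endian digest words coming back from the engine into the byte order expected in req->result (MD5 excepted). A sketch of the same word-wise conversion in plain C, using a hypothetical 8-byte state buffer and a hand-rolled be32_to_cpu() equivalent:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for be32_to_cpu(): assemble a host-order word from BE bytes */
static uint32_t be32_to_host(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* Pretend digest state as the engine would leave it */
	uint8_t state[8] = { 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14 };
	uint32_t result[2];

	/* Word-wise conversion before copying into the user's result buffer */
	for (int i = 0; i < 2; i++)
		result[i] = be32_to_host(state + 4 * i);

	for (int i = 0; i < 2; i++)
		printf("word %d: 0x%08x\n", i, result[i]);
	return 0;
}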
|
||||
|
||||
static int eip93_hash_final(struct ahash_request *req)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct crypto_async_request *async = &req->base;
|
||||
struct eip93_device *mtk = rctx->mtk;
|
||||
struct mkt_hash_block *block;
|
||||
int ret;
|
||||
|
||||
/* EIP93 can't handle zero bytes hash */
|
||||
if (!rctx->len && !IS_HMAC(ctx->flags)) {
|
||||
switch ((ctx->flags & EIP93_HASH_MASK)) {
|
||||
case EIP93_HASH_SHA256:
|
||||
memcpy(req->result, sha256_zero_message_hash,
|
||||
SHA256_DIGEST_SIZE);
|
||||
break;
|
||||
case EIP93_HASH_SHA224:
|
||||
memcpy(req->result, sha224_zero_message_hash,
|
||||
SHA224_DIGEST_SIZE);
|
||||
break;
|
||||
case EIP93_HASH_SHA1:
|
||||
memcpy(req->result, sha1_zero_message_hash,
|
||||
SHA1_DIGEST_SIZE);
|
||||
break;
|
||||
case EIP93_HASH_MD5:
|
||||
memcpy(req->result, md5_zero_message_hash,
|
||||
MD5_DIGEST_SIZE);
|
||||
break;
|
||||
default: /* Impossible */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
eip93_hash_free_sa_state(req);
|
||||
eip93_hash_free_sa_record(req);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Send last block */
|
||||
block = list_first_entry(&rctx->blocks, struct mkt_hash_block, list);
|
||||
|
||||
block->data_dma = dma_map_single(mtk->dev, block->data,
|
||||
SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
|
||||
ret = dma_mapping_error(mtk->dev, block->data_dma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
eip93_send_hash_req(async, block->data_dma,
|
||||
SHA256_BLOCK_SIZE - rctx->left_last,
|
||||
true);
|
||||
|
||||
return -EINPROGRESS;
|
||||
}
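For a zero-length, non-HMAC request, final() above copies a precomputed zero-message digest instead of touching the hardware. A sketch of the same shortcut for SHA-256, with the well-known empty-message digest written out as a constant (the helper name is made up; the kernel exports the constant as sha256_zero_message_hash):

#include <stdio.h>
#include <string.h>

/* SHA-256 of the empty message */
static const unsigned char sha256_empty[32] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};

static int fake_final(unsigned long len, unsigned char *result)
{
	/* Zero bytes hashed: return the constant, skip the engine */
	if (!len) {
		memcpy(result, sha256_empty, sizeof(sha256_empty));
		return 0;
	}
	return -1;	/* would be dispatched to the hardware instead */
}

int main(void)
{
	unsigned char out[32];

	if (!fake_final(0, out))
		for (int i = 0; i < 32; i++)
			printf("%02x", out[i]);
	printf("\n");
	return 0;
}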
|
||||
|
||||
static int eip93_hash_finup(struct ahash_request *req)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = eip93_hash_update(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return eip93_hash_final(req);
|
||||
}
|
||||
|
||||
static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
|
||||
u32 keylen)
|
||||
{
|
||||
unsigned int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
|
||||
struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_ahash *ahash_tfm;
|
||||
struct eip93_hash_reqctx *rctx;
|
||||
struct scatterlist sg[1];
|
||||
struct ahash_request *req;
|
||||
DECLARE_CRYPTO_WAIT(wait);
|
||||
const char *alg_name;
|
||||
int i, ret = 0;
|
||||
u8 *opad;
|
||||
|
||||
switch ((ctx->flags & EIP93_HASH_MASK)) {
|
||||
case EIP93_HASH_SHA256:
|
||||
alg_name = "sha256-eip93";
|
||||
break;
|
||||
case EIP93_HASH_SHA224:
|
||||
alg_name = "sha224-eip93";
|
||||
break;
|
||||
case EIP93_HASH_SHA1:
|
||||
alg_name = "sha1-eip93";
|
||||
break;
|
||||
case EIP93_HASH_MD5:
|
||||
alg_name = "md5-eip93";
|
||||
break;
|
||||
default: /* Impossible */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
|
||||
if (IS_ERR(ahash_tfm))
|
||||
return PTR_ERR(ahash_tfm);
|
||||
|
||||
req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
ret = -ENOMEM;
|
||||
goto err_ahash;
|
||||
}
|
||||
|
||||
opad = kzalloc(SHA256_BLOCK_SIZE, GFP_KERNEL);
|
||||
if (!opad) {
|
||||
ret = -ENOMEM;
|
||||
goto err_req;
|
||||
}
|
||||
|
||||
rctx = ahash_request_ctx(req);
|
||||
crypto_init_wait(&wait);
|
||||
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_req_done, &wait);
|
||||
|
||||
/* Hash the key if > SHA256_BLOCK_SIZE */
|
||||
if (keylen > SHA256_BLOCK_SIZE) {
|
||||
sg_init_one(&sg[0], key, keylen);
|
||||
|
||||
ahash_request_set_crypt(req, sg, ctx->ipad, keylen);
|
||||
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
|
||||
|
||||
keylen = digestsize;
|
||||
} else {
|
||||
memcpy(ctx->ipad, key, keylen);
|
||||
}
|
||||
|
||||
/* Copy to opad */
|
||||
memset(ctx->ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
|
||||
memcpy(opad, ctx->ipad, SHA256_BLOCK_SIZE);
|
||||
|
||||
/* Pad with HMAC constants */
|
||||
for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
|
||||
ctx->ipad[i] ^= HMAC_IPAD_VALUE;
|
||||
opad[i] ^= HMAC_OPAD_VALUE;
|
||||
}
|
||||
|
||||
sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
|
||||
|
||||
/* Hash opad */
|
||||
ahash_request_set_crypt(req, sg, ctx->opad, SHA256_BLOCK_SIZE);
|
||||
ret = crypto_ahash_init(req);
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
/* Disable HASH_FINALIZE for opad hash */
|
||||
rctx->no_finalize = true;
|
||||
|
||||
ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
|
||||
if (ret)
|
||||
goto exit;
|
||||
|
||||
if (!IS_HASH_MD5(ctx->flags)) {
|
||||
u32 *opad_hash = (u32 *)ctx->opad;
|
||||
|
||||
for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
|
||||
opad_hash[i] = cpu_to_be32(opad_hash[i]);
|
||||
}
|
||||
|
||||
exit:
|
||||
kfree(opad);
|
||||
err_req:
|
||||
ahash_request_free(req);
|
||||
err_ahash:
|
||||
crypto_free_ahash(ahash_tfm);
|
||||
|
||||
return ret;
|
||||
}
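setkey() above follows the standard HMAC key preprocessing: keys longer than the block size are hashed first, the key is zero-padded to 64 bytes, and the result is XORed with the 0x36/0x5c constants to form ipad and opad. A minimal sketch of just the padding and XOR step (key hashing omitted; the constants come from RFC 2104, the key is illustrative):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c

int main(void)
{
	const unsigned char key[] = "secret";	/* illustrative short key */
	unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

	/* Copy the key and zero-pad up to the block size.
	 * (A key longer than BLOCK_SIZE would be hashed first.) */
	memset(ipad, 0, sizeof(ipad));
	memcpy(ipad, key, sizeof(key) - 1);
	memcpy(opad, ipad, sizeof(opad));

	/* XOR with the HMAC inner/outer pad constants */
	for (int i = 0; i < BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
	return 0;
}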
|
||||
|
||||
static int eip93_hash_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
|
||||
struct eip93_alg_template, alg.ahash.halg.base);
|
||||
|
||||
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
||||
sizeof(struct eip93_hash_reqctx));
|
||||
|
||||
ctx->mtk = tmpl->mtk;
|
||||
ctx->flags = tmpl->flags;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int eip93_hash_digest(struct ahash_request *req)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = eip93_hash_init(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return eip93_hash_finup(req);
|
||||
}
|
||||
|
||||
static int eip93_hash_import(struct ahash_request *req, const void *in)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
const struct eip93_hash_export_state *state = in;
|
||||
int ret;
|
||||
|
||||
ret = _eip93_hash_init(req, state->sa_state, state->sa_state_base);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
rctx->len = state->len;
|
||||
rctx->left_last = state->left_last;
|
||||
memcpy(&rctx->blocks, &state->blocks, sizeof(rctx->blocks));
|
||||
|
||||
return 0;
|
||||
err:
|
||||
eip93_hash_free_data_blocks(req);
|
||||
eip93_hash_free_sa_state(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int eip93_hash_export(struct ahash_request *req, void *out)
|
||||
{
|
||||
struct eip93_hash_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct eip93_hash_export_state *state = out;
|
||||
|
||||
state->sa_state = rctx->sa_state;
|
||||
state->sa_state_base = rctx->sa_state_base;
|
||||
state->len = rctx->len;
|
||||
state->left_last = rctx->left_last;
|
||||
memcpy(&state->blocks, &rctx->blocks, sizeof(rctx->blocks));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct eip93_alg_template eip93_alg_md5 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_MD5,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = MD5_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "md5",
|
||||
.cra_driver_name = "md5-eip93",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_sha1 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_SHA1,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = SHA1_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "sha1",
|
||||
.cra_driver_name = "sha1-eip93",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = SHA1_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_sha224 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_SHA224,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = SHA224_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "sha224",
|
||||
.cra_driver_name = "sha224-eip93",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_sha256 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_SHA256,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = SHA256_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "sha256",
|
||||
.cra_driver_name = "sha256-eip93",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = SHA256_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_hmac_md5 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.setkey = eip93_hash_hmac_setkey,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = MD5_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "hmac(md5)",
|
||||
.cra_driver_name = "hmac(md5-eip93)",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_hmac_sha1 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.setkey = eip93_hash_hmac_setkey,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = SHA1_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "hmac(sha1)",
|
||||
.cra_driver_name = "hmac(sha1-eip93)",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = SHA1_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_hmac_sha224 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.setkey = eip93_hash_hmac_setkey,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = SHA224_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "hmac(sha224)",
|
||||
.cra_driver_name = "hmac(sha224-eip93)",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
struct eip93_alg_template eip93_alg_hmac_sha256 = {
|
||||
.type = EIP93_ALG_TYPE_HASH,
|
||||
.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
|
||||
.alg.ahash = {
|
||||
.init = eip93_hash_init,
|
||||
.update = eip93_hash_update,
|
||||
.final = eip93_hash_final,
|
||||
.finup = eip93_hash_finup,
|
||||
.digest = eip93_hash_digest,
|
||||
.setkey = eip93_hash_hmac_setkey,
|
||||
.export = eip93_hash_export,
|
||||
.import = eip93_hash_import,
|
||||
.halg = {
|
||||
.digestsize = SHA256_DIGEST_SIZE,
|
||||
.statesize = sizeof(struct eip93_hash_export_state),
|
||||
.base = {
|
||||
.cra_name = "hmac(sha256)",
|
||||
.cra_driver_name = "hmac(sha256-eip93)",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ALLOCATES_MEMORY,
|
||||
.cra_blocksize = SHA256_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct eip93_hash_ctx),
|
||||
.cra_init = eip93_hash_cra_init,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
72
drivers/crypto/inside-secure/eip93/eip93-hash.h
Normal file
@@ -0,0 +1,72 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0
|
||||
*
|
||||
* Copyright (C) 2019 - 2021
|
||||
*
|
||||
* Richard van Schagen <vschagen@icloud.com>
|
||||
* Christian Marangi <ansuelsmth@gmail.com>
|
||||
*/
|
||||
#ifndef _EIP93_HASH_H_
|
||||
#define _EIP93_HASH_H_
|
||||
|
||||
#include <crypto/sha2.h>
|
||||
|
||||
#include "eip93-main.h"
|
||||
|
||||
struct eip93_hash_ctx {
|
||||
struct eip93_device *mtk;
|
||||
u32 flags;
|
||||
|
||||
u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
|
||||
u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
|
||||
};
|
||||
|
||||
struct eip93_hash_reqctx {
|
||||
struct eip93_device *mtk;
|
||||
|
||||
struct sa_record *sa_record;
|
||||
dma_addr_t sa_record_base;
|
||||
|
||||
struct sa_record *sa_record_hmac;
|
||||
dma_addr_t sa_record_hmac_base;
|
||||
|
||||
struct sa_state *sa_state;
|
||||
dma_addr_t sa_state_base;
|
||||
|
||||
/* Don't enable HASH_FINALIZE when last block is sent */
|
||||
bool no_finalize;
|
||||
|
||||
/*
|
||||
* EIP93 requires data to be accumulated in blocks of 64 bytes
|
||||
* for intermediate hash calculation.
|
||||
*/
|
||||
u64 len;
|
||||
u32 left_last;
|
||||
struct list_head blocks;
|
||||
};
|
||||
|
||||
struct mkt_hash_block {
|
||||
struct list_head list;
|
||||
u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
|
||||
dma_addr_t data_dma;
|
||||
};
|
||||
|
||||
struct eip93_hash_export_state {
|
||||
u64 len;
|
||||
u32 left_last;
|
||||
struct sa_state *sa_state;
|
||||
dma_addr_t sa_state_base;
|
||||
struct list_head blocks;
|
||||
};
|
||||
|
||||
void eip93_hash_handle_result(struct crypto_async_request *async, int err);
|
||||
|
||||
extern struct eip93_alg_template eip93_alg_md5;
|
||||
extern struct eip93_alg_template eip93_alg_sha1;
|
||||
extern struct eip93_alg_template eip93_alg_sha224;
|
||||
extern struct eip93_alg_template eip93_alg_sha256;
|
||||
extern struct eip93_alg_template eip93_alg_hmac_md5;
|
||||
extern struct eip93_alg_template eip93_alg_hmac_sha1;
|
||||
extern struct eip93_alg_template eip93_alg_hmac_sha224;
|
||||
extern struct eip93_alg_template eip93_alg_hmac_sha256;
|
||||
|
||||
#endif /* _EIP93_HASH_H_ */
|
||||
502
drivers/crypto/inside-secure/eip93/eip93-main.c
Normal file
@@ -0,0 +1,502 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2019 - 2021
|
||||
*
|
||||
* Richard van Schagen <vschagen@icloud.com>
|
||||
* Christian Marangi <ansuelsmth@gmail.com>
|
||||
*/
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/ctr.h>
|
||||
|
||||
#include "eip93-main.h"
|
||||
#include "eip93-regs.h"
|
||||
#include "eip93-common.h"
|
||||
#include "eip93-cipher.h"
|
||||
#include "eip93-aes.h"
|
||||
#include "eip93-des.h"
|
||||
#include "eip93-aead.h"
|
||||
#include "eip93-hash.h"
|
||||
|
||||
static struct eip93_alg_template *eip93_algs[] = {
|
||||
&eip93_alg_ecb_des,
|
||||
&eip93_alg_cbc_des,
|
||||
&eip93_alg_ecb_des3_ede,
|
||||
&eip93_alg_cbc_des3_ede,
|
||||
&eip93_alg_ecb_aes,
|
||||
&eip93_alg_cbc_aes,
|
||||
&eip93_alg_ctr_aes,
|
||||
&eip93_alg_rfc3686_aes,
|
||||
&eip93_alg_authenc_hmac_md5_cbc_des,
|
||||
&eip93_alg_authenc_hmac_sha1_cbc_des,
|
||||
&eip93_alg_authenc_hmac_sha224_cbc_des,
|
||||
&eip93_alg_authenc_hmac_sha256_cbc_des,
|
||||
&eip93_alg_authenc_hmac_md5_cbc_des3_ede,
|
||||
&eip93_alg_authenc_hmac_sha1_cbc_des3_ede,
|
||||
&eip93_alg_authenc_hmac_sha224_cbc_des3_ede,
|
||||
&eip93_alg_authenc_hmac_sha256_cbc_des3_ede,
|
||||
&eip93_alg_authenc_hmac_md5_cbc_aes,
|
||||
&eip93_alg_authenc_hmac_sha1_cbc_aes,
|
||||
&eip93_alg_authenc_hmac_sha224_cbc_aes,
|
||||
&eip93_alg_authenc_hmac_sha256_cbc_aes,
|
||||
&eip93_alg_authenc_hmac_md5_rfc3686_aes,
|
||||
&eip93_alg_authenc_hmac_sha1_rfc3686_aes,
|
||||
&eip93_alg_authenc_hmac_sha224_rfc3686_aes,
|
||||
&eip93_alg_authenc_hmac_sha256_rfc3686_aes,
|
||||
&eip93_alg_md5,
|
||||
&eip93_alg_sha1,
|
||||
&eip93_alg_sha224,
|
||||
&eip93_alg_sha256,
|
||||
&eip93_alg_hmac_md5,
|
||||
&eip93_alg_hmac_sha1,
|
||||
&eip93_alg_hmac_sha224,
|
||||
&eip93_alg_hmac_sha256,
|
||||
};
|
||||
|
||||
inline void eip93_irq_disable(struct eip93_device *mtk, u32 mask)
|
||||
{
|
||||
__raw_writel(mask, mtk->base + EIP93_REG_MASK_DISABLE);
|
||||
}
|
||||
|
||||
inline void eip93_irq_enable(struct eip93_device *mtk, u32 mask)
|
||||
{
|
||||
__raw_writel(mask, mtk->base + EIP93_REG_MASK_ENABLE);
|
||||
}
|
||||
|
||||
inline void eip93_irq_clear(struct eip93_device *mtk, u32 mask)
|
||||
{
|
||||
__raw_writel(mask, mtk->base + EIP93_REG_INT_CLR);
|
||||
}
|
||||
|
||||
static void eip93_unregister_algs(unsigned int i)
|
||||
{
|
||||
unsigned int j;
|
||||
|
||||
for (j = 0; j < i; j++) {
|
||||
switch (eip93_algs[j]->type) {
|
||||
case EIP93_ALG_TYPE_SKCIPHER:
|
||||
crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
|
||||
break;
|
||||
case EIP93_ALG_TYPE_AEAD:
|
||||
crypto_unregister_aead(&eip93_algs[j]->alg.aead);
|
||||
break;
|
||||
case EIP93_ALG_TYPE_HASH:
|
||||
crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int eip93_register_algs(struct eip93_device *mtk, u32 supported_algo_flags)
|
||||
{
|
||||
unsigned int i;
|
||||
int ret = 0;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) {
|
||||
u32 alg_flags = eip93_algs[i]->flags;
|
||||
|
||||
eip93_algs[i]->mtk = mtk;
|
||||
|
||||
if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
|
||||
!(supported_algo_flags & EIP93_PE_OPTION_TDES))
|
||||
continue;
|
||||
|
||||
if (IS_AES(alg_flags)) {
|
||||
if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
|
||||
continue;
|
||||
|
||||
if (!IS_HMAC(alg_flags)) {
|
||||
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
|
||||
eip93_algs[i]->alg.skcipher.max_keysize =
|
||||
AES_KEYSIZE_128;
|
||||
|
||||
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
|
||||
eip93_algs[i]->alg.skcipher.max_keysize =
|
||||
AES_KEYSIZE_192;
|
||||
|
||||
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
|
||||
eip93_algs[i]->alg.skcipher.max_keysize =
|
||||
AES_KEYSIZE_256;
|
||||
|
||||
if (IS_RFC3686(alg_flags))
|
||||
eip93_algs[i]->alg.skcipher.max_keysize +=
|
||||
CTR_RFC3686_NONCE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
if (IS_HASH_MD5(alg_flags) &&
|
||||
!(supported_algo_flags & EIP93_PE_OPTION_MD5))
|
||||
continue;
|
||||
|
||||
if (IS_HASH_SHA1(alg_flags) &&
|
||||
!(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
|
||||
continue;
|
||||
|
||||
if (IS_HASH_SHA224(alg_flags) &&
|
||||
!(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
|
||||
continue;
|
||||
|
||||
if (IS_HASH_SHA256(alg_flags) &&
|
||||
!(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
|
||||
continue;
|
||||
|
||||
switch (eip93_algs[i]->type) {
|
||||
case EIP93_ALG_TYPE_SKCIPHER:
|
||||
ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
|
||||
break;
|
||||
case EIP93_ALG_TYPE_AEAD:
|
||||
ret = crypto_register_aead(&eip93_algs[i]->alg.aead);
|
||||
break;
|
||||
case EIP93_ALG_TYPE_HASH:
|
||||
ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash);
|
||||
break;
|
||||
}
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
eip93_unregister_algs(i);
|
||||
|
||||
return ret;
|
||||
}
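Registration above skips templates whose required algorithm bits are absent from the PE option register. The same filter pattern, reduced to a table walk over a capability bitmask (flag values, names, and the pretend option register are made up for this sketch):

#include <stdio.h>

#define CAP_AES  (1u << 0)
#define CAP_SHA1 (1u << 1)
#define CAP_MD5  (1u << 2)

struct template {
	const char *name;
	unsigned int needs;	/* capability bits this algorithm requires */
};

int main(void)
{
	const struct template algs[] = {
		{ "cbc(aes)",   CAP_AES },
		{ "hmac(sha1)", CAP_SHA1 },
		{ "hmac(md5)",  CAP_MD5 },
	};
	unsigned int supported = CAP_AES | CAP_SHA1;	/* pretend option reg */

	for (unsigned int i = 0; i < 3; i++) {
		if ((algs[i].needs & supported) != algs[i].needs) {
			printf("skip %s\n", algs[i].name);
			continue;
		}
		printf("register %s\n", algs[i].name);
	}
	return 0;
}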
|
||||
|
||||
static void eip93_handle_result_descriptor(struct eip93_device *mtk)
|
||||
{
|
||||
struct crypto_async_request *async;
|
||||
struct eip93_descriptor *rdesc;
|
||||
u16 desc_flags, crypto_idr;
|
||||
bool last_entry;
|
||||
int handled, left, err;
|
||||
u32 pe_ctrl_stat;
|
||||
u32 pe_length;
|
||||
|
||||
get_more:
|
||||
handled = 0;
|
||||
|
||||
left = readl(mtk->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT;
|
||||
|
||||
if (!left) {
|
||||
eip93_irq_clear(mtk, EIP93_INT_RDR_THRESH);
|
||||
eip93_irq_enable(mtk, EIP93_INT_RDR_THRESH);
|
||||
return;
|
||||
}
|
||||
|
||||
last_entry = false;
|
||||
|
||||
while (left) {
|
||||
rdesc = eip93_get_descriptor(mtk);
|
||||
if (IS_ERR(rdesc)) {
|
||||
dev_err(mtk->dev, "Ndesc: %d nreq: %d\n",
|
||||
handled, left);
|
||||
err = -EIO;
|
||||
break;
|
||||
}
|
||||
/* make sure DMA is finished writing */
|
||||
do {
|
||||
pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word);
|
||||
pe_length = READ_ONCE(rdesc->pe_length_word);
|
||||
} while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) !=
|
||||
EIP93_PE_CTRL_PE_READY ||
|
||||
FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) !=
|
||||
EIP93_PE_LENGTH_PE_READY);
|
||||
|
||||
err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE |
|
||||
EIP93_PE_CTRL_PE_EXT_ERR |
|
||||
EIP93_PE_CTRL_PE_SEQNUM_ERR |
|
||||
EIP93_PE_CTRL_PE_PAD_ERR |
|
||||
EIP93_PE_CTRL_PE_AUTH_ERR);
|
||||
|
||||
desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id);
|
||||
crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id);
|
||||
|
||||
writel(1, mtk->base + EIP93_REG_PE_RD_COUNT);
|
||||
eip93_irq_clear(mtk, EIP93_INT_RDR_THRESH);
|
||||
|
||||
handled++;
|
||||
left--;
|
||||
|
||||
if (desc_flags & EIP93_DESC_LAST) {
|
||||
last_entry = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!last_entry)
|
||||
goto get_more;
|
||||
|
||||
/* Get crypto async ref only for last descriptor */
|
||||
scoped_guard(spinlock_bh, &mtk->ring->idr_lock) {
|
||||
async = idr_find(&mtk->ring->crypto_async_idr, crypto_idr);
|
||||
idr_remove(&mtk->ring->crypto_async_idr, crypto_idr);
|
||||
}
|
||||
|
||||
/* Parse error in ctrl stat word */
|
||||
err = eip93_parse_ctrl_stat_err(mtk, err);
|
||||
|
||||
if (desc_flags & EIP93_DESC_SKCIPHER)
|
||||
eip93_skcipher_handle_result(async, err);
|
||||
|
||||
if (desc_flags & EIP93_DESC_AEAD)
|
||||
eip93_aead_handle_result(async, err);
|
||||
|
||||
if (desc_flags & EIP93_DESC_HASH)
|
||||
eip93_hash_handle_result(async, err);
|
||||
|
||||
goto get_more;
|
||||
}
|
||||
|
||||
static void eip93_done_task(unsigned long data)
|
||||
{
|
||||
struct eip93_device *mtk = (struct eip93_device *)data;
|
||||
|
||||
eip93_handle_result_descriptor(mtk);
|
||||
}
|
||||
|
||||
static irqreturn_t eip93_irq_handler(int irq, void *data)
|
||||
{
|
||||
struct eip93_device *mtk = data;
|
||||
u32 irq_status;
|
||||
|
||||
irq_status = readl(mtk->base + EIP93_REG_INT_MASK_STAT);
|
||||
if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) {
|
||||
eip93_irq_disable(mtk, EIP93_INT_RDR_THRESH);
|
||||
tasklet_schedule(&mtk->ring->done_task);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* Ignore errors in AUTO mode, handled by the RDR */
|
||||
eip93_irq_clear(mtk, irq_status);
|
||||
if (irq_status)
|
||||
eip93_irq_disable(mtk, irq_status);
|
||||
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
static void eip93_initialize(struct eip93_device *mtk, u32 supported_algo_flags)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/* Reset PE and rings */
|
||||
val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING;
|
||||
val |= EIP93_PE_TARGET_AUTO_RING_MODE;
|
||||
/* For Auto mode, update the CDR ring owner after processing */
|
||||
val |= EIP93_PE_CONFIG_EN_CDR_UPDATE;
|
||||
writel(val, mtk->base + EIP93_REG_PE_CONFIG);
|
||||
|
||||
/* Wait for PE and ring to reset */
|
||||
usleep_range(10, 20);
|
||||
|
||||
/* Release PE and ring reset */
|
||||
val = readl(mtk->base + EIP93_REG_PE_CONFIG);
|
||||
val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING);
|
||||
writel(val, mtk->base + EIP93_REG_PE_CONFIG);
|
||||
|
||||
/* Config Clocks */
|
||||
val = EIP93_PE_CLOCK_EN_PE_CLK;
|
||||
if (supported_algo_flags & EIP93_PE_OPTION_TDES)
|
||||
val |= EIP93_PE_CLOCK_EN_DES_CLK;
|
||||
if (supported_algo_flags & EIP93_PE_OPTION_AES)
|
||||
val |= EIP93_PE_CLOCK_EN_AES_CLK;
|
||||
if (supported_algo_flags &
|
||||
(EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 |
|
||||
EIP93_PE_OPTION_SHA_256))
|
||||
val |= EIP93_PE_CLOCK_EN_HASH_CLK;
|
||||
writel(val, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
|
||||
|
||||
/* Config DMA thresholds */
|
||||
val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) |
|
||||
FIELD_PREP(EIP93_PE_INBUF_THRESH, 128);
|
||||
writel(val, mtk->base + EIP93_REG_PE_BUF_THRESH);
|
||||
|
||||
/* Clear/ack all interrupts before disable all */
|
||||
eip93_irq_clear(mtk, EIP93_INT_ALL);
|
||||
eip93_irq_disable(mtk, EIP93_INT_ALL);
|
||||
|
||||
/* Setup CRD threshold to trigger interrupt */
|
||||
val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY);
|
||||
/*
|
||||
* Configure RDR interrupt to be triggered if RD counter is not 0
|
||||
* for more than 2^(N+10) system clocks.
|
||||
*/
|
||||
val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN;
|
||||
writel(val, mtk->base + EIP93_REG_PE_RING_THRESH);
|
||||
}
|
||||
|
||||
static void eip93_desc_free(struct eip93_device *mtk)
|
||||
{
|
||||
writel(0, mtk->base + EIP93_REG_PE_RING_CONFIG);
|
||||
writel(0, mtk->base + EIP93_REG_PE_CDR_BASE);
|
||||
writel(0, mtk->base + EIP93_REG_PE_RDR_BASE);
|
||||
}
|
||||
|
||||
static int eip93_set_ring(struct eip93_device *mtk, struct eip93_desc_ring *ring)
|
||||
{
|
||||
ring->offset = sizeof(struct eip93_descriptor);
|
||||
ring->base = dmam_alloc_coherent(mtk->dev,
|
||||
sizeof(struct eip93_descriptor) * EIP93_RING_NUM,
|
||||
&ring->base_dma, GFP_KERNEL);
|
||||
if (!ring->base)
|
||||
return -ENOMEM;
|
||||
|
||||
ring->write = ring->base;
|
||||
ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1);
|
||||
ring->read = ring->base;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int eip93_desc_init(struct eip93_device *mtk)
|
||||
{
|
||||
struct eip93_desc_ring *cdr = &mtk->ring->cdr;
|
||||
struct eip93_desc_ring *rdr = &mtk->ring->rdr;
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
ret = eip93_set_ring(mtk, cdr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = eip93_set_ring(mtk, rdr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
writel((u32 __force)cdr->base_dma, mtk->base + EIP93_REG_PE_CDR_BASE);
|
||||
writel((u32 __force)rdr->base_dma, mtk->base + EIP93_REG_PE_RDR_BASE);
|
||||
|
||||
val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1);
|
||||
writel(val, mtk->base + EIP93_REG_PE_RING_CONFIG);
|
||||
|
||||
atomic_set(&mtk->ring->free, EIP93_RING_NUM - 1);
|
||||
|
||||
return 0;
|
||||
}
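eip93_set_ring() allocates one contiguous DMA area and tracks it with base/base_end plus read and write pointers. A tiny sketch of the same wrap-around pointer arithmetic over a plain array (ring size and element type are illustrative only):

#include <stdio.h>

#define RING_NUM 8

struct desc {
	unsigned int value;
};

int main(void)
{
	struct desc ring[RING_NUM];
	struct desc *base = ring;
	struct desc *base_end = ring + RING_NUM - 1;
	struct desc *write = base;

	/* Queue a few descriptors, wrapping back to base at the end */
	for (unsigned int i = 0; i < 12; i++) {
		write->value = i;
		write = (write == base_end) ? base : write + 1;
	}

	printf("slot 0 now holds %u\n", ring[0].value);	/* 8, after wrap */
	return 0;
}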
|
||||
|
||||
static void eip93_cleanup(struct eip93_device *mtk)
|
||||
{
|
||||
tasklet_kill(&mtk->ring->done_task);
|
||||
|
||||
/* Clear/ack all interrupts before disable all */
|
||||
eip93_irq_clear(mtk, EIP93_INT_ALL);
|
||||
eip93_irq_disable(mtk, EIP93_INT_ALL);
|
||||
|
||||
writel(0, mtk->base + EIP93_REG_PE_CLOCK_CTRL);
|
||||
|
||||
eip93_desc_free(mtk);
|
||||
|
||||
idr_destroy(&mtk->ring->crypto_async_idr);
|
||||
}
|
||||
|
||||
static int eip93_crypto_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct eip93_device *mtk;
|
||||
u32 ver, algo_flags;
|
||||
int ret;
|
||||
|
||||
mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
|
||||
if (!mtk)
|
||||
return -ENOMEM;
|
||||
|
||||
mtk->dev = dev;
|
||||
platform_set_drvdata(pdev, mtk);
|
||||
|
||||
mtk->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(mtk->base))
|
||||
return PTR_ERR(mtk->base);
|
||||
|
||||
mtk->irq = platform_get_irq(pdev, 0);
|
||||
if (mtk->irq < 0)
|
||||
return mtk->irq;
|
||||
|
||||
ret = devm_request_threaded_irq(mtk->dev, mtk->irq, eip93_irq_handler,
NULL, IRQF_ONESHOT,
dev_name(mtk->dev), mtk);
if (ret)
return ret;
|
||||
|
||||
mtk->ring = devm_kcalloc(mtk->dev, 1, sizeof(*mtk->ring), GFP_KERNEL);
|
||||
if (!mtk->ring)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = eip93_desc_init(mtk);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
tasklet_init(&mtk->ring->done_task, eip93_done_task, (unsigned long)mtk);
|
||||
|
||||
spin_lock_init(&mtk->ring->read_lock);
|
||||
spin_lock_init(&mtk->ring->write_lock);
|
||||
|
||||
spin_lock_init(&mtk->ring->idr_lock);
|
||||
idr_init(&mtk->ring->crypto_async_idr);
|
||||
|
||||
algo_flags = readl(mtk->base + EIP93_REG_PE_OPTION_1);
|
||||
|
||||
eip93_initialize(mtk, algo_flags);
|
||||
|
||||
/* Init finished, enable RDR interrupt */
|
||||
eip93_irq_enable(mtk, EIP93_INT_RDR_THRESH);
|
||||
|
||||
ret = eip93_register_algs(mtk, algo_flags);
|
||||
if (ret) {
|
||||
eip93_cleanup(mtk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ver = readl(mtk->base + EIP93_REG_PE_REVISION);
|
||||
/* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */
|
||||
dev_info(mtk->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n",
|
||||
FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver),
|
||||
FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver),
|
||||
FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver),
|
||||
FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver),
|
||||
algo_flags,
|
||||
readl(mtk->base + EIP93_REG_PE_OPTION_0));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void eip93_crypto_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct eip93_device *mtk = platform_get_drvdata(pdev);
|
||||
|
||||
eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
|
||||
eip93_cleanup(mtk);
|
||||
}
|
||||
|
||||
static const struct of_device_id eip93_crypto_of_match[] = {
|
||||
{ .compatible = "inside-secure,safexcel-eip93i", },
|
||||
{ .compatible = "inside-secure,safexcel-eip93ie", },
|
||||
{ .compatible = "inside-secure,safexcel-eip93is", },
|
||||
{ .compatible = "inside-secure,safexcel-eip93ies", },
|
||||
/* IW not supported currently, missing AES-XCBC-MAC/AES-CCM */
|
||||
/* { .compatible = "inside-secure,safexcel-eip93iw", }, */
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, eip93_crypto_of_match);
|
||||
|
||||
static struct platform_driver eip93_crypto_driver = {
|
||||
.probe = eip93_crypto_probe,
|
||||
.remove_new = eip93_crypto_remove,
|
||||
.driver = {
|
||||
.name = "mtk-eip93",
|
||||
.of_match_table = eip93_crypto_of_match,
|
||||
},
|
||||
};
|
||||
module_platform_driver(eip93_crypto_driver);
|
||||
|
||||
MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
|
||||
MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
|
||||
MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
155
drivers/crypto/inside-secure/eip93/eip93-main.h
Normal file
@@ -0,0 +1,155 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0
|
||||
*
|
||||
* Copyright (C) 2019 - 2021
|
||||
*
|
||||
* Richard van Schagen <vschagen@icloud.com>
|
||||
* Christian Marangi <ansuelsmth@gmail.com>
|
||||
*/
|
||||
#ifndef _EIP93_MAIN_H_
|
||||
#define _EIP93_MAIN_H_
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/rng.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include "eip93-regs.h"
|
||||
|
||||
#define EIP93_RING_BUSY_DELAY 500
|
||||
|
||||
#define EIP93_RING_NUM 512
|
||||
#define EIP93_RING_BUSY 32
|
||||
#define EIP93_CRA_PRIORITY 1500
|
||||
|
||||
#define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx))
|
||||
#define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \
|
||||
((idx) * sizeof(struct sa_state)))
|
||||
|
||||
/* cipher algorithms */
|
||||
#define EIP93_ALG_DES BIT(0)
|
||||
#define EIP93_ALG_3DES BIT(1)
|
||||
#define EIP93_ALG_AES BIT(2)
|
||||
#define EIP93_ALG_MASK GENMASK(2, 0)
|
||||
/* hash and hmac algorithms */
|
||||
#define EIP93_HASH_MD5 BIT(3)
|
||||
#define EIP93_HASH_SHA1 BIT(4)
|
||||
#define EIP93_HASH_SHA224 BIT(5)
|
||||
#define EIP93_HASH_SHA256 BIT(6)
|
||||
#define EIP93_HASH_HMAC BIT(7)
|
||||
#define EIP93_HASH_MASK GENMASK(6, 3)
|
||||
/* cipher modes */
|
||||
#define EIP93_MODE_CBC BIT(8)
|
||||
#define EIP93_MODE_ECB BIT(9)
|
||||
#define EIP93_MODE_CTR BIT(10)
|
||||
#define EIP93_MODE_RFC3686 BIT(11)
|
||||
#define EIP93_MODE_MASK GENMASK(10, 8)
|
||||
|
||||
/* cipher encryption/decryption operations */
|
||||
#define EIP93_ENCRYPT BIT(12)
|
||||
#define EIP93_DECRYPT BIT(13)
|
||||
|
||||
#define EIP93_BUSY BIT(14)
|
||||
|
||||
/* descriptor flags */
|
||||
#define EIP93_DESC_DMA_IV BIT(0)
|
||||
#define EIP93_DESC_IPSEC BIT(1)
|
||||
#define EIP93_DESC_FINISH BIT(2)
|
||||
#define EIP93_DESC_LAST BIT(3)
|
||||
#define EIP93_DESC_FAKE_HMAC BIT(4)
|
||||
#define EIP93_DESC_PRNG BIT(5)
|
||||
#define EIP93_DESC_HASH BIT(6)
|
||||
#define EIP93_DESC_AEAD BIT(7)
|
||||
#define EIP93_DESC_SKCIPHER BIT(8)
|
||||
#define EIP93_DESC_ASYNC BIT(9)
|
||||
|
||||
#define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV)
|
||||
|
||||
#define IS_DES(flags) ((flags) & EIP93_ALG_DES)
|
||||
#define IS_3DES(flags) ((flags) & EIP93_ALG_3DES)
|
||||
#define IS_AES(flags) ((flags) & EIP93_ALG_AES)
|
||||
|
||||
#define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5)
|
||||
#define IS_HASH_SHA1(flags) ((flags) & EIP93_HASH_SHA1)
|
||||
#define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224)
|
||||
#define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256)
|
||||
#define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC)
|
||||
|
||||
#define IS_CBC(mode) ((mode) & EIP93_MODE_CBC)
|
||||
#define IS_ECB(mode) ((mode) & EIP93_MODE_ECB)
|
||||
#define IS_CTR(mode) ((mode) & EIP93_MODE_CTR)
|
||||
#define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686)
|
||||
|
||||
#define IS_BUSY(flags) ((flags) & EIP93_BUSY)
|
||||
|
||||
#define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT)
|
||||
#define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT)
|
||||
|
||||
#define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \
|
||||
EIP93_ALG_3DES | \
|
||||
EIP93_ALG_AES))
|
||||
|
||||
#define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \
|
||||
EIP93_HASH_SHA1 | \
|
||||
EIP93_HASH_SHA224 | \
|
||||
EIP93_HASH_SHA256))
|
||||
|
||||
/**
|
||||
* struct eip93_device - crypto engine device structure
|
||||
*/
|
||||
struct eip93_device {
|
||||
void __iomem *base;
|
||||
struct device *dev;
|
||||
struct clk *clk;
|
||||
int irq;
|
||||
struct eip93_ring *ring;
|
||||
};
|
||||
|
||||
struct eip93_desc_ring {
|
||||
void *base;
|
||||
void *base_end;
|
||||
dma_addr_t base_dma;
|
||||
/* write and read pointers */
|
||||
void *read;
|
||||
void *write;
|
||||
/* descriptor element offset */
|
||||
u32 offset;
|
||||
};
|
||||
|
||||
struct eip93_state_pool {
|
||||
void *base;
|
||||
dma_addr_t base_dma;
|
||||
};
|
||||
|
||||
struct eip93_ring {
|
||||
struct tasklet_struct done_task;
|
||||
/* command/result rings */
|
||||
struct eip93_desc_ring cdr;
|
||||
struct eip93_desc_ring rdr;
|
||||
spinlock_t write_lock;
|
||||
spinlock_t read_lock;
|
||||
atomic_t free;
|
||||
/* async idr */
|
||||
spinlock_t idr_lock;
|
||||
struct idr crypto_async_idr;
|
||||
};
|
||||
|
||||
enum eip93_alg_type {
|
||||
EIP93_ALG_TYPE_AEAD,
|
||||
EIP93_ALG_TYPE_SKCIPHER,
|
||||
EIP93_ALG_TYPE_HASH,
|
||||
};
|
||||
|
||||
struct eip93_alg_template {
|
||||
struct eip93_device *mtk;
|
||||
enum eip93_alg_type type;
|
||||
u32 flags;
|
||||
union {
|
||||
struct aead_alg aead;
|
||||
struct skcipher_alg skcipher;
|
||||
struct ahash_alg ahash;
|
||||
} alg;
|
||||
};
|
||||
|
||||
#endif /* _EIP93_MAIN_H_ */
|
||||
335
drivers/crypto/inside-secure/eip93/eip93-regs.h
Normal file
@@ -0,0 +1,335 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2019 - 2021
|
||||
*
|
||||
* Richard van Schagen <vschagen@icloud.com>
|
||||
* Christian Marangi <ansuelsmth@gmail.com>
|
||||
*/
|
||||
#ifndef REG_EIP93_H
|
||||
#define REG_EIP93_H
|
||||
|
||||
#define EIP93_REG_PE_CTRL_STAT 0x0
|
||||
#define EIP93_PE_CTRL_PE_PAD_CTRL_STAT GENMASK(31, 24)
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_CODE GENMASK(23, 20)
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH 0x5
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_SPI 0x4
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR_BUS 0x0
|
||||
#define EIP93_PE_CTRL_PE_EXT_ERR BIT(19)
|
||||
#define EIP93_PE_CTRL_PE_SEQNUM_ERR BIT(18)
|
||||
#define EIP93_PE_CTRL_PE_PAD_ERR BIT(17)
|
||||
#define EIP93_PE_CTRL_PE_AUTH_ERR BIT(16)
|
||||
#define EIP93_PE_CTRL_PE_PAD_VALUE GENMASK(15, 8)
|
||||
#define EIP93_PE_CTRL_PE_PRNG_MODE GENMASK(7, 6)
|
||||
#define EIP93_PE_CTRL_PE_HASH_FINAL BIT(4)
|
||||
#define EIP93_PE_CTRL_PE_INIT_ARC4 BIT(3)
|
||||
#define EIP93_PE_CTRL_PE_READY_DES_TRING_OWN GENMASK(1, 0)
|
||||
#define EIP93_PE_CTRL_PE_READY 0x2
|
||||
#define EIP93_PE_CTRL_HOST_READY 0x1
|
||||
#define EIP93_REG_PE_SOURCE_ADDR 0x4
|
||||
#define EIP93_REG_PE_DEST_ADDR 0x8
|
||||
#define EIP93_REG_PE_SA_ADDR 0xc
|
||||
#define EIP93_REG_PE_ADDR 0x10 /* STATE_ADDR */
|
||||
/*
* Special handling for the user ID
* user_id in eip93_descriptor is used to identify the
* descriptor; it is opaque and can be used by the driver
* in a custom way.
*
* Ideally it would hold the address of the crypto request
* struct from the kernel, but that can't work in a 64-bit
* world.
*
* Some flags are also required to identify the last
* descriptor.
*
* To handle this, split the u32 into 2 parts:
* - 31:16 descriptor flags
* - 15:0  IDR index connecting to the crypto request address
*/
|
||||
#define EIP93_REG_PE_USER_ID 0x18
|
||||
#define EIP93_PE_USER_ID_DESC_FLAGS GENMASK(31, 16)
|
||||
#define EIP93_PE_USER_ID_CRYPTO_IDR GENMASK(15, 0)
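The user_id split described above packs 16 bits of descriptor flags and a 16-bit IDR index into one 32-bit word. A stand-alone sketch of the pack/unpack, with hand-rolled masks in place of FIELD_PREP()/FIELD_GET() (0x48 corresponds to EIP93_DESC_HASH | EIP93_DESC_LAST from eip93-main.h; the helper itself is made up):

#include <stdint.h>
#include <stdio.h>

#define DESC_FLAGS_SHIFT 16
#define DESC_FLAGS_MASK  0xffff0000u
#define CRYPTO_IDR_MASK  0x0000ffffu

static uint32_t pack_user_id(uint16_t flags, uint16_t idr)
{
	return ((uint32_t)flags << DESC_FLAGS_SHIFT) | idr;
}

int main(void)
{
	uint32_t user_id = pack_user_id(0x0048, 42);	/* HASH | LAST, idr 42 */

	unsigned int flags = (user_id & DESC_FLAGS_MASK) >> DESC_FLAGS_SHIFT;
	unsigned int idr = user_id & CRYPTO_IDR_MASK;

	printf("user_id=0x%08x flags=0x%04x idr=%u\n",
	       (unsigned int)user_id, flags, idr);
	return 0;
}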
|
||||
#define EIP93_REG_PE_LENGTH 0x1c
|
||||
#define EIP93_PE_LENGTH_BYPASS GENMASK(31, 24)
|
||||
#define EIP93_PE_LENGTH_HOST_PE_READY GENMASK(23, 22)
|
||||
#define EIP93_PE_LENGTH_PE_READY 0x2
|
||||
#define EIP93_PE_LENGTH_HOST_READY 0x1
|
||||
#define EIP93_PE_LENGTH_LENGTH GENMASK(19, 0)
|
||||
|
||||
/* PACKET ENGINE RING configuration registers */
|
||||
#define EIP93_REG_PE_CDR_BASE 0x80
|
||||
#define EIP93_REG_PE_RDR_BASE 0x84
|
||||
#define EIP93_REG_PE_RING_CONFIG 0x88
|
||||
#define EIP93_PE_EN_EXT_TRIG BIT(31)
|
||||
/* Absent in later revision of eip93 */
|
||||
/* #define EIP93_PE_RING_OFFSET GENMASK(23, 15) */
|
||||
#define EIP93_PE_RING_SIZE GENMASK(9, 0)
|
||||
#define EIP93_REG_PE_RING_THRESH 0x8c
|
||||
#define EIPR93_PE_TIMEROUT_EN BIT(31)
|
||||
#define EIPR93_PE_RD_TIMEOUT GENMASK(29, 26)
|
||||
#define EIPR93_PE_RDR_THRESH GENMASK(25, 16)
|
||||
#define EIPR93_PE_CDR_THRESH GENMASK(9, 0)
|
||||
#define EIP93_REG_PE_CD_COUNT 0x90
|
||||
#define EIP93_PE_CD_COUNT GENMASK(10, 0)
|
||||
/*
|
||||
* In the same register, writing a value in GENMASK(7, 0) will
|
||||
* increment the descriptor count and start DMA action.
|
||||
*/
|
||||
#define EIP93_PE_CD_COUNT_INCR GENMASK(7, 0)
|
||||
#define EIP93_REG_PE_RD_COUNT 0x94
|
||||
#define EIP93_PE_RD_COUNT GENMASK(10, 0)
|
||||
/*
|
||||
* In the same register, writing a value in GENMASK(7, 0) will
|
||||
* increment the descriptor count and start DMA action.
|
||||
*/
|
||||
#define EIP93_PE_RD_COUNT_INCR GENMASK(7, 0)
|
||||
#define EIP93_REG_PE_RING_RW_PNTR 0x98 /* RING_PNTR */
|
||||
|
||||
/* PACKET ENGINE configuration registers */
|
||||
#define EIP93_REG_PE_CONFIG 0x100
|
||||
#define EIP93_PE_CONFIG_SWAP_TARGET BIT(20)
|
||||
#define EIP93_PE_CONFIG_SWAP_DATA BIT(18)
|
||||
#define EIP93_PE_CONFIG_SWAP_SA BIT(17)
|
||||
#define EIP93_PE_CONFIG_SWAP_CDRD BIT(16)
|
||||
#define EIP93_PE_CONFIG_EN_CDR_UPDATE BIT(10)
|
||||
#define EIP93_PE_CONFIG_PE_MODE GENMASK(9, 8)
|
||||
#define EIP93_PE_TARGET_AUTO_RING_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3)
|
||||
#define EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2)
|
||||
#define EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1)
|
||||
#define EIP93_PE_DIRECT_HOST_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0)
|
||||
#define EIP93_PE_CONFIG_RST_RING BIT(2)
|
||||
#define EIP93_PE_CONFIG_RST_PE BIT(0)
|
||||
#define EIP93_REG_PE_STATUS 0x104
|
||||
#define EIP93_REG_PE_BUF_THRESH 0x10c
|
||||
#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16)
|
||||
#define EIP93_PE_INBUF_THRESH GENMASK(7, 0)
|
||||
#define EIP93_REG_PE_INBUF_COUNT 0x100
|
||||
#define EIP93_REG_PE_OUTBUF_COUNT 0x114
|
||||
#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */
|
||||
|
||||
/* PACKET ENGINE endian config */
|
||||
#define EIP93_REG_PE_ENDIAN_CONFIG 0x1cc
|
||||
#define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG 0x1d0
|
||||
#define EIP93_PE_ENDIAN_TARGET_BYTE_SWAP GENMASK(23, 16)
|
||||
#define EIP93_PE_ENDIAN_MASTER_BYTE_SWAP GENMASK(7, 0)
|
||||
/*
* Each byte lane is referenced by a 2-bit ID.
* Split GENMASK(7, 0) into 4 parts, one for each byte.
* Example LITTLE ENDIAN:    Example BIG ENDIAN:
* GENMASK(7, 6) 0x3         GENMASK(7, 6) 0x0
* GENMASK(5, 4) 0x2         GENMASK(5, 4) 0x1
* GENMASK(3, 2) 0x1         GENMASK(3, 2) 0x2
* GENMASK(1, 0) 0x0         GENMASK(1, 0) 0x3
*/
|
||||
#define EIP93_PE_ENDIAN_BYTE0 0x0
|
||||
#define EIP93_PE_ENDIAN_BYTE1 0x1
|
||||
#define EIP93_PE_ENDIAN_BYTE2 0x2
|
||||
#define EIP93_PE_ENDIAN_BYTE3 0x3
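The swap-field layout above encodes each byte lane with a 2-bit ID inside an 8-bit field. A quick check of the two example encodings from the comment (an illustrative computation, not driver code):

#include <stdio.h>

int main(void)
{
	/* Little endian: lanes 3,2,1,0 from bits 7:6 down to 1:0 */
	unsigned int le = (0x3 << 6) | (0x2 << 4) | (0x1 << 2) | 0x0;
	/* Big endian: lanes 0,1,2,3 from bits 7:6 down to 1:0 */
	unsigned int be = (0x0 << 6) | (0x1 << 4) | (0x2 << 2) | 0x3;

	printf("little-endian swap field: 0x%02x\n", le);	/* 0xe4 */
	printf("big-endian swap field:    0x%02x\n", be);	/* 0x1b */
	return 0;
}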
|
||||
|
||||
/* EIP93 CLOCK control registers */
|
||||
#define EIP93_REG_PE_CLOCK_CTRL 0x1e8
|
||||
#define EIP93_PE_CLOCK_EN_HASH_CLK BIT(4)
|
||||
#define EIP93_PE_CLOCK_EN_ARC4_CLK BIT(3)
|
||||
#define EIP93_PE_CLOCK_EN_AES_CLK BIT(2)
|
||||
#define EIP93_PE_CLOCK_EN_DES_CLK BIT(1)
|
||||
#define EIP93_PE_CLOCK_EN_PE_CLK BIT(0)
|
||||
|
||||
/* EIP93 Device Option and Revision Register */
|
||||
#define EIP93_REG_PE_OPTION_1 0x1f4
|
||||
#define EIP93_PE_OPTION_MAC_KEY256 BIT(31)
|
||||
#define EIP93_PE_OPTION_MAC_KEY192 BIT(30)
|
||||
#define EIP93_PE_OPTION_MAC_KEY128 BIT(29)
|
||||
#define EIP93_PE_OPTION_AES_CBC_MAC BIT(28)
|
||||
#define EIP93_PE_OPTION_AES_XCBX BIT(23)
|
||||
#define EIP93_PE_OPTION_SHA_256 BIT(19)
|
||||
#define EIP93_PE_OPTION_SHA_224 BIT(18)
|
||||
#define EIP93_PE_OPTION_SHA_1 BIT(17)
|
||||
#define EIP93_PE_OPTION_MD5 BIT(16)
|
||||
#define EIP93_PE_OPTION_AES_KEY256 BIT(15)
|
||||
#define EIP93_PE_OPTION_AES_KEY192 BIT(14)
|
||||
#define EIP93_PE_OPTION_AES_KEY128 BIT(13)
|
||||
#define EIP93_PE_OPTION_AES BIT(2)
|
||||
#define EIP93_PE_OPTION_ARC4 BIT(1)
|
||||
#define EIP93_PE_OPTION_TDES BIT(0) /* DES and TDES */
|
||||
#define EIP93_REG_PE_OPTION_0 0x1f8
|
||||
#define EIP93_REG_PE_REVISION 0x1fc
|
||||
#define EIP93_PE_REVISION_MAJ_HW_REV GENMASK(27, 24)
|
||||
#define EIP93_PE_REVISION_MIN_HW_REV GENMASK(23, 20)
|
||||
#define EIP93_PE_REVISION_HW_PATCH GENMASK(19, 16)
|
||||
#define EIP93_PE_REVISION_EIP_NO GENMASK(7, 0)
|
||||
|
||||
/* EIP93 Interrupt Control Register */
|
||||
#define EIP93_REG_INT_UNMASK_STAT 0x200
|
||||
#define EIP93_REG_INT_MASK_STAT 0x204
|
||||
#define EIP93_REG_INT_CLR 0x204
|
||||
#define EIP93_REG_INT_MASK 0x208 /* INT_EN */
|
||||
/* Each int reg have the same bitmap */
|
||||
#define EIP93_INT_INTERFACE_ERR BIT(18)
|
||||
#define EIP93_INT_RPOC_ERR BIT(17)
|
||||
#define EIP93_INT_PE_RING_ERR BIT(16)
|
||||
#define EIP93_INT_HALT BIT(15)
|
||||
#define EIP93_INT_OUTBUF_THRESH BIT(11)
|
||||
#define EIP93_INT_INBUF_THRESH BIT(10)
|
||||
#define EIP93_INT_OPERATION_DONE BIT(9)
|
||||
#define EIP93_INT_RDR_THRESH BIT(1)
|
||||
#define EIP93_INT_CDR_THRESH BIT(0)
|
||||
#define EIP93_INT_ALL (EIP93_INT_INTERFACE_ERR | \
|
||||
EIP93_INT_RPOC_ERR | \
|
||||
EIP93_INT_PE_RING_ERR | \
|
||||
EIP93_INT_HALT | \
|
||||
EIP93_INT_OUTBUF_THRESH | \
|
||||
EIP93_INT_INBUF_THRESH | \
|
||||
EIP93_INT_OPERATION_DONE | \
|
||||
EIP93_INT_RDR_THRESH | \
|
||||
EIP93_INT_CDR_THRESH)
|
||||
|
||||
#define EIP93_REG_INT_CFG 0x20c
|
||||
#define EIP93_INT_TYPE_PULSE BIT(0)
|
||||
#define EIP93_REG_MASK_ENABLE 0x210
|
||||
#define EIP93_REG_MASK_DISABLE 0x214
|
||||
|
||||
/* EIP93 SA Record register */
#define EIP93_REG_SA_CMD_0 0x400
#define EIP93_SA_CMD_SAVE_HASH BIT(29)
#define EIP93_SA_CMD_SAVE_IV BIT(28)
#define EIP93_SA_CMD_HASH_SOURCE GENMASK(27, 26)
#define EIP93_SA_CMD_HASH_NO_LOAD FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3)
#define EIP93_SA_CMD_HASH_FROM_STATE FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2)
#define EIP93_SA_CMD_HASH_FROM_SA FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0)
#define EIP93_SA_CMD_IV_SOURCE GENMASK(25, 24)
#define EIP93_SA_CMD_IV_FROM_PRNG FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3)
#define EIP93_SA_CMD_IV_FROM_STATE FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2)
#define EIP93_SA_CMD_IV_FROM_INPUT FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1)
#define EIP93_SA_CMD_IV_NO_LOAD FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0)
#define EIP93_SA_CMD_DIGEST_LENGTH GENMASK(23, 20)
#define EIP93_SA_CMD_DIGEST_10WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0xa) /* SRTP and TLS */
#define EIP93_SA_CMD_DIGEST_8WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */
#define EIP93_SA_CMD_DIGEST_7WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */
#define EIP93_SA_CMD_DIGEST_6WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6)
#define EIP93_SA_CMD_DIGEST_5WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */
#define EIP93_SA_CMD_DIGEST_4WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */
#define EIP93_SA_CMD_DIGEST_3WORD_IPSEC FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */
#define EIP93_SA_CMD_DIGEST_2WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2)
#define EIP93_SA_CMD_DIGEST_1WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1)
#define EIP93_SA_CMD_DIGEST_3WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */
#define EIP93_SA_CMD_HDR_PROC BIT(19)
#define EIP93_SA_CMD_EXT_PAD BIT(18)
#define EIP93_SA_CMD_SCPAD BIT(17)
#define EIP93_SA_CMD_HASH GENMASK(15, 12)
#define EIP93_SA_CMD_HASH_NULL FIELD_PREP(EIP93_SA_CMD_HASH, 0xf)
#define EIP93_SA_CMD_HASH_SHA256 FIELD_PREP(EIP93_SA_CMD_HASH, 0x3)
#define EIP93_SA_CMD_HASH_SHA224 FIELD_PREP(EIP93_SA_CMD_HASH, 0x2)
#define EIP93_SA_CMD_HASH_SHA1 FIELD_PREP(EIP93_SA_CMD_HASH, 0x1)
#define EIP93_SA_CMD_HASH_MD5 FIELD_PREP(EIP93_SA_CMD_HASH, 0x0)
#define EIP93_SA_CMD_CIPHER GENMASK(11, 8)
#define EIP93_SA_CMD_CIPHER_NULL FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf)
#define EIP93_SA_CMD_CIPHER_AES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3)
#define EIP93_SA_CMD_CIPHER_ARC4 FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2)
#define EIP93_SA_CMD_CIPHER_3DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1)
#define EIP93_SA_CMD_CIPHER_DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0)
#define EIP93_SA_CMD_PAD_TYPE GENMASK(7, 6)
#define EIP93_SA_CMD_PAD_CONST_SSL FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6)
#define EIP93_SA_CMD_PAD_TLS_DTLS FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5)
#define EIP93_SA_CMD_PAD_ZERO FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3)
#define EIP93_SA_CMD_PAD_CONST FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2)
#define EIP93_SA_CMD_PAD_PKCS7 FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1)
#define EIP93_SA_CMD_PAD_IPSEC FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0)
#define EIP93_SA_CMD_OPGROUP GENMASK(5, 4)
#define EIP93_SA_CMD_OP_EXT FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2)
#define EIP93_SA_CMD_OP_PROTOCOL FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1)
#define EIP93_SA_CMD_OP_BASIC FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0)
#define EIP93_SA_CMD_DIRECTION_IN BIT(3) /* 0: outbound 1: inbound */
#define EIP93_SA_CMD_OPCODE GENMASK(2, 0)
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG 0x7
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH 0x3
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH 0x1
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC 0x0
#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH 0x3
#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC 0x1
#define EIP93_SA_CMD_OPCODE_BASIC_IN_DEC 0x0
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP 0x0
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL 0x4
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS 0x5
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP 0x7
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP 0x0
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL 0x2
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS 0x3
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP 0x7
#define EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL 0x1
#define EIP93_SA_CMD_OPCODE_EXT_OUT_SSL 0x4
#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10 0x5
#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11 0x6
#define EIP93_SA_CMD_OPCODE_EXT_IN_DTSL 0x1
#define EIP93_SA_CMD_OPCODE_EXT_IN_SSL 0x4
#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10 0x5
#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11 0x6
#define EIP93_REG_SA_CMD_1 0x404
#define EIP93_SA_CMD_EN_SEQNUM_CHK BIT(29)
/* This mask can be used for either ARC4 or AES */
#define EIP93_SA_CMD_ARC4_KEY_LENGHT GENMASK(28, 24)
#define EIP93_SA_CMD_AES_DEC_KEY BIT(28) /* 0: encrypt key 1: decrypt key */
#define EIP93_SA_CMD_AES_KEY_LENGTH GENMASK(26, 24)
#define EIP93_SA_CMD_AES_KEY_256BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4)
#define EIP93_SA_CMD_AES_KEY_192BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3)
#define EIP93_SA_CMD_AES_KEY_128BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2)
#define EIP93_SA_CMD_HASH_CRYPT_OFFSET GENMASK(23, 16)
#define EIP93_SA_CMD_BYTE_OFFSET BIT(13) /* 0: CRYPT_OFFSET in 32-bit words 1: CRYPT_OFFSET in 8-bit bytes */
#define EIP93_SA_CMD_HMAC BIT(12)
#define EIP93_SA_CMD_SSL_MAC BIT(12)
/* This mask can be used for either ARC4 or AES */
#define EIP93_SA_CMD_CHIPER_MODE GENMASK(9, 8)
/* AES or DES operations */
#define EIP93_SA_CMD_CHIPER_MODE_ICM FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3)
#define EIP93_SA_CMD_CHIPER_MODE_CTR FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2)
#define EIP93_SA_CMD_CHIPER_MODE_CBC FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
#define EIP93_SA_CMD_CHIPER_MODE_ECB FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
/* ARC4 operations */
#define EIP93_SA_CMD_CHIPER_MODE_STATEFULL FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
#define EIP93_SA_CMD_CHIPER_MODE_STATELESS FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
#define EIP93_SA_CMD_COPY_PAD BIT(3)
#define EIP93_SA_CMD_COPY_PAYLOAD BIT(2)
#define EIP93_SA_CMD_COPY_HEADER BIT(1)
#define EIP93_SA_CMD_COPY_DIGEST BIT(0) /* With this enabled, COPY_PAD is required */

/* State save register */
#define EIP93_REG_STATE_IV_0 0x500
#define EIP93_REG_STATE_IV_1 0x504

#define EIP93_REG_PE_ARC4STATE 0x700

struct sa_record {
	u32 sa_cmd0_word;
	u32 sa_cmd1_word;
	u32 sa_key[8];
	u8 sa_i_digest[32];
	u8 sa_o_digest[32];
	u32 sa_spi;
	u32 sa_seqnum[2];
	u32 sa_seqmum_mask[2];
	u32 sa_nonce;
} __packed;

struct sa_state {
	u32 state_iv[4];
	u32 state_byte_cnt[2];
	u8 state_i_digest[32];
} __packed;

struct eip93_descriptor {
	u32 pe_ctrl_stat_word;
	u32 src_addr;
	u32 dst_addr;
	u32 sa_addr;
	u32 state_addr;
	u32 arc4_addr;
	u32 user_id;
	u32 pe_length_word;
} __packed;

#endif
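For readers of this hunk: the SA command words above are plain bitfields that get packed into struct sa_record before a descriptor is handed to the engine. A minimal sketch of how they compose follows; it is illustrative only, the helper name and the AES-128-CBC plus HMAC-SHA1 selection are assumptions and not code from this patch.

#include <linux/bitfield.h>

/* Illustrative sketch: pack the SA command words for an outbound
 * AES-128-CBC encrypt + HMAC-SHA1 operation using the macros above.
 * The helper name is hypothetical and not part of this patch.
 */
static void example_fill_sa_cmd(struct sa_record *sa)
{
	sa->sa_cmd0_word = FIELD_PREP(EIP93_SA_CMD_OPCODE,
				      EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH) |
			   EIP93_SA_CMD_OP_BASIC |
			   EIP93_SA_CMD_CIPHER_AES |
			   EIP93_SA_CMD_HASH_SHA1 |
			   EIP93_SA_CMD_DIGEST_3WORD |	/* 96-bit truncated digest */
			   EIP93_SA_CMD_IV_FROM_STATE |
			   EIP93_SA_CMD_PAD_PKCS7 |
			   EIP93_SA_CMD_SAVE_IV |
			   EIP93_SA_CMD_SAVE_HASH;

	sa->sa_cmd1_word = EIP93_SA_CMD_AES_KEY_128BIT |
			   EIP93_SA_CMD_CHIPER_MODE_CBC |
			   EIP93_SA_CMD_HMAC |
			   EIP93_SA_CMD_COPY_PAYLOAD |
			   EIP93_SA_CMD_COPY_DIGEST |
			   EIP93_SA_CMD_COPY_PAD;	/* required when COPY_DIGEST is set */
}
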
@@ -823,8 +823,9 @@ static int devm_tegra_devfreq_init_hw(struct device *dev,

static int tegra_devfreq_config_clks_nop(struct device *dev,
					 struct opp_table *opp_table,
					 struct dev_pm_opp *opp, void *data,
					 bool scaling_down)
					 struct dev_pm_opp *old_opp,
					 struct dev_pm_opp *opp,
					 void *data, bool scaling_down)
{
	/* We want to skip clk configuration via dev_pm_opp_set_opp() */
	return 0;

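The nop callback above only takes effect once it is installed as the OPP table's clock-configuration hook. A hedged sketch of that wiring, assuming the generic struct dev_pm_opp_config / devm_pm_opp_set_config interface rather than quoting the Tegra driver itself:

#include <linux/pm_opp.h>

/* Sketch only: install the nop hook so dev_pm_opp_set_opp() leaves the
 * clocks alone and the driver keeps managing them directly.
 */
static int example_install_nop_config_clks(struct device *dev)
{
	struct dev_pm_opp_config config = {
		.config_clks = tegra_devfreq_config_clks_nop,
	};

	return devm_pm_opp_set_config(dev, &config);
}
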
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
obj-$(CONFIG_DMA_SHARED_BUFFER) := dma-shared-buffer.o

dma-buf-objs-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
	dma-fence-unwrap.o dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
dma-buf-objs-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
dma-buf-objs-$(CONFIG_SYNC_FILE) += sync_file.o
dma-buf-objs-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
dma-buf-objs-$(CONFIG_UDMABUF) += udmabuf.o
dma-buf-objs-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o

dmabuf_selftests-y := \
	selftest.o \
@@ -15,4 +17,6 @@ dmabuf_selftests-y := \
	st-dma-fence-unwrap.o \
	st-dma-resv.o

obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
dma-buf-objs-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o

dma-shared-buffer-objs := $(dma-buf-objs-y)

@@ -1743,4 +1743,5 @@ static void __exit dma_buf_deinit(void)
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);
module_exit(dma_buf_deinit);
MODULE_LICENSE("GPL");

@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
dma-buf-objs-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
dma-buf-objs-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o

@@ -734,6 +734,8 @@ config XILINX_ZYNQMP_DPDMA
	  display driver.

# driver files
source "drivers/dma/airoha/Kconfig"

source "drivers/dma/amd/Kconfig"

source "drivers/dma/bestcomm/Kconfig"

@@ -85,6 +85,7 @@ obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/

obj-y += airoha/
obj-y += amd/
obj-y += mediatek/
obj-y += qcom/

14 drivers/dma/airoha/Kconfig Normal file
@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only

config AIROHA_HSDMA
	tristate "Airoha High-Speed DMA controller support"
	depends on ARCH_AIROHA || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Enable support for the High-Speed DMA controller found on
	  Airoha SoCs.

	  The controller provides channels dedicated to memory-to-memory
	  transfers, offloading them from the CPU through ring-based
	  descriptor management.

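Because the controller is exposed through the generic dmaengine framework (DMA_ENGINE and DMA_VIRTUAL_CHANNELS above), a consumer would drive a memory-to-memory copy through the standard API. A rough sketch using generic dmaengine calls; the function name is hypothetical and nothing here is taken from the airoha-hsdma driver, whose diff is not shown:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical consumer of a DMA_MEMCPY-capable channel such as the one
 * this driver is expected to register.
 */
static int example_hsdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);	/* any memcpy-capable engine */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);	/* simplest possible completion wait */

	dma_release_channel(chan);
	return 0;
}
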
2 drivers/dma/airoha/Makefile Normal file
@@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_AIROHA_HSDMA) += airoha-hsdma.o

1113 drivers/dma/airoha/airoha-hsdma.c Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff