openwrt/target/linux/bcm27xx/patches-6.6/950-1104-drivers-media-pci-Add-Hailo-accelerator-device-drive.patch
Álvaro Fernández Rojas 8c405cdccc bcm27xx: add 6.6 kernel patches
The patches were generated from the RPi repo with the following command:
git format-patch v6.6.34..rpi-6.6.y

Some patches needed rebasing and, as usual, the applied-and-reverted patches
and the wireless driver, GitHub workflow, README and defconfig patches were removed.

Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com>
2024-06-18 18:52:49 +02:00


From b01457f2cabf7e9b16f217ef7e4cb739655c407b Mon Sep 17 00:00:00 2001
From: Naushir Patuck <naush@raspberrypi.com>
Date: Tue, 21 May 2024 12:56:17 +0100
Subject: [PATCH 1104/1135] drivers: media: pci: Add Hailo accelerator device
drivers
Add version 4.17.0 of the Hailo PCIe device drivers.
Sourced from https://github.com/hailo-ai/hailort-drivers/
Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
---
drivers/media/pci/Kconfig | 1 +
drivers/media/pci/Makefile | 3 +-
drivers/media/pci/hailo/Kconfig | 6 +
drivers/media/pci/hailo/Makefile | 32 +
drivers/media/pci/hailo/common/fw_operation.c | 103 ++
drivers/media/pci/hailo/common/fw_operation.h | 25 +
.../media/pci/hailo/common/fw_validation.c | 112 ++
.../media/pci/hailo/common/fw_validation.h | 66 ++
.../pci/hailo/common/hailo_ioctl_common.h | 575 ++++++++++
.../pci/hailo/common/hailo_pcie_version.h | 13 +
.../media/pci/hailo/common/hailo_resource.c | 128 +++
.../media/pci/hailo/common/hailo_resource.h | 39 +
drivers/media/pci/hailo/common/pcie_common.c | 641 +++++++++++
drivers/media/pci/hailo/common/pcie_common.h | 128 +++
drivers/media/pci/hailo/common/utils.h | 39 +
drivers/media/pci/hailo/common/vdma_common.c | 684 +++++++++++
drivers/media/pci/hailo/common/vdma_common.h | 243 ++++
.../pci/hailo/include/hailo_pcie_version.h | 14 +
drivers/media/pci/hailo/src/fops.c | 736 ++++++++++++
drivers/media/pci/hailo/src/fops.h | 21 +
drivers/media/pci/hailo/src/pcie.c | 1012 +++++++++++++++++
drivers/media/pci/hailo/src/pcie.h | 82 ++
drivers/media/pci/hailo/src/sysfs.c | 36 +
drivers/media/pci/hailo/src/sysfs.h | 13 +
drivers/media/pci/hailo/src/utils.c | 27 +
drivers/media/pci/hailo/src/utils.h | 21 +
drivers/media/pci/hailo/utils/compact.h | 153 +++
drivers/media/pci/hailo/utils/fw_common.h | 19 +
drivers/media/pci/hailo/utils/logs.c | 8 +
drivers/media/pci/hailo/utils/logs.h | 45 +
drivers/media/pci/hailo/vdma/ioctl.c | 698 ++++++++++++
drivers/media/pci/hailo/vdma/ioctl.h | 37 +
drivers/media/pci/hailo/vdma/memory.c | 551 +++++++++
drivers/media/pci/hailo/vdma/memory.h | 54 +
drivers/media/pci/hailo/vdma/vdma.c | 336 ++++++
drivers/media/pci/hailo/vdma/vdma.h | 143 +++
39 files changed, 6849 insertions(+), 1 deletion(-)
create mode 100644 drivers/media/pci/hailo/Kconfig
create mode 100644 drivers/media/pci/hailo/Makefile
create mode 100644 drivers/media/pci/hailo/common/fw_operation.c
create mode 100644 drivers/media/pci/hailo/common/fw_operation.h
create mode 100644 drivers/media/pci/hailo/common/fw_validation.c
create mode 100644 drivers/media/pci/hailo/common/fw_validation.h
create mode 100644 drivers/media/pci/hailo/common/hailo_ioctl_common.h
create mode 100644 drivers/media/pci/hailo/common/hailo_pcie_version.h
create mode 100644 drivers/media/pci/hailo/common/hailo_resource.c
create mode 100644 drivers/media/pci/hailo/common/hailo_resource.h
create mode 100644 drivers/media/pci/hailo/common/pcie_common.c
create mode 100644 drivers/media/pci/hailo/common/pcie_common.h
create mode 100644 drivers/media/pci/hailo/common/utils.h
create mode 100644 drivers/media/pci/hailo/common/vdma_common.c
create mode 100644 drivers/media/pci/hailo/common/vdma_common.h
create mode 100755 drivers/media/pci/hailo/include/hailo_pcie_version.h
create mode 100644 drivers/media/pci/hailo/src/fops.c
create mode 100644 drivers/media/pci/hailo/src/fops.h
create mode 100644 drivers/media/pci/hailo/src/pcie.c
create mode 100644 drivers/media/pci/hailo/src/pcie.h
create mode 100644 drivers/media/pci/hailo/src/sysfs.c
create mode 100644 drivers/media/pci/hailo/src/sysfs.h
create mode 100644 drivers/media/pci/hailo/src/utils.c
create mode 100644 drivers/media/pci/hailo/src/utils.h
create mode 100644 drivers/media/pci/hailo/utils/compact.h
create mode 100644 drivers/media/pci/hailo/utils/fw_common.h
create mode 100644 drivers/media/pci/hailo/utils/logs.c
create mode 100644 drivers/media/pci/hailo/utils/logs.h
create mode 100644 drivers/media/pci/hailo/vdma/ioctl.c
create mode 100644 drivers/media/pci/hailo/vdma/ioctl.h
create mode 100644 drivers/media/pci/hailo/vdma/memory.c
create mode 100644 drivers/media/pci/hailo/vdma/memory.h
create mode 100644 drivers/media/pci/hailo/vdma/vdma.c
create mode 100644 drivers/media/pci/hailo/vdma/vdma.h
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -74,6 +74,7 @@ config VIDEO_PCI_SKELETON
when developing new drivers.
source "drivers/media/pci/intel/Kconfig"
+source "drivers/media/pci/hailo/Kconfig"
endif #MEDIA_PCI_SUPPORT
endif #PCI
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -17,7 +17,8 @@ obj-y += ttpci/ \
saa7146/ \
smipcie/ \
netup_unidvb/ \
- intel/
+ intel/ \
+ hailo/
# Please keep it alphabetically sorted by Kconfig name
# (e. g. LC_ALL=C sort Makefile)
--- /dev/null
+++ b/drivers/media/pci/hailo/Kconfig
@@ -0,0 +1,6 @@
+
+config MEDIA_PCI_HAILO
+ tristate "Hailo AI accelerator PCIe driver"
+ depends on PCI
+ help
+ Enable build of Hailo AI accelerator PCIe driver.
--- /dev/null
+++ b/drivers/media/pci/hailo/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+
+COMMON_SRC_DIRECTORY=common
+VDMA_SRC_DIRECTORY=vdma
+UTILS_SRC_DIRECTORY=utils
+
+obj-$(CONFIG_MEDIA_PCI_HAILO) := hailo_pci.o
+
+hailo_pci-objs += src/pcie.o
+hailo_pci-objs += src/fops.o
+hailo_pci-objs += src/utils.o
+hailo_pci-objs += src/sysfs.o
+
+hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_validation.o
+hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_operation.o
+hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/pcie_common.o
+hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/vdma_common.o
+hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/hailo_resource.o
+
+hailo_pci-objs += $(UTILS_SRC_DIRECTORY)/logs.o
+
+hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/vdma.o
+hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/memory.o
+hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/ioctl.o
+
+ccflags-y += -Werror
+ccflags-y += -DHAILO_RASBERRY_PIE
+ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(srctree)/$(src)/include
+ccflags-y += -I$(srctree)/$(src)/common
+
+clean-files := $(hailo_pci-objs)
--- /dev/null
+++ b/drivers/media/pci/hailo/common/fw_operation.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+**/
+
+#include "fw_operation.h"
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+typedef struct {
+ u32 host_offset;
+ u32 chip_offset;
+} FW_DEBUG_BUFFER_HEADER_t;
+
+#define DEBUG_BUFFER_DATA_SIZE (DEBUG_BUFFER_TOTAL_SIZE - sizeof(FW_DEBUG_BUFFER_HEADER_t))
+
+int hailo_read_firmware_notification(struct hailo_resource *resource, struct hailo_d2h_notification *notification)
+{
+ hailo_d2h_buffer_details_t d2h_buffer_details = {0, 0};
+ hailo_resource_read_buffer(resource, 0, sizeof(d2h_buffer_details),
+ &d2h_buffer_details);
+
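+    // Bail out if the notification does not fit in our buffer, or if no
+    // notification is pending (the buffer is not marked as in use).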
+ if ((sizeof(notification->buffer) < d2h_buffer_details.buffer_len) || (0 == d2h_buffer_details.is_buffer_in_use)) {
+ return -EINVAL;
+ }
+
+ notification->buffer_len = d2h_buffer_details.buffer_len;
+ hailo_resource_read_buffer(resource, sizeof(d2h_buffer_details), notification->buffer_len, notification->buffer);
+
+ // Write is_buffer_in_use = false
+ hailo_resource_write16(resource, 0, 0);
+ return 0;
+}
+
+static inline size_t calculate_log_ready_to_read(FW_DEBUG_BUFFER_HEADER_t *header)
+{
+ size_t ready_to_read = 0;
+ size_t host_offset = header->host_offset;
+ size_t chip_offset = header->chip_offset;
+
+ if (chip_offset >= host_offset) {
+ ready_to_read = chip_offset - host_offset;
+ } else {
+ ready_to_read = DEBUG_BUFFER_DATA_SIZE - (host_offset - chip_offset);
+ }
+
+ return ready_to_read;
+}
+
+long hailo_read_firmware_log(struct hailo_resource *fw_logger_resource, struct hailo_read_log_params *params)
+{
+ FW_DEBUG_BUFFER_HEADER_t debug_buffer_header = {0};
+ size_t read_offset = 0;
+ size_t ready_to_read = 0;
+ size_t size_to_read = 0;
+ uintptr_t user_buffer = (uintptr_t)params->buffer;
+
+ if (params->buffer_size > ARRAY_SIZE(params->buffer)) {
+ return -EINVAL;
+ }
+
+ hailo_resource_read_buffer(fw_logger_resource, 0, sizeof(debug_buffer_header),
+ &debug_buffer_header);
+
+    /* Calculate how many bytes are ready to be read. */
+ ready_to_read = calculate_log_ready_to_read(&debug_buffer_header);
+ if (0 == ready_to_read) {
+ params->read_bytes = 0;
+ return 0;
+ }
+ /* If ready to read is bigger than the buffer size, read only buffer size bytes. */
+ ready_to_read = min(ready_to_read, params->buffer_size);
+
+    /* Point to the data that is ready to be read by the host. */
+ read_offset = sizeof(debug_buffer_header) + debug_buffer_header.host_offset;
+ /* Check if the offset should cycle back to beginning. */
+ if (DEBUG_BUFFER_DATA_SIZE <= debug_buffer_header.host_offset + ready_to_read) {
+ size_to_read = DEBUG_BUFFER_DATA_SIZE - debug_buffer_header.host_offset;
+ hailo_resource_read_buffer(fw_logger_resource, read_offset, size_to_read, (void*)user_buffer);
+
+ user_buffer += size_to_read;
+ size_to_read = ready_to_read - size_to_read;
+ /* Point back to the beginning of the data buffer. */
+ read_offset -= debug_buffer_header.host_offset;
+ }
+ else {
+ size_to_read = ready_to_read;
+ }
+
+ /* size_to_read may become 0 if the read reached DEBUG_BUFFER_DATA_SIZE exactly */
+ hailo_resource_read_buffer(fw_logger_resource, read_offset, size_to_read, (void*)user_buffer);
+
+ /* Change current_offset to represent the new host offset. */
+ read_offset += size_to_read;
+ hailo_resource_write32(fw_logger_resource, offsetof(FW_DEBUG_BUFFER_HEADER_t, host_offset),
+ (u32)(read_offset - sizeof(debug_buffer_header)));
+
+ params->read_bytes = ready_to_read;
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/fw_operation.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+**/
+
+#ifndef _HAILO_COMMON_FIRMWARE_OPERATION_H_
+#define _HAILO_COMMON_FIRMWARE_OPERATION_H_
+
+#include "hailo_resource.h"
+
+#define DEBUG_BUFFER_TOTAL_SIZE (4*1024)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int hailo_read_firmware_notification(struct hailo_resource *resource, struct hailo_d2h_notification *notification);
+
+long hailo_read_firmware_log(struct hailo_resource *fw_logger_resource, struct hailo_read_log_params *params);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HAILO_COMMON_FIRMWARE_OPERATION_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/common/fw_validation.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "fw_validation.h"
+#include <linux/errno.h>
+#include <linux/types.h>
+
+
+
+/* when reading the firmware we don't want to read past the firmware_size,
+ so we have a consumed_firmware_offset that is updated _before_ accessing data at that offset
+ of firmware_base_address */
+#define CONSUME_FIRMWARE(__size, __err) do { \
+ consumed_firmware_offset += (u32) (__size); \
+ if ((firmware_size < (__size)) || (firmware_size < consumed_firmware_offset)) { \
+ err = __err; \
+ goto exit; \
+ } \
+ } while(0)
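+
+/* For example, with firmware_size = 100 and consumed_firmware_offset = 90,
+   CONSUME_FIRMWARE(20, -EINVAL) advances the offset to 110, fails the bounds
+   check and jumps to exit, so the out-of-range bytes are never dereferenced. */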
+
+int FW_VALIDATION__validate_fw_header(uintptr_t firmware_base_address,
+ size_t firmware_size, u32 max_code_size, u32 *outer_consumed_firmware_offset,
+ firmware_header_t **out_firmware_header, enum hailo_board_type board_type)
+{
+ int err = -EINVAL;
+ firmware_header_t *firmware_header = NULL;
+ u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
+ u32 expected_firmware_magic = 0;
+
+ firmware_header = (firmware_header_t *) (firmware_base_address + consumed_firmware_offset);
+ CONSUME_FIRMWARE(sizeof(firmware_header_t), -EINVAL);
+
+ switch (board_type) {
+ case HAILO_BOARD_TYPE_HAILO8:
+ expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO8;
+ break;
+ case HAILO_BOARD_TYPE_HAILO15:
+ expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
+ break;
+ case HAILO_BOARD_TYPE_PLUTO:
+ expected_firmware_magic = FIRMWARE_HEADER_MAGIC_PLUTO;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (expected_firmware_magic != firmware_header->magic) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Validate that the firmware header version is supported */
+ switch(firmware_header->header_version) {
+ case FIRMWARE_HEADER_VERSION_INITIAL:
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ break;
+ }
+
+ if (MINIMUM_FIRMWARE_CODE_SIZE > firmware_header->code_size) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (max_code_size < firmware_header->code_size) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ CONSUME_FIRMWARE(firmware_header->code_size, -EINVAL);
+
+ *outer_consumed_firmware_offset = consumed_firmware_offset;
+ *out_firmware_header = firmware_header;
+ err = 0;
+
+exit:
+ return err;
+}
+
+int FW_VALIDATION__validate_cert_header(uintptr_t firmware_base_address,
+ size_t firmware_size, u32 *outer_consumed_firmware_offset, secure_boot_certificate_t **out_firmware_cert)
+{
+
+ secure_boot_certificate_t *firmware_cert = NULL;
+ int err = -EINVAL;
+ u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
+
+ firmware_cert = (secure_boot_certificate_t *) (firmware_base_address + consumed_firmware_offset);
+ CONSUME_FIRMWARE(sizeof(secure_boot_certificate_t), -EINVAL);
+
+ if ((MAXIMUM_FIRMWARE_CERT_KEY_SIZE < firmware_cert->key_size) ||
+ (MAXIMUM_FIRMWARE_CERT_CONTENT_SIZE < firmware_cert->content_size)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ CONSUME_FIRMWARE(firmware_cert->key_size, -EINVAL);
+ CONSUME_FIRMWARE(firmware_cert->content_size, -EINVAL);
+
+ *outer_consumed_firmware_offset = consumed_firmware_offset;
+ *out_firmware_cert = firmware_cert;
+ err = 0;
+
+exit:
+ return err;
+}
+
--- /dev/null
+++ b/drivers/media/pci/hailo/common/fw_validation.h
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef PCIE_COMMON_FIRMWARE_HEADER_UTILS_H_
+#define PCIE_COMMON_FIRMWARE_HEADER_UTILS_H_
+
+#include "hailo_ioctl_common.h"
+#include <linux/types.h>
+
+#define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
+#define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
+// TODO - HRT-11344 : change fw magic to pluto specific
+#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
+
+#ifndef HAILO_EMULATOR
+#define FIRMWARE_WAIT_TIMEOUT_MS (5000)
+#else /* ifndef HAILO_EMULATOR */
+#define FIRMWARE_WAIT_TIMEOUT_MS (500000)
+#endif /* ifndef HAILO_EMULATOR */
+
+typedef enum {
+ FIRMWARE_HEADER_VERSION_INITIAL = 0,
+
+ /* MUST BE LAST */
+ FIRMWARE_HEADER_VERSION_COUNT
+} firmware_header_version_t;
+
+typedef struct {
+ u32 magic;
+ u32 header_version;
+ u32 firmware_major;
+ u32 firmware_minor;
+ u32 firmware_revision;
+ u32 code_size;
+} firmware_header_t;
+
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4200)
+#endif /* _MSC_VER */
+
+typedef struct {
+ u32 key_size;
+ u32 content_size;
+ u8 certificates_data[0];
+} secure_boot_certificate_t;
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif /* _MSC_VER */
+
+#define MINIMUM_FIRMWARE_CODE_SIZE (20*4)
+#define MAXIMUM_FIRMWARE_CERT_KEY_SIZE (0x1000)
+#define MAXIMUM_FIRMWARE_CERT_CONTENT_SIZE (0x1000)
+
+int FW_VALIDATION__validate_fw_header(uintptr_t firmware_base_address,
+ size_t firmware_size, u32 max_code_size, u32 *outer_consumed_firmware_offset,
+ firmware_header_t **out_firmware_header, enum hailo_board_type board_type);
+
+int FW_VALIDATION__validate_cert_header(uintptr_t firmware_base_address,
+ size_t firmware_size, u32 *outer_consumed_firmware_offset, secure_boot_certificate_t **out_firmware_cert);
+
+#endif
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/hailo_ioctl_common.h
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) AND MIT
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_IOCTL_COMMON_H_
+#define _HAILO_IOCTL_COMMON_H_
+
+
+// This value is not easily changeable.
+// For example: the channel interrupts ioctls assume we have up to 32 channels
+#define MAX_VDMA_CHANNELS_PER_ENGINE (32)
+#define MAX_VDMA_ENGINES (3)
+#define SIZE_OF_VDMA_DESCRIPTOR (16)
+#define VDMA_DEST_CHANNELS_START (16)
+
+#define HAILO_VDMA_MAX_ONGOING_TRANSFERS (128)
+#define HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK (HAILO_VDMA_MAX_ONGOING_TRANSFERS - 1)
+
+#define CHANNEL_IRQ_TIMESTAMPS_SIZE (HAILO_VDMA_MAX_ONGOING_TRANSFERS * 2)
+#define CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK (CHANNEL_IRQ_TIMESTAMPS_SIZE - 1)
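+// Since both sizes are powers of two, a running index can be wrapped with a
+// single mask, e.g. next = (head + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK.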
+
+#define INVALID_DRIVER_HANDLE_VALUE ((uintptr_t)-1)
+
+// Used by windows and unix driver to raise the right CPU control handle to the FW. The same as in pcie_service FW
+#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
+#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
+#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
+#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
+#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
+#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
+
+#define INVALID_VDMA_CHANNEL (0xff)
+
+#if !defined(__cplusplus) && defined(NTDDI_VERSION)
+#include <wdm.h>
+typedef ULONG uint32_t;
+typedef UCHAR uint8_t;
+typedef USHORT uint16_t;
+typedef ULONGLONG uint64_t;
+#endif /* !defined(__cplusplus) && defined(NTDDI_VERSION) */
+
+
+#ifdef _MSC_VER
+
+#include <initguid.h>
+
+#if !defined(bool) && !defined(__cplusplus)
+typedef uint8_t bool;
+#endif // !defined(bool) && !defined(__cplusplus)
+
+#if !defined(INT_MAX)
+#define INT_MAX 0x7FFFFFFF
+#endif // !defined(INT_MAX)
+
+
+// {d88d31f1-fede-4e71-ac2a-6ce0018c1501}
+DEFINE_GUID (GUID_DEVINTERFACE_HailoKM,
+ 0xd88d31f1,0xfede,0x4e71,0xac,0x2a,0x6c,0xe0,0x01,0x8c,0x15,0x01);
+
+#define HAILO_GENERAL_IOCTL_MAGIC 0
+#define HAILO_VDMA_IOCTL_MAGIC 1
+#define HAILO_NON_LINUX_IOCTL_MAGIC 2
+
+#define HAILO_IOCTL_COMPATIBLE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x802, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+typedef struct tCompatibleHailoIoctlParam
+{
+ union {
+ struct {
+ ULONG Size : 16;
+ ULONG Code : 8;
+ ULONG Type : 6;
+ ULONG Read : 1;
+ ULONG Write : 1;
+ } bits;
+ ULONG value;
+ } u;
+} tCompatibleHailoIoctlParam;
+
+static ULONG FORCEINLINE _IOC_(ULONG nr, ULONG type, ULONG size, bool read, bool write)
+{
+ struct tCompatibleHailoIoctlParam param;
+ param.u.bits.Code = nr;
+ param.u.bits.Size = size;
+ param.u.bits.Type = type;
+ param.u.bits.Read = read ? 1 : 0;
+ param.u.bits.Write = write ? 1 : 0;
+ return param.u.value;
+}
+
+#define _IOW_(type,nr,size) _IOC_(nr, type, sizeof(size), true, false)
+#define _IOR_(type,nr,size) _IOC_(nr, type, sizeof(size), false, true)
+#define _IOWR_(type,nr,size) _IOC_(nr, type, sizeof(size), true, true)
+#define _IO_(type,nr) _IOC_(nr, type, 0, false, false)
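+
+// For example, HAILO_MEMORY_TRANSFER (defined below) expands here to
+// _IOC_(HAILO_MEMORY_TRANSFER_CODE, HAILO_GENERAL_IOCTL_MAGIC,
+// sizeof(struct hailo_memory_transfer_params), true, true), packing the size,
+// code, type and read/write flags into a single ULONG.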
+
+#elif defined(__linux__) // #ifdef _MSC_VER
+#ifndef __KERNEL__
+// include the userspace headers only if this file is included by a user space program
+// It is discouraged to include them when compiling the driver (https://lwn.net/Articles/113349/)
+#include <stdint.h>
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/kernel.h>
+#endif // ifndef __KERNEL__
+
+#include <linux/ioctl.h>
+
+#define _IOW_ _IOW
+#define _IOR_ _IOR
+#define _IOWR_ _IOWR
+#define _IO_ _IO
+
+#define HAILO_GENERAL_IOCTL_MAGIC 'g'
+#define HAILO_VDMA_IOCTL_MAGIC 'v'
+#define HAILO_NON_LINUX_IOCTL_MAGIC 'w'
+
+#elif defined(__QNX__) // #ifdef _MSC_VER
+#include <devctl.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <stdbool.h>
+
+// defines for devctl
+#define _IOW_ __DIOF
+#define _IOR_ __DIOT
+#define _IOWR_ __DIOTF
+#define _IO_ __DION
+#define HAILO_GENERAL_IOCTL_MAGIC _DCMD_ALL
+#define HAILO_VDMA_IOCTL_MAGIC _DCMD_MISC
+#define HAILO_NON_LINUX_IOCTL_MAGIC _DCMD_PROC
+
+#else // #ifdef _MSC_VER
+#error "unsupported platform!"
+#endif
+
+#pragma pack(push, 1)
+
+struct hailo_channel_interrupt_timestamp {
+ uint64_t timestamp_ns;
+ uint16_t desc_num_processed;
+};
+
+typedef struct {
+ uint16_t is_buffer_in_use;
+ uint16_t buffer_len;
+} hailo_d2h_buffer_details_t;
+
+// This struct is the same as `enum dma_data_direction` (defined in linux/dma-direction)
+enum hailo_dma_data_direction {
+ HAILO_DMA_BIDIRECTIONAL = 0,
+ HAILO_DMA_TO_DEVICE = 1,
+ HAILO_DMA_FROM_DEVICE = 2,
+ HAILO_DMA_NONE = 3,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_DMA_MAX_ENUM = INT_MAX,
+};
+
+// Enum that determines if buffer should be allocated from user space or from driver
+enum hailo_allocation_mode {
+ HAILO_ALLOCATION_MODE_USERSPACE = 0,
+ HAILO_ALLOCATION_MODE_DRIVER = 1,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_ALLOCATION_MODE_MAX_ENUM = INT_MAX,
+};
+
+/* structure used in ioctl HAILO_VDMA_BUFFER_MAP */
+struct hailo_vdma_buffer_map_params {
+#if defined(__linux__) || defined(_MSC_VER)
+ void* user_address; // in
+#elif defined(__QNX__)
+ shm_handle_t shared_memory_handle; // in
+#else
+#error "unsupported platform!"
+#endif // __linux__
+ size_t size; // in
+ enum hailo_dma_data_direction data_direction; // in
+ uintptr_t allocated_buffer_handle; // in
+ size_t mapped_handle; // out
+};
+
+/* structure used in ioctl HAILO_VDMA_BUFFER_UNMAP */
+struct hailo_vdma_buffer_unmap_params {
+ size_t mapped_handle;
+};
+
+/* structure used in ioctl HAILO_DESC_LIST_CREATE */
+struct hailo_desc_list_create_params {
+ size_t desc_count; // in
+ uint16_t desc_page_size; // in
+ bool is_circular; // in
+ uintptr_t desc_handle; // out
+ uint64_t dma_address; // out
+};
+
+/* structure used in ioctl HAILO_DESC_LIST_RELEASE */
+struct hailo_desc_list_release_params {
+ uintptr_t desc_handle; // in
+};
+
+/* structure used in ioctl HAILO_NON_LINUX_DESC_LIST_MMAP */
+struct hailo_non_linux_desc_list_mmap_params {
+ uintptr_t desc_handle; // in
+ size_t size; // in
+ void* user_address; // out
+};
+
+/* structure used in ioctl HAILO_DESC_LIST_BIND_VDMA_BUFFER */
+struct hailo_desc_list_bind_vdma_buffer_params {
+ size_t buffer_handle; // in
+ size_t buffer_size; // in
+ size_t buffer_offset; // in
+ uintptr_t desc_handle; // in
+ uint8_t channel_index; // in
+ uint32_t starting_desc; // in
+};
+
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_ENABLE */
+struct hailo_vdma_interrupts_enable_params {
+ uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
+ bool enable_timestamps_measure; // in
+};
+
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_DISABLE */
+struct hailo_vdma_interrupts_disable_params {
+ uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
+};
+
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_WAIT */
+struct hailo_vdma_interrupts_channel_data {
+ uint8_t engine_index;
+ uint8_t channel_index;
+    bool is_active;                  // If not active, num_processed is ignored.
+ uint16_t host_num_processed;
+ uint8_t host_error; // Channel errors bits on source side
+ uint8_t device_error; // Channel errors bits on dest side
+ bool validation_success; // If the validation of the channel was successful
+};
+
+struct hailo_vdma_interrupts_wait_params {
+ uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
+ uint8_t channels_count; // out
+ struct hailo_vdma_interrupts_channel_data
+ irq_data[MAX_VDMA_CHANNELS_PER_ENGINE * MAX_VDMA_ENGINES]; // out
+};
+
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS */
+struct hailo_vdma_interrupts_read_timestamp_params {
+ uint8_t engine_index; // in
+ uint8_t channel_index; // in
+ uint32_t timestamps_count; // out
+ struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE]; // out
+};
+
+/* structure used in ioctl HAILO_FW_CONTROL */
+#define MAX_CONTROL_LENGTH (1500)
+#define PCIE_EXPECTED_MD5_LENGTH (16)
+
+
+/* structure used in ioctl HAILO_FW_CONTROL and HAILO_READ_LOG */
+enum hailo_cpu_id {
+ HAILO_CPU_ID_CPU0 = 0,
+ HAILO_CPU_ID_CPU1,
+ HAILO_CPU_ID_NONE,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_CPU_MAX_ENUM = INT_MAX,
+};
+
+struct hailo_fw_control {
+ // expected_md5+buffer_len+buffer must be in this order at the start of the struct
+ uint8_t expected_md5[PCIE_EXPECTED_MD5_LENGTH];
+ uint32_t buffer_len;
+ uint8_t buffer[MAX_CONTROL_LENGTH];
+ uint32_t timeout_ms;
+ enum hailo_cpu_id cpu_id;
+};
+
+/* structure used in ioctl HAILO_MEMORY_TRANSFER */
+// Max bar transfer size gotten from ATR0_TABLE_SIZE
+#define MAX_MEMORY_TRANSFER_LENGTH (4096)
+
+enum hailo_transfer_direction {
+ TRANSFER_READ = 0,
+ TRANSFER_WRITE,
+
+ /** Max enum value to maintain ABI Integrity */
+ TRANSFER_MAX_ENUM = INT_MAX,
+};
+
+enum hailo_transfer_memory_type {
+ HAILO_TRANSFER_DEVICE_DIRECT_MEMORY,
+
+ // vDMA memories
+ HAILO_TRANSFER_MEMORY_VDMA0 = 0x100,
+ HAILO_TRANSFER_MEMORY_VDMA1,
+ HAILO_TRANSFER_MEMORY_VDMA2,
+
+ // PCIe driver memories
+ HAILO_TRANSFER_MEMORY_PCIE_BAR0 = 0x200,
+ HAILO_TRANSFER_MEMORY_PCIE_BAR2 = 0x202,
+ HAILO_TRANSFER_MEMORY_PCIE_BAR4 = 0x204,
+
+ // DRAM DMA driver memories
+ HAILO_TRANSFER_MEMORY_DMA_ENGINE0 = 0x300,
+ HAILO_TRANSFER_MEMORY_DMA_ENGINE1,
+ HAILO_TRANSFER_MEMORY_DMA_ENGINE2,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_TRANSFER_MEMORY_MAX_ENUM = INT_MAX,
+};
+
+struct hailo_memory_transfer_params {
+ enum hailo_transfer_direction transfer_direction; // in
+ enum hailo_transfer_memory_type memory_type; // in
+ uint64_t address; // in
+ size_t count; // in
+ uint8_t buffer[MAX_MEMORY_TRANSFER_LENGTH]; // in/out
+};
+
+/* structure used in ioctl HAILO_VDMA_BUFFER_SYNC */
+enum hailo_vdma_buffer_sync_type {
+ HAILO_SYNC_FOR_CPU,
+ HAILO_SYNC_FOR_DEVICE,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_SYNC_MAX_ENUM = INT_MAX,
+};
+
+struct hailo_vdma_buffer_sync_params {
+ size_t handle; // in
+ enum hailo_vdma_buffer_sync_type sync_type; // in
+ size_t offset; // in
+ size_t count; // in
+};
+
+/* structure used in ioctl HAILO_READ_NOTIFICATION */
+#define MAX_NOTIFICATION_LENGTH (1500)
+
+struct hailo_d2h_notification {
+ size_t buffer_len; // out
+ uint8_t buffer[MAX_NOTIFICATION_LENGTH]; // out
+};
+
+enum hailo_board_type {
+ HAILO_BOARD_TYPE_HAILO8 = 0,
+ HAILO_BOARD_TYPE_HAILO15,
+ HAILO_BOARD_TYPE_PLUTO,
+ HAILO_BOARD_TYPE_COUNT,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_BOARD_TYPE_MAX_ENUM = INT_MAX
+};
+
+enum hailo_dma_type {
+ HAILO_DMA_TYPE_PCIE,
+ HAILO_DMA_TYPE_DRAM,
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_DMA_TYPE_MAX_ENUM = INT_MAX,
+};
+
+struct hailo_device_properties {
+ uint16_t desc_max_page_size;
+ enum hailo_board_type board_type;
+ enum hailo_allocation_mode allocation_mode;
+ enum hailo_dma_type dma_type;
+ size_t dma_engines_count;
+ bool is_fw_loaded;
+#ifdef __QNX__
+ pid_t resource_manager_pid;
+#endif // __QNX__
+};
+
+struct hailo_driver_info {
+ uint32_t major_version;
+ uint32_t minor_version;
+ uint32_t revision_version;
+};
+
+/* structure used in ioctl HAILO_READ_LOG */
+#define MAX_FW_LOG_BUFFER_LENGTH (512)
+
+struct hailo_read_log_params {
+ enum hailo_cpu_id cpu_id; // in
+ uint8_t buffer[MAX_FW_LOG_BUFFER_LENGTH]; // out
+ size_t buffer_size; // in
+ size_t read_bytes; // out
+};
+
+/* structure used in ioctl HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC */
+struct hailo_allocate_low_memory_buffer_params {
+ size_t buffer_size; // in
+ uintptr_t buffer_handle; // out
+};
+
+/* structure used in ioctl HAILO_VDMA_LOW_MEMORY_BUFFER_FREE */
+struct hailo_free_low_memory_buffer_params {
+ uintptr_t buffer_handle; // in
+};
+
+struct hailo_mark_as_in_use_params {
+ bool in_use; // out
+};
+
+/* structure used in ioctl HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC */
+struct hailo_allocate_continuous_buffer_params {
+ size_t buffer_size; // in
+ uintptr_t buffer_handle; // out
+ uint64_t dma_address; // out
+};
+
+/* structure used in ioctl HAILO_VDMA_CONTINUOUS_BUFFER_FREE */
+struct hailo_free_continuous_buffer_params {
+ uintptr_t buffer_handle; // in
+};
+
+/* structures used in ioctl HAILO_VDMA_LAUNCH_TRANSFER */
+struct hailo_vdma_transfer_buffer {
+ size_t mapped_buffer_handle; // in
+ uint32_t offset; // in
+ uint32_t size; // in
+};
+
+enum hailo_vdma_interrupts_domain {
+ HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
+ HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
+ HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
+
+ /** Max enum value to maintain ABI Integrity */
+ HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
+};
+
+// We allow maximum 2 buffers per transfer since we may have an extra buffer
+// to make sure each buffer is aligned to page size.
+#define HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER (2)
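+// e.g. a transfer whose start address is not page-aligned can be described as
+// a small unaligned head buffer plus a page-aligned remainder.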
+
+struct hailo_vdma_launch_transfer_params {
+ uint8_t engine_index; // in
+ uint8_t channel_index; // in
+
+ uintptr_t desc_handle; // in
+ uint32_t starting_desc; // in
+
+ bool should_bind; // in, if false, assumes buffer already bound.
+ uint8_t buffers_count; // in
+ struct hailo_vdma_transfer_buffer
+ buffers[HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER]; // in
+
+ enum hailo_vdma_interrupts_domain first_interrupts_domain; // in
+ enum hailo_vdma_interrupts_domain last_interrupts_domain; // in
+
+ bool is_debug; // in, if set program hw to send
+ // more info (e.g desc complete status)
+
+    uint32_t descs_programed; // out, number of descriptors programmed.
+};
+
+#ifdef _MSC_VER
+struct tCompatibleHailoIoctlData
+{
+ tCompatibleHailoIoctlParam Parameters;
+ ULONG_PTR Value;
+ union {
+ struct hailo_memory_transfer_params MemoryTransfer;
+ struct hailo_vdma_interrupts_enable_params VdmaInterruptsEnable;
+ struct hailo_vdma_interrupts_disable_params VdmaInterruptsDisable;
+ struct hailo_vdma_interrupts_read_timestamp_params VdmaInterruptsReadTimestamps;
+ struct hailo_vdma_interrupts_wait_params VdmaInterruptsWait;
+ struct hailo_vdma_buffer_sync_params VdmaBufferSync;
+ struct hailo_fw_control FirmwareControl;
+ struct hailo_vdma_buffer_map_params VdmaBufferMap;
+ struct hailo_vdma_buffer_unmap_params VdmaBufferUnmap;
+ struct hailo_desc_list_create_params DescListCreate;
+ struct hailo_desc_list_release_params DescListReleaseParam;
+ struct hailo_desc_list_bind_vdma_buffer_params DescListBind;
+ struct hailo_d2h_notification D2HNotification;
+ struct hailo_device_properties DeviceProperties;
+ struct hailo_driver_info DriverInfo;
+ struct hailo_non_linux_desc_list_mmap_params DescListMmap;
+ struct hailo_read_log_params ReadLog;
+ struct hailo_mark_as_in_use_params MarkAsInUse;
+ struct hailo_vdma_launch_transfer_params LaunchTransfer;
+ } Buffer;
+};
+#endif // _MSC_VER
+
+#pragma pack(pop)
+
+enum hailo_general_ioctl_code {
+ HAILO_MEMORY_TRANSFER_CODE,
+ HAILO_FW_CONTROL_CODE,
+ HAILO_READ_NOTIFICATION_CODE,
+ HAILO_DISABLE_NOTIFICATION_CODE,
+ HAILO_QUERY_DEVICE_PROPERTIES_CODE,
+ HAILO_QUERY_DRIVER_INFO_CODE,
+ HAILO_READ_LOG_CODE,
+ HAILO_RESET_NN_CORE_CODE,
+
+ // Must be last
+ HAILO_GENERAL_IOCTL_MAX_NR,
+};
+
+#define HAILO_MEMORY_TRANSFER _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_MEMORY_TRANSFER_CODE, struct hailo_memory_transfer_params)
+#define HAILO_FW_CONTROL _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
+#define HAILO_READ_NOTIFICATION _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
+#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
+#define HAILO_QUERY_DEVICE_PROPERTIES _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DEVICE_PROPERTIES_CODE, struct hailo_device_properties)
+#define HAILO_QUERY_DRIVER_INFO _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DRIVER_INFO_CODE, struct hailo_driver_info)
+#define HAILO_READ_LOG _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
+#define HAILO_RESET_NN_CORE _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
+
+enum hailo_vdma_ioctl_code {
+ HAILO_VDMA_INTERRUPTS_ENABLE_CODE,
+ HAILO_VDMA_INTERRUPTS_DISABLE_CODE,
+ HAILO_VDMA_INTERRUPTS_WAIT_CODE,
+ HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE,
+ HAILO_VDMA_BUFFER_MAP_CODE,
+ HAILO_VDMA_BUFFER_UNMAP_CODE,
+ HAILO_VDMA_BUFFER_SYNC_CODE,
+ HAILO_DESC_LIST_CREATE_CODE,
+ HAILO_DESC_LIST_RELEASE_CODE,
+ HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE,
+ HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE,
+ HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE,
+ HAILO_MARK_AS_IN_USE_CODE,
+ HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE,
+ HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE,
+ HAILO_VDMA_LAUNCH_TRANSFER_CODE,
+
+ // Must be last
+ HAILO_VDMA_IOCTL_MAX_NR,
+};
+
+#define HAILO_VDMA_INTERRUPTS_ENABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_ENABLE_CODE, struct hailo_vdma_interrupts_enable_params)
+#define HAILO_VDMA_INTERRUPTS_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_DISABLE_CODE, struct hailo_vdma_interrupts_disable_params)
+#define HAILO_VDMA_INTERRUPTS_WAIT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_WAIT_CODE, struct hailo_vdma_interrupts_wait_params)
+#define HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE, struct hailo_vdma_interrupts_read_timestamp_params)
+
+#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
+#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
+#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
+
+#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
+#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
+#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
+
+#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
+#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
+
+#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
+
+#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
+#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
+
+#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
+
+
+enum hailo_non_linux_ioctl_code {
+ HAILO_NON_LINUX_DESC_LIST_MMAP_CODE,
+
+ // Must be last
+ HAILO_NON_LINUX_IOCTL_MAX_NR,
+};
+
+#define HAILO_NON_LINUX_DESC_LIST_MMAP _IOWR_(HAILO_NON_LINUX_IOCTL_MAGIC, HAILO_NON_LINUX_DESC_LIST_MMAP_CODE, struct hailo_non_linux_desc_list_mmap_params)
+
+
+#endif /* _HAILO_IOCTL_COMMON_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/common/hailo_pcie_version.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_COMMON_PCIE_VERSION_H_
+#define _HAILO_COMMON_PCIE_VERSION_H_
+
+#define HAILO_DRV_VER_MAJOR 4
+#define HAILO_DRV_VER_MINOR 17
+#define HAILO_DRV_VER_REVISION 0
+
+#endif /* _HAILO_COMMON_PCIE_VERSION_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/hailo_resource.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "hailo_resource.h"
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+
+u8 hailo_resource_read8(struct hailo_resource *resource, size_t offset)
+{
+ return ioread8((u8*)resource->address + offset);
+}
+
+u16 hailo_resource_read16(struct hailo_resource *resource, size_t offset)
+{
+ return ioread16((u8*)resource->address + offset);
+}
+
+u32 hailo_resource_read32(struct hailo_resource *resource, size_t offset)
+{
+ return ioread32((u8*)resource->address + offset);
+}
+
+void hailo_resource_write8(struct hailo_resource *resource, size_t offset, u8 value)
+{
+ iowrite8(value, (u8*)resource->address + offset);
+}
+
+void hailo_resource_write16(struct hailo_resource *resource, size_t offset, u16 value)
+{
+ iowrite16(value, (u8*)resource->address + offset);
+}
+
+void hailo_resource_write32(struct hailo_resource *resource, size_t offset, u32 value)
+{
+ iowrite32(value, (u8*)resource->address + offset);
+}
+
+void hailo_resource_read_buffer(struct hailo_resource *resource, size_t offset, size_t count, void *to)
+{
+ // Copied and modified from linux aarch64 (using ioread32 instead of readq that does not work all the time)
+ uintptr_t to_ptr = (uintptr_t)to;
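+    // Head: copy byte-by-byte until both source and destination are 32-bit aligned.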
+ while ((count > 0) && (!IS_ALIGNED(to_ptr, 4) || !IS_ALIGNED((uintptr_t)resource->address + offset, 4))) {
+ *(u8*)to_ptr = hailo_resource_read8(resource, offset);
+ to_ptr++;
+ offset++;
+ count--;
+ }
+
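+    // Body: copy the bulk in aligned 32-bit reads.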
+ while (count >= 4) {
+ *(u32*)to_ptr = hailo_resource_read32(resource, offset);
+ to_ptr += 4;
+ offset += 4;
+ count -= 4;
+ }
+
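+    // Tail: copy any remaining bytes one at a time.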
+ while (count > 0) {
+ *(u8*)to_ptr = hailo_resource_read8(resource, offset);
+ to_ptr++;
+ offset++;
+ count--;
+ }
+}
+
+int hailo_resource_write_buffer(struct hailo_resource *resource, size_t offset, size_t count, const void *from)
+{
+ // read the bytes after writing them for flushing the data. This function also checks if the pcie link
+ // is broken.
+ uintptr_t from_ptr = (uintptr_t)from;
+ while (count && (!IS_ALIGNED(resource->address + offset, 4) || !IS_ALIGNED(from_ptr, 4))) {
+ hailo_resource_write8(resource, offset, *(u8*)from_ptr);
+ if (hailo_resource_read8(resource, offset) != *(u8*)from_ptr) {
+ return -EIO;
+ }
+ from_ptr++;
+ offset++;
+ count--;
+ }
+
+ while (count >= 4) {
+ hailo_resource_write32(resource, offset, *(u32*)from_ptr);
+ if (hailo_resource_read32(resource, offset) != *(u32*)from_ptr) {
+ return -EIO;
+ }
+ from_ptr += 4;
+ offset += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ hailo_resource_write8(resource, offset, *(u8*)from_ptr);
+ if (hailo_resource_read8(resource, offset) != *(u8*)from_ptr) {
+ return -EIO;
+ }
+ from_ptr++;
+ offset++;
+ count--;
+ }
+
+ return 0;
+}
+
+int hailo_resource_transfer(struct hailo_resource *resource, struct hailo_memory_transfer_params *transfer)
+{
+ // Check for transfer size (address is in resources address-space)
+ if ((transfer->address + transfer->count) > (u64)resource->size) {
+ return -EINVAL;
+ }
+
+ if (transfer->count > ARRAY_SIZE(transfer->buffer)) {
+ return -EINVAL;
+ }
+
+ switch (transfer->transfer_direction) {
+ case TRANSFER_READ:
+ hailo_resource_read_buffer(resource, (u32)transfer->address, transfer->count, transfer->buffer);
+ return 0;
+ case TRANSFER_WRITE:
+ return hailo_resource_write_buffer(resource, (u32)transfer->address, transfer->count, transfer->buffer);
+ default:
+ return -EINVAL;
+ }
+}
--- /dev/null
+++ b/drivers/media/pci/hailo/common/hailo_resource.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_COMMON_HAILO_RESOURCE_H_
+#define _HAILO_COMMON_HAILO_RESOURCE_H_
+
+#include "hailo_ioctl_common.h"
+#include <linux/types.h>
+
+struct hailo_resource {
+ uintptr_t address;
+ size_t size;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Implemented by the specific platform
+u32 hailo_resource_read32(struct hailo_resource *resource, size_t offset);
+u16 hailo_resource_read16(struct hailo_resource *resource, size_t offset);
+u8 hailo_resource_read8(struct hailo_resource *resource, size_t offset);
+void hailo_resource_write32(struct hailo_resource *resource, size_t offset, u32 value);
+void hailo_resource_write16(struct hailo_resource *resource, size_t offset, u16 value);
+void hailo_resource_write8(struct hailo_resource *resource, size_t offset, u8 value);
+
+void hailo_resource_read_buffer(struct hailo_resource *resource, size_t offset, size_t count, void *to);
+int hailo_resource_write_buffer(struct hailo_resource *resource, size_t offset, size_t count, const void *from);
+
+// Transfer (read/write) the given resource into/from transfer params.
+int hailo_resource_transfer(struct hailo_resource *resource, struct hailo_memory_transfer_params *transfer);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HAILO_COMMON_HAILO_RESOURCE_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/pcie_common.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "pcie_common.h"
+#include "fw_operation.h"
+
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+
+
+#define BSC_IMASK_HOST (0x0188)
+#define BCS_ISTATUS_HOST (0x018C)
+#define BCS_SOURCE_INTERRUPT_PER_CHANNEL (0x400)
+#define BCS_DESTINATION_INTERRUPT_PER_CHANNEL (0x500)
+
+#define PO2_ROUND_UP(size, alignment) ((size + alignment-1) & ~(alignment-1))
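+// Note: valid only when alignment is a power of two.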
+
+#define ATR0_PARAM (0x17)
+#define ATR0_SRC_ADDR (0x0)
+#define ATR0_TRSL_ADDR2 (0x0)
+#define ATR0_TRSL_PARAM (6)
+
+#define ATR0_PCIE_BRIDGE_OFFSET (0x700)
+#define ATR0_TABLE_SIZE (0x1000u)
+#define ATR0_TABLE_SIZE_MASK (0x1000u - 1)
+
+#define MAXIMUM_APP_FIRMWARE_CODE_SIZE (0x40000)
+#define MAXIMUM_CORE_FIRMWARE_CODE_SIZE (0x20000)
+
+#define FIRMWARE_LOAD_WAIT_MAX_RETRIES (100)
+#define FIRMWARE_LOAD_SLEEP_MS (50)
+
+#define PCIE_APP_CPU_DEBUG_OFFSET (8*1024)
+#define PCIE_CORE_CPU_DEBUG_OFFSET (PCIE_APP_CPU_DEBUG_OFFSET + DEBUG_BUFFER_TOTAL_SIZE)
+
+#define PCIE_D2H_NOTIFICATION_SRAM_OFFSET (0x640 + 0x640)
+#define PCIE_REQUEST_SIZE_OFFSET (0x640)
+
+#define PCIE_CONFIG_VENDOR_OFFSET (0x0098)
+
+#define HAILO_PCIE_HOST_DMA_DATA_ID (0)
+#define HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 4)
+#define HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK (1 << 5)
+
+typedef u32 hailo_ptr_t;
+
+struct hailo_fw_addresses {
+ u32 boot_fw_header;
+ u32 app_fw_code_ram_base;
+ u32 boot_key_cert;
+ u32 boot_cont_cert;
+ u32 boot_fw_trigger;
+ u32 core_code_ram_base;
+ u32 core_fw_header;
+ u32 atr0_trsl_addr1;
+ u32 raise_ready_offset;
+};
+
+struct hailo_atr_config {
+ u32 atr_param;
+ u32 atr_src;
+ u32 atr_trsl_addr_1;
+ u32 atr_trsl_addr_2;
+ u32 atr_trsl_param;
+};
+
+struct hailo_board_compatibility {
+ struct hailo_fw_addresses fw_addresses;
+ const char *fw_filename;
+ const struct hailo_config_constants board_cfg;
+ const struct hailo_config_constants fw_cfg;
+};
+
+static const struct hailo_board_compatibility compat[HAILO_BOARD_TYPE_COUNT] = {
+ [HAILO_BOARD_TYPE_HAILO8] = {
+ .fw_addresses = {
+ .boot_fw_header = 0xE0030,
+ .boot_fw_trigger = 0xE0980,
+ .boot_key_cert = 0xE0048,
+ .boot_cont_cert = 0xE0390,
+ .app_fw_code_ram_base = 0x60000,
+ .core_code_ram_base = 0xC0000,
+ .core_fw_header = 0xA0000,
+ .atr0_trsl_addr1 = 0x60000000,
+ .raise_ready_offset = 0x1684,
+ },
+ .fw_filename = "hailo/hailo8_fw.bin",
+ .board_cfg = {
+ .filename = "hailo/hailo8_board_cfg.bin",
+ .address = 0x60001000,
+ .max_size = PCIE_HAILO8_BOARD_CFG_MAX_SIZE,
+ },
+ .fw_cfg = {
+ .filename = "hailo/hailo8_fw_cfg.bin",
+ .address = 0x60001500,
+ .max_size = PCIE_HAILO8_FW_CFG_MAX_SIZE,
+ },
+ },
+ [HAILO_BOARD_TYPE_HAILO15] = {
+ .fw_addresses = {
+ .boot_fw_header = 0x88000,
+ .boot_fw_trigger = 0x88c98,
+ .boot_key_cert = 0x88018,
+ .boot_cont_cert = 0x886a8,
+ .app_fw_code_ram_base = 0x20000,
+ .core_code_ram_base = 0x60000,
+ .core_fw_header = 0xC0000,
+ .atr0_trsl_addr1 = 0x000BE000,
+ .raise_ready_offset = 0x1754,
+ },
+ .fw_filename = "hailo/hailo15_fw.bin",
+ .board_cfg = {
+ .filename = NULL,
+ .address = 0,
+ .max_size = 0,
+ },
+ .fw_cfg = {
+ .filename = NULL,
+ .address = 0,
+ .max_size = 0,
+ },
+ },
+    // HRT-11344 : none of these matter except raise_ready_offset, seeing as we load the fw separately - not through the driver
+ // After implementing bootloader put correct values here
+ [HAILO_BOARD_TYPE_PLUTO] = {
+ .fw_addresses = {
+ .boot_fw_header = 0x88000,
+ .boot_fw_trigger = 0x88c98,
+ .boot_key_cert = 0x88018,
+ .boot_cont_cert = 0x886a8,
+ .app_fw_code_ram_base = 0x20000,
+ .core_code_ram_base = 0x60000,
+ .core_fw_header = 0xC0000,
+ .atr0_trsl_addr1 = 0x000BE000,
+ // NOTE: After they update hw consts - check register fw_access_interrupt_w1s of pcie_config
+ .raise_ready_offset = 0x174c,
+ },
+ .fw_filename = "hailo/pluto_fw.bin",
+ .board_cfg = {
+ .filename = NULL,
+ .address = 0,
+ .max_size = 0,
+ },
+ .fw_cfg = {
+ .filename = NULL,
+ .address = 0,
+ .max_size = 0,
+ },
+ }
+};
+
+
+bool hailo_pcie_read_interrupt(struct hailo_pcie_resources *resources, struct hailo_pcie_interrupt_source *source)
+{
+ u32 channel_data_source = 0;
+ u32 channel_data_dest = 0;
+ memset(source, 0, sizeof(*source));
+
+ source->interrupt_bitmask = hailo_resource_read32(&resources->config, BCS_ISTATUS_HOST);
+ if (0 == source->interrupt_bitmask) {
+ return false;
+ }
+
+ // clear signal
+ hailo_resource_write32(&resources->config, BCS_ISTATUS_HOST, source->interrupt_bitmask);
+
+ if (source->interrupt_bitmask & BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK) {
+ channel_data_source = hailo_resource_read32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL);
+ hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, channel_data_source);
+ }
+ if (source->interrupt_bitmask & BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK) {
+ channel_data_dest = hailo_resource_read32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL);
+ hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, channel_data_dest);
+ }
+ source->vdma_channels_bitmap = channel_data_source | channel_data_dest;
+
+ return true;
+}
+
+int hailo_pcie_write_firmware_control(struct hailo_pcie_resources *resources, const struct hailo_fw_control *command)
+{
+ int err = 0;
+ u32 request_size = 0;
+ u8 fw_access_value = FW_ACCESS_APP_CPU_CONTROL_MASK;
+ const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
+
+ if (!hailo_pcie_is_firmware_loaded(resources)) {
+ return -ENODEV;
+ }
+
+ // Copy md5 + buffer_len + buffer
+ request_size = sizeof(command->expected_md5) + sizeof(command->buffer_len) + command->buffer_len;
+ err = hailo_resource_write_buffer(&resources->fw_access, 0, PO2_ROUND_UP(request_size, FW_CODE_SECTION_ALIGNMENT),
+ command);
+ if (err < 0) {
+ return err;
+ }
+
+ // Raise the bit for the CPU that will handle the control
+ fw_access_value = (command->cpu_id == HAILO_CPU_ID_CPU1) ? FW_ACCESS_CORE_CPU_CONTROL_MASK :
+ FW_ACCESS_APP_CPU_CONTROL_MASK;
+
+ // Raise ready flag to FW
+ hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, (u32)fw_access_value);
+ return 0;
+}
+
+int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command)
+{
+ u32 response_header_size = 0;
+
+ // Copy response md5 + buffer_len
+ response_header_size = sizeof(command->expected_md5) + sizeof(command->buffer_len);
+
+ hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET, response_header_size, command);
+
+ if (sizeof(command->buffer) < command->buffer_len) {
+ return -EINVAL;
+ }
+
+ // Copy response buffer
+ hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET + (size_t)response_header_size,
+ command->buffer_len, &command->buffer);
+
+ return 0;
+}
+
+void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources)
+{
+ const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
+ const u32 fw_access_value = FW_ACCESS_DRIVER_SHUTDOWN_MASK;
+
+ // Write shutdown flag to FW
+ hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, fw_access_value);
+}
+
+int hailo_pcie_read_firmware_notification(struct hailo_pcie_resources *resources,
+ struct hailo_d2h_notification *notification)
+{
+ struct hailo_resource notification_resource;
+
+ if (PCIE_D2H_NOTIFICATION_SRAM_OFFSET > resources->fw_access.size) {
+ return -EINVAL;
+ }
+
+    notification_resource.address = resources->fw_access.address + PCIE_D2H_NOTIFICATION_SRAM_OFFSET;
+ notification_resource.size = sizeof(struct hailo_d2h_notification);
+
+ return hailo_read_firmware_notification(&notification_resource, notification);
+}
+
+static void write_atr_table(struct hailo_pcie_resources *resources,
+ struct hailo_atr_config *atr)
+{
+ hailo_resource_write_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
+ sizeof(*atr), (void*)atr);
+}
+
+static void read_atr_table(struct hailo_pcie_resources *resources,
+ struct hailo_atr_config *atr)
+{
+ hailo_resource_read_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
+ sizeof(*atr), (void*)atr);
+}
+
+static void configure_atr_table(struct hailo_pcie_resources *resources,
+ hailo_ptr_t base_address)
+{
+ struct hailo_atr_config atr = {
+ .atr_param = ATR0_PARAM,
+ .atr_src = ATR0_SRC_ADDR,
+ .atr_trsl_addr_1 = (u32)base_address,
+ .atr_trsl_addr_2 = ATR0_TRSL_ADDR2,
+ .atr_trsl_param = ATR0_TRSL_PARAM
+ };
+ write_atr_table(resources, &atr);
+}
+
+static void write_memory_chunk(struct hailo_pcie_resources *resources,
+ hailo_ptr_t dest, u32 dest_offset, const void *src, u32 len)
+{
+ BUG_ON(dest_offset + len > (u32)resources->fw_access.size);
+
+ configure_atr_table(resources, dest);
+ (void)hailo_resource_write_buffer(&resources->fw_access, dest_offset, len, src);
+}
+
+static void read_memory_chunk(
+ struct hailo_pcie_resources *resources, hailo_ptr_t src, u32 src_offset, void *dest, u32 len)
+{
+ BUG_ON(src_offset + len > (u32)resources->fw_access.size);
+
+ configure_atr_table(resources, src);
+ (void)hailo_resource_read_buffer(&resources->fw_access, src_offset, len, dest);
+}
+
+// Note: this function modifies the device ATR table (which is also used by the firmware for control and vdma).
+// Use with caution, and restore the original ATR if needed.
+static void write_memory(struct hailo_pcie_resources *resources, hailo_ptr_t dest, const void *src, u32 len)
+{
+ hailo_ptr_t base_address = dest & ~ATR0_TABLE_SIZE_MASK;
+ u32 chunk_len = 0;
+ u32 offset = 0;
+
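+    /* Example: dest = 0x60001500, len = 0x2000 gives a 0xB00-byte head chunk in
+       the ATR window based at 0x60001000, then 0x1000- and 0x500-byte chunks,
+       re-programming the window before each one. */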
+ if (base_address != dest) {
+ // Data is not aligned, write the first chunk
+ chunk_len = min(base_address + ATR0_TABLE_SIZE - dest, len);
+ write_memory_chunk(resources, base_address, dest - base_address, src, chunk_len);
+ offset += chunk_len;
+ }
+
+ while (offset < len) {
+ chunk_len = min(len - offset, ATR0_TABLE_SIZE);
+ write_memory_chunk(resources, dest + offset, 0, (const u8*)src + offset, chunk_len);
+ offset += chunk_len;
+ }
+}
+
+// Note: this function modifies the device ATR table (which is also used by the firmware for control and vdma).
+// Use with caution, and restore the original ATR if needed.
+static void read_memory(struct hailo_pcie_resources *resources, hailo_ptr_t src, void *dest, u32 len)
+{
+ hailo_ptr_t base_address = src & ~ATR0_TABLE_SIZE_MASK;
+ u32 chunk_len = 0;
+ u32 offset = 0;
+
+ if (base_address != src) {
+        // Data is not aligned, read the first chunk
+ chunk_len = min(base_address + ATR0_TABLE_SIZE - src, len);
+ read_memory_chunk(resources, base_address, src - base_address, dest, chunk_len);
+ offset += chunk_len;
+ }
+
+ while (offset < len) {
+ chunk_len = min(len - offset, ATR0_TABLE_SIZE);
+ read_memory_chunk(resources, src + offset, 0, (u8*)dest + offset, chunk_len);
+ offset += chunk_len;
+ }
+}
+
+static void hailo_write_app_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header,
+ secure_boot_certificate_t *fw_cert)
+{
+ const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
+ void *fw_code = (void*)((u8*)fw_header + sizeof(firmware_header_t));
+ void *key_data = &fw_cert->certificates_data[0];
+ void *content_data = &fw_cert->certificates_data[fw_cert->key_size];
+
+ write_memory(resources, fw_addresses->boot_fw_header, fw_header, sizeof(firmware_header_t));
+
+ write_memory(resources, fw_addresses->app_fw_code_ram_base, fw_code, fw_header->code_size);
+
+ write_memory(resources, fw_addresses->boot_key_cert, key_data, fw_cert->key_size);
+ write_memory(resources, fw_addresses->boot_cont_cert, content_data, fw_cert->content_size);
+}
+
+static void hailo_write_core_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header)
+{
+ const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
+ void *fw_code = (void*)((u8*)fw_header + sizeof(firmware_header_t));
+
+ write_memory(resources, fw_addresses->core_code_ram_base, fw_code, fw_header->code_size);
+ write_memory(resources, fw_addresses->core_fw_header, fw_header, sizeof(firmware_header_t));
+}
+
+static void hailo_trigger_firmware_boot(struct hailo_pcie_resources *resources)
+{
+ const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
+ u32 pcie_finished = 1;
+
+ write_memory(resources, fw_addresses->boot_fw_trigger,
+ (void*)&pcie_finished, sizeof(pcie_finished));
+}
+
+/**
+* Validates the FW headers.
+* @param[in] firmware_base_address Address of the firmware.
+* @param[in] firmware_size Size of the firmware.
+* @param[out] out_app_firmware_header (optional) App firmware header
+* @param[out] out_core_firmware_header (optional) Core firmware header
+* @param[out] out_firmware_cert (optional) Firmware certificate header
+*/
+static int FW_VALIDATION__validate_fw_headers(uintptr_t firmware_base_address, size_t firmware_size,
+ firmware_header_t **out_app_firmware_header, firmware_header_t **out_core_firmware_header,
+ secure_boot_certificate_t **out_firmware_cert, enum hailo_board_type board_type)
+{
+ firmware_header_t *app_firmware_header = NULL;
+ firmware_header_t *core_firmware_header = NULL;
+ secure_boot_certificate_t *firmware_cert = NULL;
+ int err = -EINVAL;
+ u32 consumed_firmware_offset = 0;
+
+ err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_APP_FIRMWARE_CODE_SIZE,
+ &consumed_firmware_offset, &app_firmware_header, board_type);
+ if (0 != err) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = FW_VALIDATION__validate_cert_header(firmware_base_address, firmware_size,
+ &consumed_firmware_offset, &firmware_cert);
+ if (0 != err) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_CORE_FIRMWARE_CODE_SIZE,
+ &consumed_firmware_offset, &core_firmware_header, board_type);
+ if (0 != err) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (consumed_firmware_offset != firmware_size) {
+ /* it is an error if there is leftover data after the last firmware header */
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* the out params are all optional */
+ if (NULL != out_app_firmware_header) {
+ *out_app_firmware_header = app_firmware_header;
+ }
+ if (NULL != out_firmware_cert) {
+ *out_firmware_cert = firmware_cert;
+ }
+ if (NULL != out_core_firmware_header) {
+ *out_core_firmware_header = core_firmware_header;
+ }
+ err = 0;
+
+exit:
+ return err;
+}
+
+int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size)
+{
+ firmware_header_t *app_firmware_header = NULL;
+ secure_boot_certificate_t *firmware_cert = NULL;
+ firmware_header_t *core_firmware_header = NULL;
+
+ int err = FW_VALIDATION__validate_fw_headers((uintptr_t)fw_data, fw_size,
+ &app_firmware_header, &core_firmware_header, &firmware_cert, resources->board_type);
+ if (err < 0) {
+ return err;
+ }
+
+ hailo_write_app_firmware(resources, app_firmware_header, firmware_cert);
+ hailo_write_core_firmware(resources, core_firmware_header);
+
+ hailo_trigger_firmware_boot(resources);
+
+ return 0;
+}
+
+bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources)
+{
+ u32 offset = ATR0_PCIE_BRIDGE_OFFSET + offsetof(struct hailo_atr_config, atr_trsl_addr_1);
+ u32 atr_value = hailo_resource_read32(&resources->config, offset);
+ return atr_value == compat[resources->board_type].fw_addresses.atr0_trsl_addr1;
+}
+
+bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources)
+{
+ size_t retries;
+ for (retries = 0; retries < FIRMWARE_LOAD_WAIT_MAX_RETRIES; retries++) {
+ if (hailo_pcie_is_firmware_loaded(resources)) {
+ return true;
+ }
+
+ msleep(FIRMWARE_LOAD_SLEEP_MS);
+ }
+
+ return false;
+}
+
+int hailo_pcie_write_config_common(struct hailo_pcie_resources *resources, const void* config_data,
+ const size_t config_size, const struct hailo_config_constants *config_consts)
+{
+ if (config_size > config_consts->max_size) {
+ return -EINVAL;
+ }
+
+ write_memory(resources, config_consts->address, config_data, (u32)config_size);
+ return 0;
+}
+
+const struct hailo_config_constants* hailo_pcie_get_board_config_constants(const enum hailo_board_type board_type) {
+ BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
+ return &compat[board_type].board_cfg;
+}
+
+const struct hailo_config_constants* hailo_pcie_get_user_config_constants(const enum hailo_board_type board_type) {
+ BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
+ return &compat[board_type].fw_cfg;
+}
+
+const char* hailo_pcie_get_fw_filename(const enum hailo_board_type board_type) {
+ BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
+ return compat[board_type].fw_filename;
+}
+
+void hailo_pcie_update_channel_interrupts_mask(struct hailo_pcie_resources* resources, u32 channels_bitmap)
+{
+ size_t i = 0;
+ u32 mask = hailo_resource_read32(&resources->config, BSC_IMASK_HOST);
+
+ // Clear old channel interrupts
+ mask &= ~BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK;
+ mask &= ~BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK;
+ // Set interrupt bits according to the bitmap
+ for (i = 0; i < MAX_VDMA_CHANNELS_PER_ENGINE; ++i) {
+ if (hailo_test_bit(i, &channels_bitmap)) {
+ // based on 18.5.2 "vDMA Interrupt Registers" in PLDA documentation
+ u32 offset = (i < VDMA_DEST_CHANNELS_START) ? 0 : 8;
+ hailo_set_bit((((int)i*8) / MAX_VDMA_CHANNELS_PER_ENGINE) + offset, &mask);
+ }
+ }
+ hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
+}
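+// Example for the mapping above, assuming MAX_VDMA_CHANNELS_PER_ENGINE == 32
+// and VDMA_DEST_CHANNELS_START == 16 (assumed values, defined elsewhere in this
+// driver): H2D channel 5 sets bit (5 * 8) / 32 + 0 == 1 inside the source mask
+// (0x000000FF), while D2H channel 20 sets bit (20 * 8) / 32 + 8 == 13 inside the
+// destination mask (0x0000FF00), i.e. several channels share a single mask bit.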
+
+void hailo_pcie_enable_interrupts(struct hailo_pcie_resources *resources)
+{
+ u32 mask = hailo_resource_read32(&resources->config, BSC_IMASK_HOST);
+
+ hailo_resource_write32(&resources->config, BCS_ISTATUS_HOST, 0xFFFFFFFF);
+ hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
+ hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
+
+ mask |= BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK | BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION | BCS_ISTATUS_HOST_DRIVER_DOWN;
+ hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
+}
+
+void hailo_pcie_disable_interrupts(struct hailo_pcie_resources* resources)
+{
+ hailo_resource_write32(&resources->config, BSC_IMASK_HOST, 0);
+}
+
+long hailo_pcie_read_firmware_log(struct hailo_pcie_resources *resources, struct hailo_read_log_params *params)
+{
+ long err = 0;
+ struct hailo_resource log_resource = {resources->fw_access.address, DEBUG_BUFFER_TOTAL_SIZE};
+
+ if (HAILO_CPU_ID_CPU0 == params->cpu_id) {
+ log_resource.address += PCIE_APP_CPU_DEBUG_OFFSET;
+ } else if (HAILO_CPU_ID_CPU1 == params->cpu_id) {
+ log_resource.address += PCIE_CORE_CPU_DEBUG_OFFSET;
+ } else {
+ return -EINVAL;
+ }
+
+ if (0 == params->buffer_size) {
+ params->read_bytes = 0;
+ return 0;
+ }
+
+ err = hailo_read_firmware_log(&log_resource, params);
+ if (0 != err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static int direct_memory_transfer(struct hailo_pcie_resources *resources,
+ struct hailo_memory_transfer_params *params)
+{
+ int err = -EINVAL;
+ struct hailo_atr_config previous_atr = {0};
+
+ if (params->address > U32_MAX) {
+ return -EFAULT;
+ }
+
+ // Store the previous ATR (the read/write below modifies the ATR).
+ read_atr_table(resources, &previous_atr);
+
+ switch (params->transfer_direction) {
+ case TRANSFER_READ:
+ read_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
+ break;
+ case TRANSFER_WRITE:
+ write_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
+ break;
+ default:
+ err = -EINVAL;
+ goto restore_atr;
+ }
+
+ err = 0;
+restore_atr:
+ write_atr_table(resources, &previous_atr);
+ return err;
+}
+
+int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params)
+{
+ if (params->count > ARRAY_SIZE(params->buffer)) {
+ return -EINVAL;
+ }
+
+ switch (params->memory_type) {
+ case HAILO_TRANSFER_DEVICE_DIRECT_MEMORY:
+ return direct_memory_transfer(resources, params);
+ case HAILO_TRANSFER_MEMORY_PCIE_BAR0:
+ return hailo_resource_transfer(&resources->config, params);
+ case HAILO_TRANSFER_MEMORY_PCIE_BAR2:
+ case HAILO_TRANSFER_MEMORY_VDMA0:
+ return hailo_resource_transfer(&resources->vdma_registers, params);
+ case HAILO_TRANSFER_MEMORY_PCIE_BAR4:
+ return hailo_resource_transfer(&resources->fw_access, params);
+ default:
+ return -EINVAL;
+ }
+}
+
+bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources)
+{
+ return PCI_VENDOR_ID_HAILO == hailo_resource_read16(&resources->config, PCIE_CONFIG_VENDOR_OFFSET);
+}
+
+// On PCIe, just return the address
+static u64 encode_dma_address(dma_addr_t dma_address, u8 channel_id)
+{
+ (void)channel_id;
+ return (u64)dma_address;
+}
+
+struct hailo_vdma_hw hailo_pcie_vdma_hw = {
+ .hw_ops = {
+ .encode_desc_dma_address = encode_dma_address
+ },
+ .ddr_data_id = HAILO_PCIE_HOST_DMA_DATA_ID,
+ .device_interrupts_bitmask = HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK,
+ .host_interrupts_bitmask = HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK,
+};
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/pcie_common.h
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_COMMON_PCIE_COMMON_H_
+#define _HAILO_COMMON_PCIE_COMMON_H_
+
+#include "hailo_resource.h"
+#include "hailo_ioctl_common.h"
+#include "fw_validation.h"
+#include "fw_operation.h"
+#include "utils.h"
+#include "vdma_common.h"
+
+#include <linux/types.h>
+
+
+#define BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK (0x04000000)
+#define BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION (0x02000000)
+#define BCS_ISTATUS_HOST_DRIVER_DOWN (0x08000000)
+#define BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK (0x000000FF)
+#define BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK (0x0000FF00)
+
+#define PCIE_HAILO8_BOARD_CFG_MAX_SIZE (0x500)
+#define PCIE_HAILO8_FW_CFG_MAX_SIZE (0x500)
+
+#define FW_CODE_SECTION_ALIGNMENT (4)
+
+#define HAILO_PCIE_CONFIG_BAR (0)
+#define HAILO_PCIE_VDMA_REGS_BAR (2)
+#define HAILO_PCIE_FW_ACCESS_BAR (4)
+
+#define HAILO_PCIE_DMA_ENGINES_COUNT (1)
+
+#define DRIVER_NAME "hailo"
+
+#define PCI_VENDOR_ID_HAILO 0x1e60
+#define PCI_DEVICE_ID_HAILO_HAILO8 0x2864
+#define PCI_DEVICE_ID_HAILO_HAILO15 0x45C4
+#define PCI_DEVICE_ID_HAILO_PLUTO 0x43a2
+
+struct hailo_pcie_resources {
+ struct hailo_resource config; // BAR0
+ struct hailo_resource vdma_registers; // BAR2
+ struct hailo_resource fw_access; // BAR4
+ enum hailo_board_type board_type;
+};
+
+enum hailo_pcie_interrupt_masks {
+ FW_CONTROL = BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK,
+ FW_NOTIFICATION = BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION,
+ DRIVER_DOWN = BCS_ISTATUS_HOST_DRIVER_DOWN,
+ VDMA_SRC_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK,
+ VDMA_DEST_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK
+};
+
+struct hailo_pcie_interrupt_source {
+ u32 interrupt_bitmask;
+ u32 vdma_channels_bitmap;
+};
+
+struct hailo_config_constants {
+ const char *filename;
+ u32 address;
+ size_t max_size;
+};
+
+// TODO: HRT-6144 - Align Windows/Linux to QNX
+#ifdef __QNX__
+enum hailo_bar_index {
+ BAR0 = 0,
+ BAR2,
+ BAR4,
+ MAX_BAR
+};
+#else
+enum hailo_bar_index {
+ BAR0 = 0,
+ BAR1,
+ BAR2,
+ BAR3,
+ BAR4,
+ BAR5,
+ MAX_BAR
+};
+#endif // ifdef (__QNX__)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern struct hailo_vdma_hw hailo_pcie_vdma_hw;
+
+// Reads the interrupt source from BARs, return false if there is no interrupt.
+// note - this function clears the interrupt signals.
+bool hailo_pcie_read_interrupt(struct hailo_pcie_resources *resources, struct hailo_pcie_interrupt_source *source);
+void hailo_pcie_update_channel_interrupts_mask(struct hailo_pcie_resources *resources, u32 channels_bitmap);
+void hailo_pcie_enable_interrupts(struct hailo_pcie_resources *resources);
+void hailo_pcie_disable_interrupts(struct hailo_pcie_resources *resources);
+
+int hailo_pcie_write_firmware_control(struct hailo_pcie_resources *resources, const struct hailo_fw_control *command);
+int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command);
+
+int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size);
+bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources);
+bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources);
+
+int hailo_pcie_read_firmware_notification(struct hailo_pcie_resources *resources,
+ struct hailo_d2h_notification *notification);
+
+int hailo_pcie_write_config_common(struct hailo_pcie_resources *resources, const void* config_data,
+ const size_t config_size, const struct hailo_config_constants *config_consts);
+const struct hailo_config_constants* hailo_pcie_get_board_config_constants(const enum hailo_board_type board_type);
+const struct hailo_config_constants* hailo_pcie_get_user_config_constants(const enum hailo_board_type board_type);
+const char* hailo_pcie_get_fw_filename(const enum hailo_board_type board_type);
+
+long hailo_pcie_read_firmware_log(struct hailo_pcie_resources *resources, struct hailo_read_log_params *params);
+int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params);
+
+bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources);
+void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HAILO_COMMON_PCIE_COMMON_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/utils.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_DRIVER_UTILS_H_
+#define _HAILO_DRIVER_UTILS_H_
+
+#include <linux/bitops.h>
+
+#define hailo_clear_bit(bit, pval) { *(pval) &= ~(1 << (bit)); }
+#define hailo_test_bit(pos, var_addr) ((*(var_addr)) & (1 << (pos)))
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+static inline bool is_powerof2(size_t v) {
+ // bit trick
+ return (v & (v - 1)) == 0;
+}
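+// For example: v = 8 (0b1000) gives 8 & 7 (0b0111) == 0, so 8 is a power of
+// two, while v = 6 (0b0110) gives 6 & 5 (0b0101) == 0b0100 != 0. Note that
+// v == 0 also passes this check.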
+
+static inline void hailo_set_bit(int nr, u32* addr) {
+ u32 mask = BIT_MASK(nr);
+ u32 *p = addr + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _HAILO_DRIVER_UTILS_H_
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/vdma_common.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "vdma_common.h"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/circ_buf.h>
+#include <linux/ktime.h>
+#include <linux/timekeeping.h>
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/printk.h>
+
+
+#define CHANNEL_BASE_OFFSET(channel_index) ((channel_index) << 5)
+#define CHANNEL_HOST_OFFSET(channel_index) (CHANNEL_BASE_OFFSET(channel_index) + \
+ ((channel_index) < VDMA_DEST_CHANNELS_START ? 0 : 0x10))
+#define CHANNEL_DEVICE_OFFSET(channel_index) (CHANNEL_BASE_OFFSET(channel_index) + \
+ ((channel_index) < VDMA_DEST_CHANNELS_START ? 0x10 : 0))
+
+#define CHANNEL_CONTROL_OFFSET (0x0)
+#define CHANNEL_NUM_AVAIL_OFFSET (0x2)
+#define CHANNEL_NUM_PROC_OFFSET (0x4)
+#define CHANNEL_ERROR_OFFSET (0x8)
+
+#define VDMA_CHANNEL_CONTROL_START (0x1)
+#define VDMA_CHANNEL_CONTROL_ABORT (0b00)
+#define VDMA_CHANNEL_CONTROL_ABORT_PAUSE (0b10)
+#define VDMA_CHANNEL_CONTROL_START_ABORT_PAUSE_RESUME_BITMASK (0x3)
+#define VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK (0x1)
+
+#define DESCRIPTOR_PAGE_SIZE_SHIFT (8)
+#define DESCRIPTOR_DESC_CONTROL (0x2)
+#define DESCRIPTOR_ADDR_L_MASK (0xFFFFFFC0)
+
+#define DESCRIPTOR_DESC_STATUS_DONE_BIT (0x0)
+#define DESCRIPTOR_DESC_STATUS_ERROR_BIT (0x1)
+#define DESCRIPTOR_DESC_STATUS_MASK (0xFF)
+
+#define DESC_STATUS_REQ (1 << 0)
+#define DESC_STATUS_REQ_ERR (1 << 1)
+#define DESC_REQUEST_IRQ_PROCESSED (1 << 2)
+#define DESC_REQUEST_IRQ_ERR (1 << 3)
+
+
+#define DWORD_SIZE (4)
+#define WORD_SIZE (2)
+#define BYTE_SIZE (1)
+
+#define TIMESTAMPS_CIRC_SPACE(timestamp_list) \
+ CIRC_SPACE((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
+#define TIMESTAMPS_CIRC_CNT(timestamp_list) \
+ CIRC_CNT((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
+
+#define ONGOING_TRANSFERS_CIRC_SPACE(transfers_list) \
+ CIRC_SPACE((transfers_list).head, (transfers_list).tail, HAILO_VDMA_MAX_ONGOING_TRANSFERS)
+#define ONGOING_TRANSFERS_CIRC_CNT(transfers_list) \
+ CIRC_CNT((transfers_list).head, (transfers_list).tail, HAILO_VDMA_MAX_ONGOING_TRANSFERS)
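+// Worked example for the kernel circ_buf macros used above (ring sizes must be
+// powers of two): with a ring of size 8, head = 3 and tail = 1, CIRC_CNT is
+// (3 - 1) & 7 == 2 pending items and CIRC_SPACE is (1 - 4) & 7 == 5 free slots;
+// one slot always stays empty so a full ring can be told apart from an empty one.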
+
+#ifndef for_each_sgtable_dma_sg
+#define for_each_sgtable_dma_sg(sgt, sg, i) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
+#endif /* for_each_sgtable_dma_sg */
+
+
+static int ongoing_transfer_push(struct hailo_vdma_channel *channel,
+ struct hailo_ongoing_transfer *ongoing_transfer)
+{
+ struct hailo_ongoing_transfers_list *transfers = &channel->ongoing_transfers;
+ if (!ONGOING_TRANSFERS_CIRC_SPACE(*transfers)) {
+ return -EFAULT;
+ }
+
+ if (ongoing_transfer->dirty_descs_count > ARRAY_SIZE(ongoing_transfer->dirty_descs)) {
+ return -EFAULT;
+ }
+
+ transfers->transfers[transfers->head] = *ongoing_transfer;
+ transfers->head = (transfers->head + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK;
+ return 0;
+}
+
+static int ongoing_transfer_pop(struct hailo_vdma_channel *channel,
+ struct hailo_ongoing_transfer *ongoing_transfer)
+{
+ struct hailo_ongoing_transfers_list *transfers = &channel->ongoing_transfers;
+ if (!ONGOING_TRANSFERS_CIRC_CNT(*transfers)) {
+ return -EFAULT;
+ }
+
+ if (ongoing_transfer) {
+ *ongoing_transfer = transfers->transfers[transfers->tail];
+ }
+ transfers->tail = (transfers->tail + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK;
+ return 0;
+}
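+// Note: push/pop above mask the ring indices with
+// HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK, which is correct only because
+// HAILO_VDMA_MAX_ONGOING_TRANSFERS is a power of two.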
+
+static void clear_dirty_desc(struct hailo_vdma_descriptors_list *desc_list, u16 desc)
+{
+ desc_list->desc_list[desc].PageSize_DescControl =
+ (u32)((desc_list->desc_page_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
+}
+
+static void clear_dirty_descs(struct hailo_vdma_channel *channel,
+ struct hailo_ongoing_transfer *ongoing_transfer)
+{
+ u8 i = 0;
+ struct hailo_vdma_descriptors_list *desc_list = channel->last_desc_list;
+ BUG_ON(ongoing_transfer->dirty_descs_count > ARRAY_SIZE(ongoing_transfer->dirty_descs));
+ for (i = 0; i < ongoing_transfer->dirty_descs_count; i++) {
+ clear_dirty_desc(desc_list, ongoing_transfer->dirty_descs[i]);
+ }
+}
+
+static bool validate_last_desc_status(struct hailo_vdma_channel *channel,
+ struct hailo_ongoing_transfer *ongoing_transfer)
+{
+ u16 last_desc = ongoing_transfer->last_desc;
+ u32 last_desc_control = channel->last_desc_list->desc_list[last_desc].RemainingPageSize_Status &
+ DESCRIPTOR_DESC_STATUS_MASK;
+ if (!hailo_test_bit(DESCRIPTOR_DESC_STATUS_DONE_BIT, &last_desc_control)) {
+ pr_err("Expecting desc %d to be done\n", last_desc);
+ return false;
+ }
+ if (hailo_test_bit(DESCRIPTOR_DESC_STATUS_ERROR_BIT, &last_desc_control)) {
+ pr_err("Got unexpected error on desc %d\n", last_desc);
+ return false;
+ }
+
+ return true;
+}
+
+void hailo_vdma_program_descriptor(struct hailo_vdma_descriptor *descriptor, u64 dma_address, size_t page_size,
+ u8 data_id)
+{
+ descriptor->PageSize_DescControl = (u32)((page_size << DESCRIPTOR_PAGE_SIZE_SHIFT) +
+ DESCRIPTOR_DESC_CONTROL);
+ descriptor->AddrL_rsvd_DataID = (u32)(((dma_address & DESCRIPTOR_ADDR_L_MASK)) | data_id);
+ descriptor->AddrH = (u32)(dma_address >> 32);
+ descriptor->RemainingPageSize_Status = 0;
+}
+
+static u8 get_channel_id(u8 channel_index)
+{
+ if (channel_index < VDMA_DEST_CHANNELS_START) {
+ // H2D channel
+ return channel_index;
+ }
+ else if ((channel_index >= VDMA_DEST_CHANNELS_START) &&
+ (channel_index < MAX_VDMA_CHANNELS_PER_ENGINE)) {
+ // D2H channel
+ return channel_index - VDMA_DEST_CHANNELS_START;
+ }
+ else {
+ return INVALID_VDMA_CHANNEL;
+ }
+}
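+// Illustration, assuming VDMA_DEST_CHANNELS_START == 16 and
+// MAX_VDMA_CHANNELS_PER_ENGINE == 32 (assumed values, defined elsewhere): H2D
+// channels 0..15 keep their index as the channel id, D2H channels 16..31 map to
+// ids 0..15, and any higher index yields INVALID_VDMA_CHANNEL.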
+
+static int program_descriptors_in_chunk(
+ struct hailo_vdma_hw *vdma_hw,
+ dma_addr_t chunk_addr,
+ unsigned int chunk_size,
+ struct hailo_vdma_descriptors_list *desc_list,
+ u32 desc_index,
+ u32 max_desc_index,
+ u8 channel_id)
+{
+ const u32 desc_per_chunk = DIV_ROUND_UP(chunk_size, desc_list->desc_page_size);
+ struct hailo_vdma_descriptor *dma_desc = NULL;
+ u16 size_to_program = 0;
+ u32 index = 0;
+ u64 encoded_addr = 0;
+
+ for (index = 0; index < desc_per_chunk; index++) {
+ if (desc_index > max_desc_index) {
+ return -ERANGE;
+ }
+
+ encoded_addr = vdma_hw->hw_ops.encode_desc_dma_address(chunk_addr, channel_id);
+ if (INVALID_VDMA_ADDRESS == encoded_addr) {
+ return -EFAULT;
+ }
+
+ dma_desc = &desc_list->desc_list[desc_index % desc_list->desc_count];
+ size_to_program = chunk_size > desc_list->desc_page_size ?
+ desc_list->desc_page_size : (u16)chunk_size;
+ hailo_vdma_program_descriptor(dma_desc, encoded_addr, size_to_program, vdma_hw->ddr_data_id);
+
+ chunk_addr += size_to_program;
+ chunk_size -= size_to_program;
+ desc_index++;
+ }
+
+ return (int)desc_per_chunk;
+}
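+// Worked example: a 10 KiB chunk with desc_page_size == 4096 needs
+// DIV_ROUND_UP(10240, 4096) == 3 descriptors - two full 4096-byte pages and a
+// final descriptor programmed with the 2048-byte residue.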
+
+int hailo_vdma_program_descriptors_list(
+ struct hailo_vdma_hw *vdma_hw,
+ struct hailo_vdma_descriptors_list *desc_list,
+ u32 starting_desc,
+ struct hailo_vdma_mapped_transfer_buffer *buffer,
+ u8 channel_index)
+{
+ const u8 channel_id = get_channel_id(channel_index);
+ int desc_programmed = 0;
+ u32 max_desc_index = 0;
+ u32 chunk_size = 0;
+ struct scatterlist *sg_entry = NULL;
+ unsigned int i = 0;
+ int ret = 0;
+ size_t buffer_current_offset = 0;
+ dma_addr_t chunk_start_addr = 0;
+ u32 program_size = buffer->size;
+
+ if (starting_desc >= desc_list->desc_count) {
+ return -EFAULT;
+ }
+
+ if (buffer->offset % desc_list->desc_page_size != 0) {
+ return -EFAULT;
+ }
+
+ // On a circular buffer, allow programming desc_count descriptors (starting
+ // from starting_desc). On a non-circular buffer, don't allow it to pass desc_count.
+ max_desc_index = desc_list->is_circular ?
+ starting_desc + desc_list->desc_count - 1 :
+ desc_list->desc_count - 1;
+ for_each_sgtable_dma_sg(buffer->sg_table, sg_entry, i) {
+ // Skip sg entries until we reach the right buffer offset. offset can be in the middle of an sg entry.
+ if (buffer_current_offset + sg_dma_len(sg_entry) < buffer->offset) {
+ buffer_current_offset += sg_dma_len(sg_entry);
+ continue;
+ }
+ chunk_start_addr = (buffer_current_offset < buffer->offset) ?
+ sg_dma_address(sg_entry) + (buffer->offset - buffer_current_offset) :
+ sg_dma_address(sg_entry);
+ chunk_size = (buffer_current_offset < buffer->offset) ?
+ (u32)(sg_dma_len(sg_entry) - (buffer->offset - buffer_current_offset)) :
+ (u32)(sg_dma_len(sg_entry));
+ chunk_size = min((u32)program_size, chunk_size);
+
+ ret = program_descriptors_in_chunk(vdma_hw, chunk_start_addr, chunk_size, desc_list,
+ starting_desc, max_desc_index, channel_id);
+ if (ret < 0) {
+ return ret;
+ }
+
+ desc_programmed += ret;
+ starting_desc = starting_desc + ret;
+ program_size -= chunk_size;
+ buffer_current_offset += sg_dma_len(sg_entry);
+ }
+
+ if (program_size != 0) {
+ // We didn't program all the buffer.
+ return -EFAULT;
+ }
+
+ return desc_programmed;
+}
+
+static bool channel_control_reg_is_active(u8 control)
+{
+ return (control & VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK) == VDMA_CHANNEL_CONTROL_START;
+}
+
+static int validate_channel_state(struct hailo_vdma_channel *channel)
+{
+ const u8 control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
+ const u16 hw_num_avail = ioread16(channel->host_regs + CHANNEL_NUM_AVAIL_OFFSET);
+
+ if (!channel_control_reg_is_active(control)) {
+ pr_err("Channel %d is not active\n", channel->index);
+ return -EBUSY;
+ }
+
+ if (hw_num_avail != channel->state.num_avail) {
+ pr_err("Channel %d hw state out of sync. num available is %d, expected %d\n",
+ channel->index, hw_num_avail, channel->state.num_avail);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static unsigned long get_interrupts_bitmask(struct hailo_vdma_hw *vdma_hw,
+ enum hailo_vdma_interrupts_domain interrupts_domain, bool is_debug)
+{
+ unsigned long bitmask = 0;
+
+ if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE & interrupts_domain)) {
+ bitmask |= vdma_hw->device_interrupts_bitmask;
+ }
+ if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_HOST & interrupts_domain)) {
+ bitmask |= vdma_hw->host_interrupts_bitmask;
+ }
+
+ if (bitmask != 0) {
+ bitmask |= DESC_REQUEST_IRQ_PROCESSED | DESC_REQUEST_IRQ_ERR;
+ if (is_debug) {
+ bitmask |= DESC_STATUS_REQ | DESC_STATUS_REQ_ERR;
+ }
+ }
+
+ return bitmask;
+}
+
+static void set_num_avail(u8 __iomem *host_regs, u16 num_avail)
+{
+ iowrite16(num_avail, host_regs + CHANNEL_NUM_AVAIL_OFFSET);
+}
+
+static u16 get_num_proc(u8 __iomem *host_regs)
+{
+ return ioread16(host_regs + CHANNEL_NUM_PROC_OFFSET);
+}
+
+static int program_last_desc(
+ struct hailo_vdma_descriptors_list *desc_list,
+ u32 starting_desc,
+ struct hailo_vdma_mapped_transfer_buffer *transfer_buffer)
+{
+ u32 total_descs = DIV_ROUND_UP(transfer_buffer->size, desc_list->desc_page_size);
+ u32 last_desc = (starting_desc + total_descs - 1) % desc_list->desc_count;
+ u32 last_desc_size = transfer_buffer->size - (total_descs - 1) * desc_list->desc_page_size;
+
+ // Configure only the last descriptor with the residue size
+ desc_list->desc_list[last_desc].PageSize_DescControl = (u32)
+ ((last_desc_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
+ return (int)total_descs;
+}
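+// Worked example: a transfer of 10000 bytes with desc_page_size == 4096 gives
+// total_descs == 3 and last_desc_size == 10000 - 2 * 4096 == 1808, so only the
+// third descriptor is rewritten with the residue size.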
+
+int hailo_vdma_launch_transfer(
+ struct hailo_vdma_hw *vdma_hw,
+ struct hailo_vdma_channel *channel,
+ struct hailo_vdma_descriptors_list *desc_list,
+ u32 starting_desc,
+ u8 buffers_count,
+ struct hailo_vdma_mapped_transfer_buffer *buffers,
+ bool should_bind,
+ enum hailo_vdma_interrupts_domain first_interrupts_domain,
+ enum hailo_vdma_interrupts_domain last_desc_interrupts,
+ bool is_debug)
+{
+ int ret = -EFAULT;
+ u32 total_descs = 0;
+ u32 first_desc = starting_desc;
+ u32 last_desc = U32_MAX;
+ u16 new_num_avail = 0;
+ struct hailo_ongoing_transfer ongoing_transfer = {0};
+ u8 i = 0;
+
+ channel->state.desc_count_mask = (desc_list->desc_count - 1);
+
+ if (NULL == channel->last_desc_list) {
+ // First transfer on this active channel, store desc list.
+ channel->last_desc_list = desc_list;
+ } else if (desc_list != channel->last_desc_list) {
+ // Shouldn't happen, desc list may change only after channel deactivation.
+ pr_err("Inconsistent desc list given to channel %d\n", channel->index);
+ return -EINVAL;
+ }
+
+ if (channel->state.num_avail != (u16)starting_desc) {
+ pr_err("Channel %d state out of sync. num available is %d, expected %d\n",
+ channel->index, channel->state.num_avail, (u16)starting_desc);
+ return -EFAULT;
+ }
+
+ if (buffers_count > HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER) {
+ pr_err("Too many buffers %u for single transfer\n", buffers_count);
+ return -EINVAL;
+ }
+
+ if (is_debug) {
+ ret = validate_channel_state(channel);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ BUILD_BUG_ON_MSG((HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1) != ARRAY_SIZE(ongoing_transfer.dirty_descs),
+ "Unexpected amount of dirty descriptors");
+ ongoing_transfer.dirty_descs_count = buffers_count + 1;
+ ongoing_transfer.dirty_descs[0] = (u16)starting_desc;
+
+ for (i = 0; i < buffers_count; i++) {
+ ret = should_bind ?
+ hailo_vdma_program_descriptors_list(vdma_hw, desc_list, starting_desc, &buffers[i], channel->index) :
+ program_last_desc(desc_list, starting_desc, &buffers[i]);
+ if (ret < 0) {
+ return ret;
+ }
+ total_descs += ret;
+ last_desc = (starting_desc + ret - 1) % desc_list->desc_count;
+ starting_desc = (starting_desc + ret) % desc_list->desc_count;
+
+ ongoing_transfer.dirty_descs[i+1] = (u16)last_desc;
+ ongoing_transfer.buffers[i] = buffers[i];
+ }
+ ongoing_transfer.buffers_count = buffers_count;
+
+ desc_list->desc_list[first_desc].PageSize_DescControl |=
+ get_interrupts_bitmask(vdma_hw, first_interrupts_domain, is_debug);
+ desc_list->desc_list[last_desc].PageSize_DescControl |=
+ get_interrupts_bitmask(vdma_hw, last_desc_interrupts, is_debug);
+
+ ongoing_transfer.last_desc = (u16)last_desc;
+ ongoing_transfer.is_debug = is_debug;
+ ret = ongoing_transfer_push(channel, &ongoing_transfer);
+ if (ret < 0) {
+ pr_err("Failed push ongoing transfer to channel %d\n", channel->index);
+ return ret;
+ }
+
+ new_num_avail = (u16)((last_desc + 1) % desc_list->desc_count);
+ channel->state.num_avail = new_num_avail;
+ set_num_avail(channel->host_regs, new_num_avail);
+
+ return (int)total_descs;
+}
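+/*
+ * Hypothetical usage sketch (not driver code; assumes `sgt` was already DMA
+ * mapped and `desc_list`/`channel` come from the vdma setup paths):
+ *
+ * struct hailo_vdma_mapped_transfer_buffer buf = {
+ *     .sg_table = sgt, .size = transfer_size, .offset = 0,
+ * };
+ * int descs = hailo_vdma_launch_transfer(&hailo_pcie_vdma_hw, channel,
+ *     desc_list, channel->state.num_avail, 1, &buf, true,
+ *     HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE, HAILO_VDMA_INTERRUPTS_DOMAIN_HOST,
+ *     false);
+ * if (descs < 0)
+ *     return descs; // num_avail is advanced only on success
+ */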
+
+static void hailo_vdma_push_timestamp(struct hailo_vdma_channel *channel)
+{
+ struct hailo_channel_interrupt_timestamp_list *timestamp_list = &channel->timestamp_list;
+ const u16 num_proc = get_num_proc(channel->host_regs);
+ if (TIMESTAMPS_CIRC_SPACE(*timestamp_list) != 0) {
+ timestamp_list->timestamps[timestamp_list->head].timestamp_ns = ktime_get_ns();
+ timestamp_list->timestamps[timestamp_list->head].desc_num_processed = num_proc;
+ timestamp_list->head = (timestamp_list->head + 1) & CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK;
+ }
+}
+
+// Returns false if there are no items
+static bool hailo_vdma_pop_timestamp(struct hailo_channel_interrupt_timestamp_list *timestamp_list,
+ struct hailo_channel_interrupt_timestamp *out_timestamp)
+{
+ if (0 == TIMESTAMPS_CIRC_CNT(*timestamp_list)) {
+ return false;
+ }
+
+ *out_timestamp = timestamp_list->timestamps[timestamp_list->tail];
+ timestamp_list->tail = (timestamp_list->tail+1) & CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK;
+ return true;
+}
+
+static void hailo_vdma_pop_timestamps_to_response(struct hailo_vdma_channel *channel,
+ struct hailo_vdma_interrupts_read_timestamp_params *result)
+{
+ const u32 max_timestamps = ARRAY_SIZE(result->timestamps);
+ u32 i = 0;
+
+ // Check the bound before popping so we never write past the timestamps array.
+ while ((i < max_timestamps) &&
+ hailo_vdma_pop_timestamp(&channel->timestamp_list, &result->timestamps[i])) {
+ // Although hw_num_processed should be a number between 0 and
+ // desc_count - 1, if desc_count < 0x10000 (the maximum descriptor count),
+ // the actual hw_num_processed is a number between 1 and desc_count.
+ // Therefore the value can be desc_count; in that case we change it to zero.
+ result->timestamps[i].desc_num_processed = result->timestamps[i].desc_num_processed &
+ channel->state.desc_count_mask;
+ i++;
+ }
+
+ result->timestamps_count = i;
+}
+
+static void channel_state_init(struct hailo_vdma_channel_state *state)
+{
+ state->num_avail = state->num_proc = 0;
+
+ // Special value used when the channel is not active.
+ state->desc_count_mask = U32_MAX;
+}
+
+void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
+ const struct hailo_resource *channel_registers)
+{
+ u8 channel_index = 0;
+ struct hailo_vdma_channel *channel;
+
+ engine->index = engine_index;
+ engine->enabled_channels = 0x0;
+ engine->interrupted_channels = 0x0;
+
+ for_each_vdma_channel(engine, channel, channel_index) {
+ u8 __iomem *regs_base = (u8 __iomem *)channel_registers->address;
+ channel->host_regs = regs_base + CHANNEL_HOST_OFFSET(channel_index);
+ channel->device_regs = regs_base + CHANNEL_DEVICE_OFFSET(channel_index);
+ channel->index = channel_index;
+ channel->timestamp_measure_enabled = false;
+
+ channel_state_init(&channel->state);
+ channel->last_desc_list = NULL;
+
+ channel->ongoing_transfers.head = 0;
+ channel->ongoing_transfers.tail = 0;
+ }
+}
+
+void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
+ bool measure_timestamp)
+{
+ struct hailo_vdma_channel *channel = NULL;
+ u8 channel_index = 0;
+
+ for_each_vdma_channel(engine, channel, channel_index) {
+ if (hailo_test_bit(channel_index, &bitmap)) {
+ channel->timestamp_measure_enabled = measure_timestamp;
+ channel->timestamp_list.head = channel->timestamp_list.tail = 0;
+ }
+ }
+
+ engine->enabled_channels |= bitmap;
+}
+
+void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
+{
+ struct hailo_vdma_channel *channel = NULL;
+ u8 channel_index = 0;
+
+ engine->enabled_channels &= ~bitmap;
+
+ for_each_vdma_channel(engine, channel, channel_index) {
+ channel_state_init(&channel->state);
+
+ while (ONGOING_TRANSFERS_CIRC_CNT(channel->ongoing_transfers) > 0) {
+ struct hailo_ongoing_transfer transfer;
+ ongoing_transfer_pop(channel, &transfer);
+
+ if (channel->last_desc_list == NULL) {
+ pr_err("Channel %d has ongoing transfers but no desc list\n", channel->index);
+ continue;
+ }
+
+ clear_dirty_descs(channel, &transfer);
+ }
+
+ channel->last_desc_list = NULL;
+ }
+}
+
+void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap)
+{
+ struct hailo_vdma_channel *channel = NULL;
+ u8 channel_index = 0;
+
+ for_each_vdma_channel(engine, channel, channel_index) {
+ if (unlikely(hailo_test_bit(channel_index, &bitmap) &&
+ channel->timestamp_measure_enabled)) {
+ hailo_vdma_push_timestamp(channel);
+ }
+ }
+}
+
+int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
+ struct hailo_vdma_interrupts_read_timestamp_params *params)
+{
+ struct hailo_vdma_channel *channel = NULL;
+
+ if (params->channel_index >= MAX_VDMA_CHANNELS_PER_ENGINE) {
+ return -EINVAL;
+ }
+
+ channel = &engine->channels[params->channel_index];
+ hailo_vdma_pop_timestamps_to_response(channel, params);
+ return 0;
+}
+
+void hailo_vdma_engine_clear_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
+{
+ engine->interrupted_channels &= ~bitmap;
+}
+
+void hailo_vdma_engine_set_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
+{
+ engine->interrupted_channels |= bitmap;
+}
+
+static void fill_channel_irq_data(struct hailo_vdma_interrupts_channel_data *irq_data,
+ struct hailo_vdma_engine *engine, struct hailo_vdma_channel *channel, u16 num_proc,
+ bool validation_success)
+{
+ u8 host_control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
+ u8 device_control = ioread8(channel->device_regs + CHANNEL_CONTROL_OFFSET);
+
+ irq_data->engine_index = engine->index;
+ irq_data->channel_index = channel->index;
+
+ irq_data->is_active = channel_control_reg_is_active(host_control) &&
+ channel_control_reg_is_active(device_control);
+
+ irq_data->host_num_processed = num_proc;
+ irq_data->host_error = ioread8(channel->host_regs + CHANNEL_ERROR_OFFSET);
+ irq_data->device_error = ioread8(channel->device_regs + CHANNEL_ERROR_OFFSET);
+ irq_data->validation_success = validation_success;
+}
+
+static bool is_desc_between(u16 begin, u16 end, u16 desc)
+{
+ if (begin == end) {
+ // There is nothing between
+ return false;
+ }
+ if (begin < end) {
+ // desc needs to be in [begin, end)
+ return (begin <= desc) && (desc < end);
+ }
+ else {
+ // desc needs to be in [0, end) or [begin, desc_count-1]
+ return (desc < end) || (begin <= desc);
+ }
+}
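+// Example on a ring of 16 descriptors: begin = 12, end = 4 wraps around, so
+// desc = 14 (>= begin) and desc = 2 (< end) are both "between", while desc = 8
+// is not; when begin == end the interval is considered empty.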
+
+static bool is_transfer_complete(struct hailo_vdma_channel *channel,
+ struct hailo_ongoing_transfer *transfer, u16 hw_num_proc)
+{
+ if (channel->state.num_avail == hw_num_proc) {
+ return true;
+ }
+
+ return is_desc_between(channel->state.num_proc, hw_num_proc, transfer->last_desc);
+}
+
+int hailo_vdma_engine_fill_irq_data(struct hailo_vdma_interrupts_wait_params *irq_data,
+ struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
+ transfer_done_cb_t transfer_done, void *transfer_done_opaque)
+{
+ struct hailo_vdma_channel *channel = NULL;
+ u8 channel_index = 0;
+ bool validation_success = true;
+
+ for_each_vdma_channel(engine, channel, channel_index) {
+ u16 hw_num_proc = U16_MAX;
+ if (!hailo_test_bit(channel->index, &irq_channels_bitmap)) {
+ continue;
+ }
+
+ if (channel->last_desc_list == NULL) {
+ // Channel not active or no transfer, skipping.
+ continue;
+ }
+
+ if (irq_data->channels_count >= ARRAY_SIZE(irq_data->irq_data)) {
+ return -EINVAL;
+ }
+
+ // Although hw_num_processed should be a number between 0 and
+ // desc_count - 1, if desc_count < 0x10000 (the maximum descriptor count),
+ // the actual hw_num_processed is a number between 1 and desc_count.
+ // Therefore the value can be desc_count; in that case we change it to zero.
+ hw_num_proc = get_num_proc(channel->host_regs) & channel->state.desc_count_mask;
+
+ while (ONGOING_TRANSFERS_CIRC_CNT(channel->ongoing_transfers) > 0) {
+ struct hailo_ongoing_transfer *cur_transfer =
+ &channel->ongoing_transfers.transfers[channel->ongoing_transfers.tail];
+ if (!is_transfer_complete(channel, cur_transfer, hw_num_proc)) {
+ break;
+ }
+
+ if (cur_transfer->is_debug &&
+ !validate_last_desc_status(channel, cur_transfer)) {
+ validation_success = false;
+ }
+
+ clear_dirty_descs(channel, cur_transfer);
+ transfer_done(cur_transfer, transfer_done_opaque);
+ channel->state.num_proc = (u16)((cur_transfer->last_desc + 1) & channel->state.desc_count_mask);
+
+ ongoing_transfer_pop(channel, NULL);
+ }
+
+ fill_channel_irq_data(&irq_data->irq_data[irq_data->channels_count],
+ engine, channel, hw_num_proc, validation_success);
+ irq_data->channels_count++;
+ }
+
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/common/vdma_common.h
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_COMMON_VDMA_COMMON_H_
+#define _HAILO_COMMON_VDMA_COMMON_H_
+
+#include "hailo_resource.h"
+#include "utils.h"
+
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+
+#define VDMA_DESCRIPTOR_LIST_ALIGN (1 << 16)
+#define INVALID_VDMA_ADDRESS (0)
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+struct hailo_vdma_descriptor {
+ u32 PageSize_DescControl;
+ u32 AddrL_rsvd_DataID;
+ u32 AddrH;
+ u32 RemainingPageSize_Status;
+};
+
+struct hailo_vdma_descriptors_list {
+ struct hailo_vdma_descriptor *desc_list;
+ u32 desc_count; // Must be power of 2 if is_circular is set.
+ u16 desc_page_size;
+ bool is_circular;
+};
+
+struct hailo_channel_interrupt_timestamp_list {
+ int head;
+ int tail;
+ struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE];
+};
+
+
+// For each buffer in a transfer, the last descriptor will be programmed with
+// the residue size. In addition, if configured, the first descriptor (of the
+// whole transfer) may be programmed with interrupts.
+#define MAX_DIRTY_DESCRIPTORS_PER_TRANSFER \
+ (HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1)
+
+struct hailo_vdma_mapped_transfer_buffer {
+ struct sg_table *sg_table;
+ u32 size;
+ u32 offset;
+ void *opaque; // Drivers can set any opaque data here.
+};
+
+struct hailo_ongoing_transfer {
+ uint16_t last_desc;
+
+ u8 buffers_count;
+ struct hailo_vdma_mapped_transfer_buffer buffers[HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER];
+
+ // Contains all descriptors that were programmed with non-default values
+ // for the transfer (by non-default we mean - different size or different
+ // interrupts domain).
+ uint8_t dirty_descs_count;
+ uint16_t dirty_descs[MAX_DIRTY_DESCRIPTORS_PER_TRANSFER];
+
+ // If set, validate descriptors status on transfer completion.
+ bool is_debug;
+};
+
+struct hailo_ongoing_transfers_list {
+ unsigned long head;
+ unsigned long tail;
+ struct hailo_ongoing_transfer transfers[HAILO_VDMA_MAX_ONGOING_TRANSFERS];
+};
+
+struct hailo_vdma_channel_state {
+ // vdma channel counters. num_avail should be synchronized with the hw
+ // num_avail value. num_proc is the last num_proc value updated when the
+ // user reads interrupts.
+ u16 num_avail;
+ u16 num_proc;
+
+ // Mask of the num-avail/num-proc counters.
+ u32 desc_count_mask;
+};
+
+struct hailo_vdma_channel {
+ u8 index;
+
+ u8 __iomem *host_regs;
+ u8 __iomem *device_regs;
+
+ // Last descriptors list attached to the channel. When it changes,
+ // we assume that the channel was reset.
+ struct hailo_vdma_descriptors_list *last_desc_list;
+
+ struct hailo_vdma_channel_state state;
+ struct hailo_ongoing_transfers_list ongoing_transfers;
+
+ bool timestamp_measure_enabled;
+ struct hailo_channel_interrupt_timestamp_list timestamp_list;
+};
+
+struct hailo_vdma_engine {
+ u8 index;
+ u32 enabled_channels;
+ u32 interrupted_channels;
+ struct hailo_vdma_channel channels[MAX_VDMA_CHANNELS_PER_ENGINE];
+};
+
+struct hailo_vdma_hw_ops {
+ // Accepts a dma_addr_t mapped to the device and encodes it using a
+ // hw-specific encoding. Returns INVALID_VDMA_ADDRESS on failure.
+ u64 (*encode_desc_dma_address)(dma_addr_t dma_address, u8 channel_id);
+};
+
+struct hailo_vdma_hw {
+ struct hailo_vdma_hw_ops hw_ops;
+
+ // The data_id code of ddr addresses.
+ u8 ddr_data_id;
+
+ // Bitmask that needs to be set on each descriptor to enable interrupts (host or device).
+ unsigned long host_interrupts_bitmask;
+ unsigned long device_interrupts_bitmask;
+};
+
+#define _for_each_element_array(array, size, element, index) \
+ for (index = 0, element = &array[index]; index < size; index++, element = &array[index])
+
+#define for_each_vdma_channel(engine, channel, channel_index) \
+ _for_each_element_array(engine->channels, MAX_VDMA_CHANNELS_PER_ENGINE, \
+ channel, channel_index)
+
+void hailo_vdma_program_descriptor(struct hailo_vdma_descriptor *descriptor, u64 dma_address, size_t page_size,
+ u8 data_id);
+
+/**
+ * Program the given descriptors list to map the given buffer.
+ *
+ * @param vdma_hw vdma hw object
+ * @param desc_list descriptors list object to program
+ * @param starting_desc index of the first descriptor to program. If the list
+ * is circular, this function may wrap around the list.
+ * @param buffer buffer to program to the descriptors list.
+ * @param channel_index channel index of the channel attached.
+ *
+ * @return On success - the amount of descriptors programmed, negative value on error.
+ */
+int hailo_vdma_program_descriptors_list(
+ struct hailo_vdma_hw *vdma_hw,
+ struct hailo_vdma_descriptors_list *desc_list,
+ u32 starting_desc,
+ struct hailo_vdma_mapped_transfer_buffer *buffer,
+ u8 channel_index);
+
+/**
+ * Launch a transfer on some vdma channel. Includes:
+ * 1. Binding the transfer buffers to the descriptors list (if should_bind is set).
+ * 2. Programming the descriptors list.
+ * 3. Increasing num available.
+ *
+ * @param vdma_hw vdma hw object
+ * @param channel vdma channel object.
+ * @param desc_list descriptors list object to program.
+ * @param starting_desc index of the first descriptor to program.
+ * @param buffers_count amount of transfer mapped buffers to program.
+ * @param buffers array of buffers to program to the descriptors list.
+ * @param should_bind whether to bind the buffer to the descriptors list.
+ * @param first_interrupts_domain - interrupts settings on first descriptor.
+ * @param last_desc_interrupts - interrupts settings on last descriptor.
+ * @param is_debug program descriptors for debug run, adds some overhead (for
+ * example, hw will write desc complete status).
+ *
+ * @return On success - the amount of descriptors programmed, negative value on error.
+ */
+int hailo_vdma_launch_transfer(
+ struct hailo_vdma_hw *vdma_hw,
+ struct hailo_vdma_channel *channel,
+ struct hailo_vdma_descriptors_list *desc_list,
+ u32 starting_desc,
+ u8 buffers_count,
+ struct hailo_vdma_mapped_transfer_buffer *buffers,
+ bool should_bind,
+ enum hailo_vdma_interrupts_domain first_interrupts_domain,
+ enum hailo_vdma_interrupts_domain last_desc_interrupts,
+ bool is_debug);
+
+void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
+ const struct hailo_resource *channel_registers);
+
+// Enable/disable channel interrupts (this does not update the interrupt mask
+// because the implementation differs between PCIe and DRAM DMA. To support it
+// we could add an ops struct to the engine).
+void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
+ bool measure_timestamp);
+void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
+
+void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap);
+int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
+ struct hailo_vdma_interrupts_read_timestamp_params *params);
+
+static inline bool hailo_vdma_engine_got_interrupt(struct hailo_vdma_engine *engine,
+ u32 channels_bitmap)
+{
+ // Reading interrupts without a lock is ok (the lock is needed only for writes)
+ const bool any_interrupt = (0 != (channels_bitmap & engine->interrupted_channels));
+ const bool any_disabled = (channels_bitmap != (channels_bitmap & engine->enabled_channels));
+ return (any_disabled || any_interrupt);
+}
+
+// Set/Clear/Read channel interrupts; must be called under some lock (driver specific)
+void hailo_vdma_engine_clear_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
+void hailo_vdma_engine_set_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
+
+static inline u32 hailo_vdma_engine_read_interrupts(struct hailo_vdma_engine *engine,
+ u32 requested_bitmap)
+{
+ // Interrupts only for channels that are requested and enabled.
+ u32 irq_channels_bitmap = requested_bitmap &
+ engine->enabled_channels &
+ engine->interrupted_channels;
+ engine->interrupted_channels &= ~irq_channels_bitmap;
+
+ return irq_channels_bitmap;
+}
+
+typedef void(*transfer_done_cb_t)(struct hailo_ongoing_transfer *transfer, void *opaque);
+
+// Assuming irq_data->channels_count contains the amount of channels already
+// written (used for multiple engines).
+int hailo_vdma_engine_fill_irq_data(struct hailo_vdma_interrupts_wait_params *irq_data,
+ struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
+ transfer_done_cb_t transfer_done, void *transfer_done_opaque);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _HAILO_COMMON_VDMA_COMMON_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/include/hailo_pcie_version.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_PCIE_VERSION_H_
+#define _HAILO_PCIE_VERSION_H_
+
+#include <linux/stringify.h>
+#include "../common/hailo_pcie_version.h"
+
+#define HAILO_DRV_VER __stringify(HAILO_DRV_VER_MAJOR) "." __stringify(HAILO_DRV_VER_MINOR) "." __stringify(HAILO_DRV_VER_REVISION)
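+/* __stringify expands its argument before quoting, so e.g. major 4, minor 17
+ * and revision 1 produce the version string "4.17.1". */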
+
+#endif /* _HAILO_PCIE_VERSION_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/src/fops.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <asm/thread_info.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
+
+#include "hailo_pcie_version.h"
+#include "utils.h"
+#include "fops.h"
+#include "vdma_common.h"
+#include "utils/logs.h"
+#include "vdma/memory.h"
+#include "vdma/ioctl.h"
+#include "utils/compact.h"
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 13, 0 )
+#define wait_queue_t wait_queue_entry_t
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 15, 0 )
+#define ACCESS_ONCE READ_ONCE
+#endif
+
+#ifndef VM_RESERVED
+ #define VMEM_FLAGS (VM_IO | VM_DONTEXPAND | VM_DONTDUMP)
+#else
+ #define VMEM_FLAGS (VM_IO | VM_RESERVED)
+#endif
+
+#define IS_PO2_ALIGNED(size, alignment) (!((size) & ((alignment) - 1)))
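+// Example: IS_PO2_ALIGNED(0x3000, 0x1000) is true (0x3000 & 0xFFF == 0) while
+// IS_PO2_ALIGNED(0x3004, 0x1000) is false; the check is only meaningful when
+// alignment is a power of two.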
+
+// On pcie driver there is only one dma engine
+#define DEFAULT_VDMA_ENGINE_INDEX (0)
+
+#if !defined(HAILO_EMULATOR)
+#define DEFAULT_SHUTDOWN_TIMEOUT_MS (5)
+#else /* !defined(HAILO_EMULATOR) */
+#define DEFAULT_SHUTDOWN_TIMEOUT_MS (1000)
+#endif /* !defined(HAILO_EMULATOR) */
+
+static long hailo_add_notification_wait(struct hailo_pcie_board *board, struct file *filp);
+
+static struct hailo_file_context *create_file_context(struct hailo_pcie_board *board, struct file *filp)
+{
+ struct hailo_file_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ hailo_err(board, "Failed to alloc file context (required size %zu)\n", sizeof(*context));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ context->filp = filp;
+ hailo_vdma_file_context_init(&context->vdma_context);
+ list_add(&context->open_files_list, &board->open_files_list);
+ context->is_valid = true;
+ return context;
+}
+
+static void release_file_context(struct hailo_file_context *context)
+{
+ context->is_valid = false;
+ list_del(&context->open_files_list);
+ kfree(context);
+}
+
+static struct hailo_file_context *find_file_context(struct hailo_pcie_board *board, struct file *filp)
+{
+ struct hailo_file_context *cur = NULL;
+ list_for_each_entry(cur, &board->open_files_list, open_files_list) {
+ if (cur->filp == filp) {
+ return cur;
+ }
+ }
+ return NULL;
+}
+
+int hailo_pcie_fops_open(struct inode *inode, struct file *filp)
+{
+ u32 major = MAJOR(inode->i_rdev);
+ u32 minor = MINOR(inode->i_rdev);
+ struct hailo_pcie_board *pBoard;
+ int err = 0;
+ pci_power_t previous_power_state = PCI_UNKNOWN;
+ bool interrupts_enabled_by_filp = false;
+ struct hailo_file_context *context = NULL;
+
+ pr_debug(DRIVER_NAME ": (%d: %d-%d): fops_open\n", current->tgid, major, minor);
+
+ // allow multiple processes to open a device, count references in hailo_pcie_get_board_index.
+ if (!(pBoard = hailo_pcie_get_board_index(minor))) {
+ pr_err(DRIVER_NAME ": fops_open: PCIe board not found for /dev/hailo%d node.\n", minor);
+ err = -ENODEV;
+ goto l_exit;
+ }
+
+ filp->private_data = pBoard;
+
+ if (down_interruptible(&pBoard->mutex)) {
+ hailo_err(pBoard, "fops_open down_interruptible fail tgid:%d\n", current->tgid);
+ err = -ERESTARTSYS;
+ goto l_decrease_ref_count;
+ }
+
+ context = create_file_context(pBoard, filp);
+ if (IS_ERR(context)) {
+ err = PTR_ERR(context);
+ goto l_release_mutex;
+ }
+
+ previous_power_state = pBoard->pDev->current_state;
+ if (PCI_D0 != previous_power_state) {
+ hailo_info(pBoard, "Waking up board");
+ err = pci_set_power_state(pBoard->pDev, PCI_D0);
+ if (err < 0) {
+ hailo_err(pBoard, "Failed waking up board %d", err);
+ goto l_free_context;
+ }
+ }
+
+ if (!hailo_pcie_is_device_connected(&pBoard->pcie_resources)) {
+ hailo_err(pBoard, "Device disconnected while opening device\n");
+ err = -ENXIO;
+ goto l_revert_power_state;
+ }
+
+ // enable interrupts
+ if (!pBoard->interrupts_enabled) {
+ err = hailo_enable_interrupts(pBoard);
+ if (err < 0) {
+ hailo_err(pBoard, "Failed Enabling interrupts %d\n", err);
+ goto l_revert_power_state;
+ }
+ interrupts_enabled_by_filp = true;
+ }
+
+ err = hailo_add_notification_wait(pBoard, filp);
+ if (err < 0) {
+ goto l_release_irq;
+ }
+
+ hailo_dbg(pBoard, "(%d: %d-%d): fops_open: SUCCESS on /dev/hailo%d\n", current->tgid,
+ major, minor, minor);
+
+ up(&pBoard->mutex);
+ return 0;
+
+l_release_irq:
+ if (interrupts_enabled_by_filp) {
+ hailo_disable_interrupts(pBoard);
+ }
+
+l_revert_power_state:
+ if (pBoard->pDev->current_state != previous_power_state) {
+ if (pci_set_power_state(pBoard->pDev, previous_power_state) < 0) {
+ hailo_err(pBoard, "Failed setting power state back to %d\n", (int)previous_power_state);
+ }
+ }
+l_free_context:
+ release_file_context(context);
+l_release_mutex:
+ up(&pBoard->mutex);
+l_decrease_ref_count:
+ atomic_dec(&pBoard->ref_count);
+l_exit:
+ return err;
+}
+
+int hailo_pcie_driver_down(struct hailo_pcie_board *board)
+{
+ long completion_result = 0;
+ int err = 0;
+
+ reinit_completion(&board->driver_down.reset_completed);
+
+ hailo_pcie_write_firmware_driver_shutdown(&board->pcie_resources);
+
+ // Wait for response
+ completion_result =
+ wait_for_completion_timeout(&board->driver_down.reset_completed, msecs_to_jiffies(DEFAULT_SHUTDOWN_TIMEOUT_MS));
+ if (completion_result <= 0) {
+ if (0 == completion_result) {
+ hailo_err(board, "hailo_pcie_driver_down, timeout waiting for shutdown response (timeout_ms=%d)\n", DEFAULT_SHUTDOWN_TIMEOUT_MS);
+ err = -ETIMEDOUT;
+ } else {
+ hailo_info(board, "hailo_pcie_driver_down, wait for completion failed with err=%ld (process was interrupted or killed)\n",
+ completion_result);
+ err = completion_result;
+ }
+ goto l_exit;
+ }
+
+l_exit:
+ return err;
+}
+
+int hailo_pcie_fops_release(struct inode *inode, struct file *filp)
+{
+ struct hailo_pcie_board *pBoard = (struct hailo_pcie_board *)filp->private_data;
+ struct hailo_file_context *context = NULL;
+
+ u32 major = MAJOR(inode->i_rdev);
+ u32 minor = MINOR(inode->i_rdev);
+
+ if (pBoard) {
+ hailo_info(pBoard, "(%d: %d-%d): fops_release\n", current->tgid, major, minor);
+
+ if (down_interruptible(&pBoard->mutex)) {
+ hailo_err(pBoard, "fops_release down_interruptible failed");
+ return -ERESTARTSYS;
+ }
+
+ context = find_file_context(pBoard, filp);
+ if (NULL == context) {
+ hailo_err(pBoard, "Invalid driver state, file context does not exist\n");
+ up(&pBoard->mutex);
+ return -EINVAL;
+ }
+
+ if (false == context->is_valid) {
+ // File context is invalid but still open. It's OK to continue to finalize and release it.
+ hailo_err(pBoard, "Invalid file context\n");
+ }
+
+ hailo_pcie_clear_notification_wait_list(pBoard, filp);
+
+ if (filp == pBoard->vdma.used_by_filp) {
+ if (hailo_pcie_driver_down(pBoard)) {
+ hailo_err(pBoard, "Failed sending FW shutdown event");
+ }
+ }
+
+ hailo_vdma_file_context_finalize(&context->vdma_context, &pBoard->vdma, filp);
+ release_file_context(context);
+
+ if (atomic_dec_and_test(&pBoard->ref_count)) {
+ // Disable interrupts
+ hailo_disable_interrupts(pBoard);
+
+ if (power_mode_enabled()) {
+ if (pBoard->pDev && pci_set_power_state(pBoard->pDev, PCI_D3hot) < 0) {
+ hailo_err(pBoard, "Failed setting power state to D3hot");
+ }
+ }
+
+ // deallocate board if already removed
+ if (!pBoard->pDev) {
+ hailo_dbg(pBoard, "fops_close, freed board\n");
+ up(&pBoard->mutex);
+ kfree(pBoard);
+ pBoard = NULL;
+ } else {
+
+ hailo_dbg(pBoard, "fops_close, released resources for board\n");
+ up(&pBoard->mutex);
+ }
+ } else {
+ up(&pBoard->mutex);
+ }
+
+ hailo_dbg(pBoard, "(%d: %d-%d): fops_close: SUCCESS on /dev/hailo%d\n", current->tgid,
+ major, minor, minor);
+ }
+
+ return 0;
+}
+
+static long hailo_memory_transfer_ioctl(struct hailo_pcie_board *board, unsigned long arg)
+{
+ long err = 0;
+ struct hailo_memory_transfer_params* transfer = &board->memory_transfer_params;
+
+ hailo_dbg(board, "Start memory transfer ioctl\n");
+
+ if (copy_from_user(transfer, (void __user*)arg, sizeof(*transfer))) {
+ hailo_err(board, "copy_from_user fail\n");
+ return -ENOMEM;
+ }
+
+ err = hailo_pcie_memory_transfer(&board->pcie_resources, transfer);
+ if (err < 0) {
+ hailo_err(board, "memory transfer failed %ld", err);
+ }
+
+ if (copy_to_user((void __user*)arg, transfer, sizeof(*transfer))) {
+ hailo_err(board, "copy_to_user fail\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+static long hailo_read_log_ioctl(struct hailo_pcie_board *pBoard, unsigned long arg)
+{
+ long err = 0;
+ struct hailo_read_log_params params;
+
+ if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
+ hailo_err(pBoard, "HAILO_READ_LOG, copy_from_user fail\n");
+ return -ENOMEM;
+ }
+
+ if (0 > (err = hailo_pcie_read_firmware_log(&pBoard->pcie_resources, &params))) {
+ hailo_err(pBoard, "HAILO_READ_LOG, reading from log failed with error: %ld \n", err);
+ return err;
+ }
+
+ if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void firmware_notification_irq_handler(struct hailo_pcie_board *board)
+{
+ struct hailo_notification_wait *notif_wait_cursor = NULL;
+ int err = 0;
+ unsigned long irq_saved_flags = 0;
+
+ spin_lock_irqsave(&board->notification_read_spinlock, irq_saved_flags);
+ err = hailo_pcie_read_firmware_notification(&board->pcie_resources, &board->notification_cache);
+ spin_unlock_irqrestore(&board->notification_read_spinlock, irq_saved_flags);
+
+ if (err < 0) {
+ hailo_err(board, "Failed reading firmware notification");
+ }
+ else {
+ rcu_read_lock();
+ list_for_each_entry_rcu(notif_wait_cursor, &board->notification_wait_list, notification_wait_list)
+ {
+ complete(&notif_wait_cursor->notification_completion);
+ }
+ rcu_read_unlock();
+ }
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+irqreturn_t hailo_irqhandler(int irq, void *dev_id, struct pt_regs *regs)
+#else
+irqreturn_t hailo_irqhandler(int irq, void *dev_id)
+#endif
+{
+ irqreturn_t return_value = IRQ_NONE;
+ struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_id;
+ bool got_interrupt = false;
+ struct hailo_pcie_interrupt_source irq_source = {0};
+
+ hailo_dbg(board, "hailo_irqhandler\n");
+
+ while (true) {
+ if (!hailo_pcie_is_device_connected(&board->pcie_resources)) {
+ hailo_err(board, "Device disconnected while handling irq\n");
+ break;
+ }
+
+ got_interrupt = hailo_pcie_read_interrupt(&board->pcie_resources, &irq_source);
+ if (!got_interrupt) {
+ break;
+ }
+
+ return_value = IRQ_HANDLED;
+
+ // wake fw_control if needed
+ if (irq_source.interrupt_bitmask & FW_CONTROL) {
+ complete(&board->fw_control.completion);
+ }
+
+ // wake driver_down if needed
+ if (irq_source.interrupt_bitmask & DRIVER_DOWN) {
+ complete(&board->driver_down.reset_completed);
+ }
+
+ if (irq_source.interrupt_bitmask & FW_NOTIFICATION) {
+ if (!completion_done(&board->fw_loaded_completion)) {
+ // Complete firmware loaded completion
+ complete_all(&board->fw_loaded_completion);
+ } else {
+ firmware_notification_irq_handler(board);
+ }
+ }
+
+ if (0 != irq_source.vdma_channels_bitmap) {
+ hailo_vdma_irq_handler(&board->vdma, DEFAULT_VDMA_ENGINE_INDEX,
+ irq_source.vdma_channels_bitmap);
+ }
+ }
+
+ return return_value;
+}
+
+static long hailo_get_notification_wait_thread(struct hailo_pcie_board *pBoard, struct file *filp,
+ struct hailo_notification_wait **current_waiting_thread)
+{
+ struct hailo_notification_wait *cursor = NULL;
+ // note: safe to access without rcu because the notification_wait_list is closed only on file release
+ list_for_each_entry(cursor, &pBoard->notification_wait_list, notification_wait_list)
+ {
+ if ((current->tgid == cursor->tgid) && (filp == cursor->filp)) {
+ *current_waiting_thread = cursor;
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static long hailo_add_notification_wait(struct hailo_pcie_board *board, struct file *filp)
+{
+ struct hailo_notification_wait *new_notification_wait = NULL;
+ if (!(new_notification_wait = kmalloc(sizeof(*new_notification_wait), GFP_KERNEL))) {
+ hailo_err(board, "Failed to allocate notification wait structure.\n");
+ return -ENOMEM;
+ }
+ new_notification_wait->tgid = current->tgid;
+ new_notification_wait->filp = filp;
+ new_notification_wait->is_disabled = false;
+ init_completion(&new_notification_wait->notification_completion);
+ list_add_rcu(&new_notification_wait->notification_wait_list, &board->notification_wait_list);
+ return 0;
+}
+
+static long hailo_read_notification_ioctl(struct hailo_pcie_board *pBoard, unsigned long arg, struct file *filp,
+ bool* should_up_board_mutex)
+{
+ long err = 0;
+ struct hailo_notification_wait *current_waiting_thread = NULL;
+ struct hailo_d2h_notification *notification = &pBoard->notification_to_user;
+ unsigned long irq_saved_flags;
+
+ err = hailo_get_notification_wait_thread(pBoard, filp, &current_waiting_thread);
+ if (0 != err) {
+ goto l_exit;
+ }
+ up(&pBoard->mutex);
+
+ if (0 > (err = wait_for_completion_interruptible(&current_waiting_thread->notification_completion))) {
+ hailo_info(pBoard,
+ "HAILO_READ_NOTIFICATION - wait_for_completion_interruptible error. err=%ld. tgid=%d (process was interrupted or killed)\n",
+ err, current_waiting_thread->tgid);
+ *should_up_board_mutex = false;
+ goto l_exit;
+ }
+
+ if (down_interruptible(&pBoard->mutex)) {
+ hailo_info(pBoard, "HAILO_READ_NOTIFICATION - down_interruptible error (process was interrupted or killed)\n");
+ *should_up_board_mutex = false;
+ err = -ERESTARTSYS;
+ goto l_exit;
+ }
+
+ // Check if was disabled
+ if (current_waiting_thread->is_disabled) {
+ hailo_info(pBoard, "HAILO_READ_NOTIFICATION, can't find notification wait for tgid=%d\n", current->tgid);
+ err = -EINVAL;
+ goto l_exit;
+ }
+
+ reinit_completion(&current_waiting_thread->notification_completion);
+
+ spin_lock_irqsave(&pBoard->notification_read_spinlock, irq_saved_flags);
+ notification->buffer_len = pBoard->notification_cache.buffer_len;
+ memcpy(notification->buffer, pBoard->notification_cache.buffer, notification->buffer_len);
+ spin_unlock_irqrestore(&pBoard->notification_read_spinlock, irq_saved_flags);
+
+ if (copy_to_user((void __user*)arg, notification, sizeof(*notification))) {
+ hailo_err(pBoard, "HAILO_READ_NOTIFICATION copy_to_user fail\n");
+ err = -ENOMEM;
+ goto l_exit;
+ }
+
+l_exit:
+ return err;
+}
+
+static long hailo_disable_notification(struct hailo_pcie_board *pBoard, struct file *filp)
+{
+ struct hailo_notification_wait *cursor = NULL;
+
+ hailo_info(pBoard, "HAILO_DISABLE_NOTIFICATION: disable notification");
+ rcu_read_lock();
+ list_for_each_entry_rcu(cursor, &pBoard->notification_wait_list, notification_wait_list) {
+ if ((current->tgid == cursor->tgid) && (filp == cursor->filp)) {
+ cursor->is_disabled = true;
+ complete(&cursor->notification_completion);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int hailo_fw_control(struct hailo_pcie_board *pBoard, unsigned long arg, bool* should_up_board_mutex)
+{
+ struct hailo_fw_control *command = &pBoard->fw_control.command;
+ long completion_result = 0;
+ int err = 0;
+
+ up(&pBoard->mutex);
+ *should_up_board_mutex = false;
+
+ if (down_interruptible(&pBoard->fw_control.mutex)) {
+ hailo_info(pBoard, "hailo_fw_control down_interruptible fail tgid:%d (process was interrupted or killed)\n", current->tgid);
+ return -ERESTARTSYS;
+ }
+
+ if (copy_from_user(command, (void __user*)arg, sizeof(*command))) {
+ hailo_err(pBoard, "hailo_fw_control, copy_from_user fail\n");
+ err = -ENOMEM;
+ goto l_exit;
+ }
+
+ reinit_completion(&pBoard->fw_control.completion);
+
+ err = hailo_pcie_write_firmware_control(&pBoard->pcie_resources, command);
+ if (err < 0) {
+ hailo_err(pBoard, "Failed writing fw control to pcie\n");
+ goto l_exit;
+ }
+
+ // Wait for response
+ completion_result = wait_for_completion_interruptible_timeout(&pBoard->fw_control.completion, msecs_to_jiffies(command->timeout_ms));
+ if (completion_result <= 0) {
+ if (0 == completion_result) {
+ hailo_err(pBoard, "hailo_fw_control, timeout waiting for control (timeout_ms=%d)\n", command->timeout_ms);
+ err = -ETIMEDOUT;
+ } else {
+ hailo_info(pBoard, "hailo_fw_control, wait for completion failed with err=%ld (process was interrupted or killed)\n", completion_result);
+ err = -EINTR;
+ }
+ goto l_exit;
+ }
+
+ err = hailo_pcie_read_firmware_control(&pBoard->pcie_resources, command);
+ if (err < 0) {
+ hailo_err(pBoard, "Failed reading fw control from pcie\n");
+ goto l_exit;
+ }
+
+ if (copy_to_user((void __user*)arg, command, sizeof(*command))) {
+ hailo_err(pBoard, "hailo_fw_control, copy_to_user fail\n");
+ err = -ENOMEM;
+ goto l_exit;
+ }
+
+l_exit:
+ up(&pBoard->fw_control.mutex);
+ return err;
+}
+
+static long hailo_query_device_properties(struct hailo_pcie_board *board, unsigned long arg)
+{
+ struct hailo_device_properties props = {
+ .desc_max_page_size = board->desc_max_page_size,
+ .allocation_mode = board->allocation_mode,
+ .dma_type = HAILO_DMA_TYPE_PCIE,
+ .dma_engines_count = board->vdma.vdma_engines_count,
+ .is_fw_loaded = hailo_pcie_is_firmware_loaded(&board->pcie_resources),
+ };
+
+ hailo_info(board, "HAILO_QUERY_DEVICE_PROPERTIES: desc_max_page_size=%u\n", props.desc_max_page_size);
+
+ if (copy_to_user((void __user*)arg, &props, sizeof(props))) {
+ hailo_err(board, "HAILO_QUERY_DEVICE_PROPERTIES, copy_to_user failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static long hailo_query_driver_info(struct hailo_pcie_board *board, unsigned long arg)
+{
+ struct hailo_driver_info info = {
+ .major_version = HAILO_DRV_VER_MAJOR,
+ .minor_version = HAILO_DRV_VER_MINOR,
+ .revision_version = HAILO_DRV_VER_REVISION
+ };
+
+ hailo_info(board, "HAILO_QUERY_DRIVER_INFO: major=%u, minor=%u, revision=%u\n",
+ info.major_version, info.minor_version, info.revision_version);
+
+ if (copy_to_user((void __user*)arg, &info, sizeof(info))) {
+ hailo_err(board, "HAILO_QUERY_DRIVER_INFO, copy_to_user failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static long hailo_general_ioctl(struct hailo_file_context *context, struct hailo_pcie_board *board,
+ unsigned int cmd, unsigned long arg, struct file *filp, bool *should_up_board_mutex)
+{
+ switch (cmd) {
+ case HAILO_MEMORY_TRANSFER:
+ return hailo_memory_transfer_ioctl(board, arg);
+ case HAILO_FW_CONTROL:
+ return hailo_fw_control(board, arg, should_up_board_mutex);
+ case HAILO_READ_NOTIFICATION:
+ return hailo_read_notification_ioctl(board, arg, filp, should_up_board_mutex);
+ case HAILO_DISABLE_NOTIFICATION:
+ return hailo_disable_notification(board, filp);
+ case HAILO_QUERY_DEVICE_PROPERTIES:
+ return hailo_query_device_properties(board, arg);
+ case HAILO_QUERY_DRIVER_INFO:
+ return hailo_query_driver_info(board, arg);
+ case HAILO_READ_LOG:
+ return hailo_read_log_ioctl(board, arg);
+ default:
+ hailo_err(board, "Invalid general ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
+ return -ENOTTY;
+ }
+}
+
+long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg)
+{
+ long err = 0;
+ struct hailo_pcie_board* board = (struct hailo_pcie_board*) filp->private_data;
+ struct hailo_file_context *context = NULL;
+ bool should_up_board_mutex = true;
+
+
+ if (!board || !board->pDev) return -ENODEV;
+
+ hailo_dbg(board, "(%d): fops_unlockedioctl. cmd:%d\n", current->tgid, _IOC_NR(cmd));
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ {
+ err = !compatible_access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ {
+ err = !compatible_access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
+ }
+
+ if (err) {
+ hailo_err(board, "Invalid ioctl parameter access 0x%x", cmd);
+ return -EFAULT;
+ }
+
+ if (down_interruptible(&board->mutex)) {
+ hailo_err(board, "unlockedioctl down_interruptible failed");
+ return -ERESTARTSYS;
+ }
+ BUG_ON(board->mutex.count != 0);
+
+ context = find_file_context(board, filp);
+ if (NULL == context) {
+ hailo_err(board, "Invalid driver state, file context does not exist\n");
+ up(&board->mutex);
+ return -EINVAL;
+ }
+
+ if (false == context->is_valid) {
+ hailo_err(board, "Invalid file context\n");
+ up(&board->mutex);
+ return -EINVAL;
+ }
+
+ switch (_IOC_TYPE(cmd)) {
+ case HAILO_GENERAL_IOCTL_MAGIC:
+ err = hailo_general_ioctl(context, board, cmd, arg, filp, &should_up_board_mutex);
+ break;
+ case HAILO_VDMA_IOCTL_MAGIC:
+ err = hailo_vdma_ioctl(&context->vdma_context, &board->vdma, cmd, arg, filp, &board->mutex,
+ &should_up_board_mutex);
+ break;
+ default:
+ hailo_err(board, "Invalid ioctl type %d\n", _IOC_TYPE(cmd));
+ err = -ENOTTY;
+ }
+
+ if (should_up_board_mutex) {
+ up(&board->mutex);
+ }
+
+ hailo_dbg(board, "(%d): fops_unlockedioct: SUCCESS\n", current->tgid);
+ return err;
+
+}
+
+int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma)
+{
+ int err = 0;
+
+ uintptr_t vdma_handle = vma->vm_pgoff << PAGE_SHIFT;
+
+ struct hailo_pcie_board* board = (struct hailo_pcie_board*)filp->private_data;
+ struct hailo_file_context *context = NULL;
+
+ BUILD_BUG_ON_MSG(sizeof(vma->vm_pgoff) < sizeof(vdma_handle),
+ "If this expression fails to compile it means the target HW is not compatible with our approach to use "
+ "the page offset paramter of 'mmap' to pass the driver the 'handle' of the desired descriptor");
+
+ vma->vm_pgoff = 0; // vm_pgoff contains vdma_handle page offset, the actual offset from the phys addr is 0
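+    // For illustration: userspace obtains a vdma handle from the driver and passes it back as
+    // the mmap offset, e.g. mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, handle);
+    // the kernel stores handle >> PAGE_SHIFT in vm_pgoff, which the shift above undoes.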
+
+ hailo_info(board, "%d fops_mmap\n", current->tgid);
+
+ if (!board || !board->pDev) return -ENODEV;
+
+ if (down_interruptible(&board->mutex)) {
+ hailo_err(board, "hailo_pcie_fops_mmap down_interruptible fail tgid:%d\n", current->tgid);
+ return -ERESTARTSYS;
+ }
+
+ context = find_file_context(board, filp);
+ if (NULL == context) {
+ up(&board->mutex);
+ hailo_err(board, "Invalid driver state, file context does not exist\n");
+ return -EINVAL;
+ }
+
+ if (false == context->is_valid) {
+ up(&board->mutex);
+ hailo_err(board, "Invalid file context\n");
+ return -EINVAL;
+ }
+
+ err = hailo_vdma_mmap(&context->vdma_context, &board->vdma, vma, vdma_handle);
+ up(&board->mutex);
+ return err;
+}
--- /dev/null
+++ b/drivers/media/pci/hailo/src/fops.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_PCI_FOPS_H_
+#define _HAILO_PCI_FOPS_H_
+
+int hailo_pcie_fops_open(struct inode* inode, struct file* filp);
+int hailo_pcie_fops_release(struct inode* inode, struct file* filp);
+long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg);
+int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma);
+int hailo_pcie_driver_down(struct hailo_pcie_board *board);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+irqreturn_t hailo_irqhandler(int irq, void* dev_id, struct pt_regs *regs);
+#else
+irqreturn_t hailo_irqhandler(int irq, void* dev_id);
+#endif
+
+#endif /* _HAILO_PCI_FOPS_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/src/pcie.c
@@ -0,0 +1,1012 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/firmware.h>
+#include <linux/kthread.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+#include <linux/dma-direct.h>
+#endif
+
+#define KERNEL_CODE 1
+
+#include "hailo_pcie_version.h"
+#include "hailo_ioctl_common.h"
+#include "pcie.h"
+#include "fops.h"
+#include "sysfs.h"
+#include "utils/logs.h"
+#include "utils/compact.h"
+#include "vdma/vdma.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION( 5, 4, 0 )
+#include <linux/pci-aspm.h>
+#endif
+
+// Values for the driver parameter that either force buffer allocation from the driver or
+// from userspace, or don't force anything and let the driver decide.
+enum hailo_allocate_driver_buffer_driver_param {
+ HAILO_NO_FORCE_BUFFER = 0,
+ HAILO_FORCE_BUFFER_FROM_USERSPACE = 1,
+ HAILO_FORCE_BUFFER_FROM_DRIVER = 2,
+};
+
+// Debug flags
+static int force_desc_page_size = 0;
+static bool g_is_power_mode_enabled = true;
+static int force_allocation_from_driver = HAILO_NO_FORCE_BUFFER;
+
+#define DEVICE_NODE_NAME "hailo"
+static int char_major = 0;
+static struct class *chardev_class;
+
+static LIST_HEAD(g_hailo_board_list);
+static struct semaphore g_hailo_add_board_mutex = __SEMAPHORE_INITIALIZER(g_hailo_add_board_mutex, 1);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
+#define HAILO_IRQ_FLAGS (SA_SHIRQ | SA_INTERRUPT)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+#define HAILO_IRQ_FLAGS (IRQF_SHARED | IRQF_DISABLED)
+#else
+#define HAILO_IRQ_FLAGS (IRQF_SHARED)
+#endif
+
+/* ******************************** */
+bool power_mode_enabled(void)
+{
+#if !defined(HAILO_EMULATOR)
+ return g_is_power_mode_enabled;
+#else /* !defined(HAILO_EMULATOR) */
+ return false;
+#endif /* !defined(HAILO_EMULATOR) */
+}
+
+
+/**
+ * Due to a HW bug, on systems with a low MaxReadReq (< 512) we need to use a different descriptor size.
+ * Returns 0 on success (setting *out_page_size) or a negative error code on failure.
+ */
+static int hailo_get_desc_page_size(struct pci_dev *pdev, u32 *out_page_size)
+{
+ u16 pcie_device_control = 0;
+ int err = 0;
+ // The default page size must be smaller/equal to 32K (due to PLDA registers limit).
+ const u32 max_page_size = 32u * 1024u;
+    const u32 default_page_size = min((u32)PAGE_SIZE, max_page_size);
+
+ if (force_desc_page_size != 0) {
+        // The user provided desc_page_size as a module parameter
+ if ((force_desc_page_size & (force_desc_page_size - 1)) != 0) {
+ pci_err(pdev, "force_desc_page_size must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ if (force_desc_page_size > max_page_size) {
+ pci_err(pdev, "force_desc_page_size %d mustn't be larger than %u", force_desc_page_size, max_page_size);
+ return -EINVAL;
+ }
+
+ pci_notice(pdev, "Probing: Force setting max_desc_page_size to %d (recommended value is %lu)\n",
+ force_desc_page_size, PAGE_SIZE);
+ *out_page_size = force_desc_page_size;
+ return 0;
+ }
+
+ err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_device_control);
+ if (err < 0) {
+ pci_err(pdev, "Couldn't read DEVCTL capability\n");
+ return err;
+ }
+
+ switch (pcie_device_control & PCI_EXP_DEVCTL_READRQ) {
+ case PCI_EXP_DEVCTL_READRQ_128B:
+ pci_notice(pdev, "Probing: Setting max_desc_page_size to 128 (recommended value is %u)\n", defualt_page_size);
+ *out_page_size = 128;
+ return 0;
+ case PCI_EXP_DEVCTL_READRQ_256B:
+ pci_notice(pdev, "Probing: Setting max_desc_page_size to 256 (recommended value is %u)\n", defualt_page_size);
+ *out_page_size = 256;
+ return 0;
+ default:
+ pci_notice(pdev, "Probing: Setting max_desc_page_size to %u, (page_size=%lu)\n", defualt_page_size, PAGE_SIZE);
+ *out_page_size = defualt_page_size;
+ return 0;
+    }
+}
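+
+// Example: with MaxReadReq configured to 128B or 256B the descriptor page size is capped
+// at 128/256 bytes respectively; 512B and above fall through to the default (PAGE_SIZE,
+// at most 32K).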
+
+// should be called only from fops_open (once)
+struct hailo_pcie_board* hailo_pcie_get_board_index(u32 index)
+{
+ struct hailo_pcie_board *pBoard, *pRet = NULL;
+
+ down(&g_hailo_add_board_mutex);
+ list_for_each_entry(pBoard, &g_hailo_board_list, board_list)
+ {
+ if ( index == pBoard->board_index )
+ {
+ atomic_inc(&pBoard->ref_count);
+ pRet = pBoard;
+ break;
+ }
+ }
+ up(&g_hailo_add_board_mutex);
+
+ return pRet;
+}
+
+/**
+ * hailo_pcie_disable_aspm - Disable ASPM states
+ * @board: pointer to PCI board struct
+ * @state: bit-mask of ASPM states to disable
+ * @locked: indication if this context holds pci_bus_sem locked.
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static int hailo_pcie_disable_aspm(struct hailo_pcie_board *board, u16 state, bool locked)
+{
+ struct pci_dev *pdev = board->pDev;
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_dis_mask = 0;
+ u16 pdev_aspmc = 0;
+ u16 parent_aspmc = 0;
+ int err = 0;
+
+ switch (state) {
+ case PCIE_LINK_STATE_L0S:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+ break;
+ case PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+ break;
+ default:
+ break;
+ }
+
+ err = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ if (err < 0) {
+ hailo_err(board, "Couldn't read LNKCTL capability\n");
+ return err;
+ }
+
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (parent) {
+ err = pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_aspmc);
+ if (err < 0) {
+ hailo_err(board, "Couldn't read slot LNKCTL capability\n");
+ return err;
+ }
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ hailo_notice(board, "Disabling ASPM %s %s\n",
+ (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+ (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+ // Disable L0s even if it is currently disabled as ASPM states can be enabled by the kernel when changing power modes
+#ifdef CONFIG_PCIEASPM
+ if (locked) {
+        // Older kernel versions (< 5.2.21) don't return a value from these functions, so we try manual disabling anyway
+ (void)pci_disable_link_state_locked(pdev, state);
+ } else {
+ (void)pci_disable_link_state(pdev, state);
+ }
+
+ /* Double-check ASPM control. If not disabled by the above, the
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+ * not enabled); override by writing PCI config space directly.
+ */
+ err = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ if (err < 0) {
+ hailo_err(board, "Couldn't read LNKCTL capability\n");
+ return err;
+ }
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (!(aspm_dis_mask & pdev_aspmc)) {
+ hailo_notice(board, "Successfully disabled ASPM %s %s\n",
+ (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+ (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+ return 0;
+ }
+#endif
+
+ /* Both device and parent should have the same ASPM setting.
+ * Disable ASPM in downstream component first and then upstream.
+ */
+ err = pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
+ if (err < 0) {
+ hailo_err(board, "Couldn't read LNKCTL capability\n");
+ return err;
+ }
+ if (parent) {
+ err = pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, aspm_dis_mask);
+ if (err < 0) {
+ hailo_err(board, "Couldn't read slot LNKCTL capability\n");
+ return err;
+ }
+ }
+ hailo_notice(board, "Manually disabled ASPM %s %s\n",
+ (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+ (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+ return 0;
+}
+
+static void hailo_pcie_insert_board(struct hailo_pcie_board* pBoard)
+{
+ u32 index = 0;
+ struct hailo_pcie_board *pCurrent, *pNext;
+
+
+ down(&g_hailo_add_board_mutex);
+ if ( list_empty(&g_hailo_board_list) ||
+ list_first_entry(&g_hailo_board_list, struct hailo_pcie_board, board_list)->board_index > 0)
+ {
+ pBoard->board_index = 0;
+ list_add(&pBoard->board_list, &g_hailo_board_list);
+
+ up(&g_hailo_add_board_mutex);
+ return;
+ }
+
+ list_for_each_entry_safe(pCurrent, pNext, &g_hailo_board_list, board_list)
+ {
+ index = pCurrent->board_index+1;
+ if( list_is_last(&pCurrent->board_list, &g_hailo_board_list) || (index != pNext->board_index))
+ {
+ break;
+ }
+ }
+
+ pBoard->board_index = index;
+ list_add(&pBoard->board_list, &pCurrent->board_list);
+
+ up(&g_hailo_add_board_mutex);
+
+ return;
+}
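+
+// Example: with boards 0, 1 and 3 registered, the next probed board becomes index 2 -
+// the list stays sorted by board_index and the lowest free index is reused.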
+
+static void hailo_pcie_remove_board(struct hailo_pcie_board* pBoard)
+{
+ down(&g_hailo_add_board_mutex);
+ if (pBoard)
+ {
+ list_del(&pBoard->board_list);
+ }
+ up(&g_hailo_add_board_mutex);
+}
+
+static int hailo_write_config(struct hailo_pcie_resources *resources, struct device *dev,
+ const struct hailo_config_constants *config_consts)
+{
+ const struct firmware *config = NULL;
+ int err = 0;
+
+ if (NULL == config_consts->filename) {
+ // Config not supported for platform
+ return 0;
+ }
+
+ err = request_firmware_direct(&config, config_consts->filename, dev);
+ if (err < 0) {
+ hailo_dev_info(dev, "Config %s not found\n", config_consts->filename);
+ return 0;
+ }
+
+ hailo_dev_notice(dev, "Writing config %s\n", config_consts->filename);
+
+ err = hailo_pcie_write_config_common(resources, config->data, config->size, config_consts);
+ if (err < 0) {
+ if (-EINVAL == err) {
+ hailo_dev_warn(dev, "Config size %zu is bigger than max %zu\n", config->size, config_consts->max_size);
+ }
+ release_firmware(config);
+ return err;
+ }
+
+ release_firmware(config);
+ return 0;
+}
+
+static bool wait_for_firmware_completion(struct completion *fw_load_completion)
+{
+ return (0 != wait_for_completion_timeout(fw_load_completion, FIRMWARE_WAIT_TIMEOUT_MS));
+}
+
+static int hailo_load_firmware(struct hailo_pcie_resources *resources,
+ struct device *dev, struct completion *fw_load_completion)
+{
+ const struct firmware *firmware = NULL;
+ int err = 0;
+
+ if (hailo_pcie_is_firmware_loaded(resources)) {
+ hailo_dev_warn(dev, "Firmware was already loaded\n");
+ return 0;
+ }
+
+ reinit_completion(fw_load_completion);
+
+ err = hailo_write_config(resources, dev, hailo_pcie_get_board_config_constants(resources->board_type));
+ if (err < 0) {
+ hailo_dev_err(dev, "Failed writing board config");
+ return err;
+ }
+
+ err = hailo_write_config(resources, dev, hailo_pcie_get_user_config_constants(resources->board_type));
+ if (err < 0) {
+ hailo_dev_err(dev, "Failed writing fw config");
+ return err;
+ }
+
+ // read firmware file
+ err = request_firmware_direct(&firmware, hailo_pcie_get_fw_filename(resources->board_type), dev);
+ if (err < 0) {
+ hailo_dev_warn(dev, "Firmware file not found (/lib/firmware/%s), please upload the firmware manually \n",
+ hailo_pcie_get_fw_filename(resources->board_type));
+ return 0;
+ }
+
+ err = hailo_pcie_write_firmware(resources, firmware->data, firmware->size);
+ if (err < 0) {
+ hailo_dev_err(dev, "Failed writing firmware. err %d\n", err);
+ release_firmware(firmware);
+ return err;
+ }
+
+ release_firmware(firmware);
+
+ if (!wait_for_firmware_completion(fw_load_completion)) {
+ hailo_dev_err(dev, "Timeout waiting for firmware..\n");
+ return -ETIMEDOUT;
+ }
+
+ hailo_dev_notice(dev, "Firmware was loaded successfully\n");
+ return 0;
+}
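+
+// Note: request_firmware_direct() searches the kernel firmware paths (e.g. /lib/firmware)
+// without falling back to the user-mode helper, so a missing file fails fast instead of
+// blocking the probe.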
+
+static int hailo_activate_board(struct hailo_pcie_board *board)
+{
+ int err = 0;
+
+ (void)hailo_pcie_disable_aspm(board, PCIE_LINK_STATE_L0S, false);
+
+ err = hailo_enable_interrupts(board);
+ if (err < 0) {
+ hailo_err(board, "Failed Enabling interrupts %d\n", err);
+ return err;
+ }
+
+ err = hailo_load_firmware(&board->pcie_resources, &board->pDev->dev,
+ &board->fw_loaded_completion);
+ if (err < 0) {
+ hailo_err(board, "Firmware load failed\n");
+ hailo_disable_interrupts(board);
+ return err;
+ }
+
+ hailo_disable_interrupts(board);
+
+ if (power_mode_enabled()) {
+ // Setting the device to low power state, until the user opens the device
+ err = pci_set_power_state(board->pDev, PCI_D3hot);
+ if (err < 0) {
+ hailo_err(board, "Set power state failed %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int hailo_enable_interrupts(struct hailo_pcie_board *board)
+{
+ int err = 0;
+
+ if (board->interrupts_enabled) {
+ hailo_crit(board, "Failed enabling interrupts (already enabled)\n");
+ return -EINVAL;
+ }
+
+ // TODO HRT-2253: use new api for enabling msi: (pci_alloc_irq_vectors)
+ if ((err = pci_enable_msi(board->pDev))) {
+ hailo_err(board, "Failed to enable MSI %d\n", err);
+ return err;
+ }
+ hailo_info(board, "Enabled MSI interrupt\n");
+
+ err = request_irq(board->pDev->irq, hailo_irqhandler, HAILO_IRQ_FLAGS, DRIVER_NAME, board);
+ if (err) {
+ hailo_err(board, "request_irq failed %d\n", err);
+ pci_disable_msi(board->pDev);
+ return err;
+ }
+ hailo_info(board, "irq enabled %u\n", board->pDev->irq);
+
+ hailo_pcie_enable_interrupts(&board->pcie_resources);
+
+ board->interrupts_enabled = true;
+ return 0;
+}
+
+void hailo_disable_interrupts(struct hailo_pcie_board *board)
+{
+ // Sanity Check
+ if ((NULL == board) || (NULL == board->pDev)) {
+ pr_err("Failed to access board or device\n");
+ return;
+ }
+
+ if (!board->interrupts_enabled) {
+ return;
+ }
+
+ board->interrupts_enabled = false;
+ hailo_pcie_disable_interrupts(&board->pcie_resources);
+ free_irq(board->pDev->irq, board);
+ pci_disable_msi(board->pDev);
+}
+
+static int hailo_bar_iomap(struct pci_dev *pdev, int bar, struct hailo_resource *resource)
+{
+ resource->size = pci_resource_len(pdev, bar);
+ resource->address = (uintptr_t)(pci_iomap(pdev, bar, resource->size));
+
+ if (!resource->size || !resource->address) {
+ pci_err(pdev, "Probing: Invalid PCIe BAR %d", bar);
+ return -EINVAL;
+ }
+
+ pci_notice(pdev, "Probing: mapped bar %d - %p %zu\n", bar,
+ (void*)resource->address, resource->size);
+ return 0;
+}
+
+static void hailo_bar_iounmap(struct pci_dev *pdev, struct hailo_resource *resource)
+{
+ if (resource->address) {
+ pci_iounmap(pdev, (void*)resource->address);
+ resource->address = 0;
+ resource->size = 0;
+ }
+}
+
+static int pcie_resources_init(struct pci_dev *pdev, struct hailo_pcie_resources *resources,
+ enum hailo_board_type board_type)
+{
+ int err = -EINVAL;
+ if (board_type >= HAILO_BOARD_TYPE_COUNT) {
+ pci_err(pdev, "Probing: Invalid board type %d\n", (int)board_type);
+ err = -EINVAL;
+ goto failure_exit;
+ }
+
+ err = pci_request_regions(pdev, DRIVER_NAME);
+ if (err < 0) {
+ pci_err(pdev, "Probing: Error allocating bars %d\n", err);
+ goto failure_exit;
+ }
+
+ err = hailo_bar_iomap(pdev, HAILO_PCIE_CONFIG_BAR, &resources->config);
+ if (err < 0) {
+ goto failure_release_regions;
+ }
+
+ err = hailo_bar_iomap(pdev, HAILO_PCIE_VDMA_REGS_BAR, &resources->vdma_registers);
+ if (err < 0) {
+ goto failure_release_config;
+ }
+
+ err = hailo_bar_iomap(pdev, HAILO_PCIE_FW_ACCESS_BAR, &resources->fw_access);
+ if (err < 0) {
+ goto failure_release_vdma_regs;
+ }
+
+ resources->board_type = board_type;
+
+ if (!hailo_pcie_is_device_connected(resources)) {
+ pci_err(pdev, "Probing: Failed reading device BARs, device may be disconnected\n");
+ err = -ENODEV;
+ goto failure_release_fw_access;
+ }
+
+ return 0;
+
+failure_release_fw_access:
+ hailo_bar_iounmap(pdev, &resources->fw_access);
+failure_release_vdma_regs:
+ hailo_bar_iounmap(pdev, &resources->vdma_registers);
+failure_release_config:
+ hailo_bar_iounmap(pdev, &resources->config);
+failure_release_regions:
+ pci_release_regions(pdev);
+failure_exit:
+ return err;
+}
+
+static void pcie_resources_release(struct pci_dev *pdev, struct hailo_pcie_resources *resources)
+{
+ hailo_bar_iounmap(pdev, &resources->config);
+ hailo_bar_iounmap(pdev, &resources->vdma_registers);
+ hailo_bar_iounmap(pdev, &resources->fw_access);
+ pci_release_regions(pdev);
+}
+
+static void update_channel_interrupts(struct hailo_vdma_controller *controller,
+ size_t engine_index, u32 channels_bitmap)
+{
+ struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(controller->dev);
+ if (engine_index >= board->vdma.vdma_engines_count) {
+ hailo_err(board, "Invalid engine index %zu", engine_index);
+ return;
+ }
+
+ hailo_pcie_update_channel_interrupts_mask(&board->pcie_resources, channels_bitmap);
+}
+
+static struct hailo_vdma_controller_ops pcie_vdma_controller_ops = {
+ .update_channel_interrupts = update_channel_interrupts,
+};
+
+
+static int hailo_pcie_vdma_controller_init(struct hailo_vdma_controller *controller,
+ struct device *dev, struct hailo_resource *vdma_registers)
+{
+ const size_t engines_count = 1;
+ return hailo_vdma_controller_init(controller, dev, &hailo_pcie_vdma_hw,
+ &pcie_vdma_controller_ops, vdma_registers, engines_count);
+}
+
+// Checks whether an address allocated with kmalloc is DMA capable.
+// If a kmalloc address is not DMA capable, we assume other addresses
+// won't be DMA capable either.
+static bool is_kmalloc_dma_capable(struct device *dev)
+{
+ void *check_addr = NULL;
+ dma_addr_t dma_addr = 0;
+ phys_addr_t phys_addr = 0;
+ bool capable = false;
+
+ if (!dev->dma_mask) {
+ return false;
+ }
+
+ check_addr = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (NULL == check_addr) {
+ dev_err(dev, "failed allocating page!\n");
+ return false;
+ }
+
+ phys_addr = virt_to_phys(check_addr);
+ dma_addr = phys_to_dma(dev, phys_addr);
+
+ capable = is_dma_capable(dev, dma_addr, PAGE_SIZE);
+ kfree(check_addr);
+ return capable;
+}
+
+static int hailo_get_allocation_mode(struct pci_dev *pdev, enum hailo_allocation_mode *allocation_mode)
+{
+    // Check if a module parameter was given to override the driver choice
+ if (HAILO_NO_FORCE_BUFFER != force_allocation_from_driver) {
+ if (HAILO_FORCE_BUFFER_FROM_USERSPACE == force_allocation_from_driver) {
+ *allocation_mode = HAILO_ALLOCATION_MODE_USERSPACE;
+ pci_notice(pdev, "Probing: Using userspace allocated vdma buffers\n");
+ }
+ else if (HAILO_FORCE_BUFFER_FROM_DRIVER == force_allocation_from_driver) {
+ *allocation_mode = HAILO_ALLOCATION_MODE_DRIVER;
+ pci_notice(pdev, "Probing: Using driver allocated vdma buffers\n");
+ }
+ else {
+ pci_err(pdev, "Invalid value for force allocation driver paramater - value given: %d!\n",
+ force_allocation_from_driver);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ if (is_kmalloc_dma_capable(&pdev->dev)) {
+ *allocation_mode = HAILO_ALLOCATION_MODE_USERSPACE;
+ pci_notice(pdev, "Probing: Using userspace allocated vdma buffers\n");
+ } else {
+ *allocation_mode = HAILO_ALLOCATION_MODE_DRIVER;
+ pci_notice(pdev, "Probing: Using driver allocated vdma buffers\n");
+ }
+
+ return 0;
+}
+
+static int hailo_pcie_probe(struct pci_dev* pDev, const struct pci_device_id* id)
+{
+ struct hailo_pcie_board * pBoard;
+ struct device *char_device = NULL;
+ int err = -EINVAL;
+
+ pci_notice(pDev, "Probing on: %04x:%04x...\n", pDev->vendor, pDev->device);
+#ifdef HAILO_EMULATOR
+ pci_notice(pDev, "PCIe driver was compiled in emulator mode\n");
+#endif /* HAILO_EMULATOR */
+ if (!g_is_power_mode_enabled) {
+ pci_notice(pDev, "PCIe driver was compiled with power modes disabled\n");
+ }
+
+ /* Initialize device extension for the board*/
+ pci_notice(pDev, "Probing: Allocate memory for device extension, %zu\n", sizeof(struct hailo_pcie_board));
+ pBoard = (struct hailo_pcie_board*) kzalloc( sizeof(struct hailo_pcie_board), GFP_KERNEL);
+ if (pBoard == NULL)
+ {
+ pci_err(pDev, "Probing: Failed to allocate memory for device extension structure\n");
+ err = -ENOMEM;
+ goto probe_exit;
+ }
+
+ pBoard->pDev = pDev;
+
+ if ( (err = pci_enable_device(pDev)) )
+ {
+ pci_err(pDev, "Probing: Failed calling pci_enable_device %d\n", err);
+ goto probe_free_board;
+ }
+ pci_notice(pDev, "Probing: Device enabled\n");
+
+ pci_set_master(pDev);
+
+ err = pcie_resources_init(pDev, &pBoard->pcie_resources, id->driver_data);
+ if (err < 0) {
+ pci_err(pDev, "Probing: Failed init pcie resources");
+ goto probe_disable_device;
+ }
+
+ err = hailo_get_desc_page_size(pDev, &pBoard->desc_max_page_size);
+ if (err < 0) {
+ goto probe_release_pcie_resources;
+ }
+
+ pBoard->interrupts_enabled = false;
+ init_completion(&pBoard->fw_loaded_completion);
+
+ sema_init(&pBoard->mutex, 1);
+ atomic_set(&pBoard->ref_count, 0);
+ INIT_LIST_HEAD(&pBoard->open_files_list);
+
+ sema_init(&pBoard->fw_control.mutex, 1);
+ spin_lock_init(&pBoard->notification_read_spinlock);
+ init_completion(&pBoard->fw_control.completion);
+
+ init_completion(&pBoard->driver_down.reset_completed);
+
+ INIT_LIST_HEAD(&pBoard->notification_wait_list);
+
+ memset(&pBoard->notification_cache, 0, sizeof(pBoard->notification_cache));
+ memset(&pBoard->memory_transfer_params, 0, sizeof(pBoard->memory_transfer_params));
+
+ err = hailo_pcie_vdma_controller_init(&pBoard->vdma, &pBoard->pDev->dev,
+ &pBoard->pcie_resources.vdma_registers);
+ if (err < 0) {
+ hailo_err(pBoard, "Failed init vdma controller %d\n", err);
+ goto probe_release_pcie_resources;
+ }
+
+ // Checks the dma mask => it must be called after the device's dma_mask is set by hailo_pcie_vdma_controller_init
+ err = hailo_get_allocation_mode(pDev, &pBoard->allocation_mode);
+ if (err < 0) {
+ pci_err(pDev, "Failed determining allocation of buffers from driver. error type: %d\n", err);
+ goto probe_release_pcie_resources;
+ }
+
+ err = hailo_activate_board(pBoard);
+ if (err < 0) {
+ hailo_err(pBoard, "Failed activating board %d\n", err);
+ goto probe_release_pcie_resources;
+ }
+
+ /* Keep track on the device, in order, to be able to remove it later */
+ pci_set_drvdata(pDev, pBoard);
+ hailo_pcie_insert_board(pBoard);
+
+ /* Create dynamically the device node*/
+ char_device = device_create_with_groups(chardev_class, NULL,
+ MKDEV(char_major, pBoard->board_index),
+ pBoard,
+ g_hailo_dev_groups,
+ DEVICE_NODE_NAME"%d", pBoard->board_index);
+ if (IS_ERR(char_device)) {
+ hailo_err(pBoard, "Failed creating dynamic device %d\n", pBoard->board_index);
+ err = PTR_ERR(char_device);
+ goto probe_remove_board;
+ }
+
+ hailo_notice(pBoard, "Probing: Added board %0x-%0x, /dev/hailo%d\n", pDev->vendor, pDev->device, pBoard->board_index);
+
+ return 0;
+
+probe_remove_board:
+ hailo_pcie_remove_board(pBoard);
+
+probe_release_pcie_resources:
+ pcie_resources_release(pBoard->pDev, &pBoard->pcie_resources);
+
+probe_disable_device:
+ pci_disable_device(pDev);
+
+probe_free_board:
+ kfree(pBoard);
+
+probe_exit:
+
+ return err;
+}
+
+static void hailo_pcie_remove(struct pci_dev* pDev)
+{
+ struct hailo_pcie_board* pBoard = (struct hailo_pcie_board*) pci_get_drvdata(pDev);
+ struct hailo_notification_wait *cursor = NULL;
+
+ pci_notice(pDev, "Remove: Releasing board\n");
+
+ if (pBoard)
+ {
+
+ // lock board to wait for any pending operations and for synchronization with open
+ down(&pBoard->mutex);
+
+
+ // remove board from active boards list
+ hailo_pcie_remove_board(pBoard);
+
+
+ /* Delete the device node */
+ device_destroy(chardev_class, MKDEV(char_major, pBoard->board_index));
+
+ // disable interrupts - will only disable if they have not been disabled in release already
+ hailo_disable_interrupts(pBoard);
+
+ pcie_resources_release(pBoard->pDev, &pBoard->pcie_resources);
+
+ // deassociate device from board to be picked up by char device
+ pBoard->pDev = NULL;
+
+ pBoard->vdma.dev = NULL;
+
+ pci_disable_device(pDev);
+
+ pci_set_drvdata(pDev, NULL);
+
+        // Take rcu_read_lock and complete notification_completion to wake anyone waiting on the notification_wait_list during removal
+ rcu_read_lock();
+ list_for_each_entry_rcu(cursor, &pBoard->notification_wait_list, notification_wait_list) {
+ cursor->is_disabled = true;
+ complete(&cursor->notification_completion);
+ }
+ rcu_read_unlock();
+
+ up(&pBoard->mutex);
+
+ if ( 0 == atomic_read(&pBoard->ref_count) )
+ {
+ // nobody has the board open - free
+ pci_notice(pDev, "Remove: Freed board, /dev/hailo%d\n", pBoard->board_index);
+ kfree(pBoard);
+ }
+ else
+ {
+ // board resources are freed on last close
+ pci_notice(pDev, "Remove: Scheduled for board removal, /dev/hailo%d\n", pBoard->board_index);
+ }
+ }
+
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hailo_pcie_suspend(struct device *dev)
+{
+ struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(dev);
+ struct hailo_file_context *cur = NULL;
+ int err = 0;
+
+ // lock board to wait for any pending operations
+ down(&board->mutex);
+
+ // Disable all interrupts. All interrupts from Hailo chip would be masked.
+ hailo_disable_interrupts(board);
+
+ // Close all vDMA channels
+ if (board->vdma.used_by_filp != NULL) {
+ err = hailo_pcie_driver_down(board);
+ if (err < 0) {
+ dev_notice(dev, "Error while trying to call FW to close vdma channels\n");
+ }
+ }
+
+    // Invalidate all active file contexts so every new action will return an error to the user.
+ list_for_each_entry(cur, &board->open_files_list, open_files_list) {
+ cur->is_valid = false;
+ }
+
+ // Release board
+ up(&board->mutex);
+
+ dev_notice(dev, "PM's suspend\n");
+ // Continue system suspend
+ return err;
+}
+
+static int hailo_pcie_resume(struct device *dev)
+{
+ struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(dev);
+ int err = 0;
+
+ if ((err = hailo_activate_board(board)) < 0) {
+ dev_err(dev, "Failed activating board %d\n", err);
+ return err;
+ }
+
+ dev_notice(dev, "PM's resume\n");
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(hailo_pcie_pm_ops, hailo_pcie_suspend, hailo_pcie_resume);
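+
+// SIMPLE_DEV_PM_OPS only wires system-sleep (suspend/resume) callbacks; without
+// CONFIG_PM_SLEEP the callbacks above compile out and the ops structure stays empty.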
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 )
+static void hailo_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct hailo_pcie_board* board = (struct hailo_pcie_board*) pci_get_drvdata(pdev);
+ int err = 0;
+    pci_err(pdev, "Reset preparation for PCI device\n");
+
+ if (board)
+ {
+ // lock board to wait for any pending operations and for synchronization with open
+ down(&board->mutex);
+ if (board->vdma.used_by_filp != NULL) {
+ // Try to close all vDMA channels before reset
+ err = hailo_pcie_driver_down(board);
+ if (err < 0) {
+ pci_err(pdev, "Error while trying to call FW to close vdma channels (errno %d)\n", err);
+ }
+ }
+ up(&board->mutex);
+ }
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 ) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION( 4, 13, 0 ) && LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 )
+static void hailo_pci_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+ if (prepare) {
+ hailo_pci_reset_prepare(pdev);
+ }
+}
+#endif
+
+static const struct pci_error_handlers hailo_pcie_err_handlers = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION( 3, 16, 0 )
+/* No FLR callback */
+#elif LINUX_VERSION_CODE < KERNEL_VERSION( 4, 13, 0 )
+/* FLR Callback is reset_notify */
+ .reset_notify = hailo_pci_reset_notify,
+#else
+/* FLR Callback is reset_prepare */
+ .reset_prepare = hailo_pci_reset_prepare,
+#endif
+};
+
+static struct pci_device_id hailo_pcie_id_table[] =
+{
+ {PCI_DEVICE_DATA(HAILO, HAILO8, HAILO_BOARD_TYPE_HAILO8)},
+ {PCI_DEVICE_DATA(HAILO, HAILO15, HAILO_BOARD_TYPE_HAILO15)},
+ {PCI_DEVICE_DATA(HAILO, PLUTO, HAILO_BOARD_TYPE_PLUTO)},
+ {0,0,0,0,0,0,0 },
+};
+
+static struct file_operations hailo_pcie_fops =
+{
+    .owner          = THIS_MODULE,
+    .unlocked_ioctl = hailo_pcie_fops_unlockedioctl,
+    .mmap           = hailo_pcie_fops_mmap,
+    .open           = hailo_pcie_fops_open,
+    .release        = hailo_pcie_fops_release
+};
+
+
+static struct pci_driver hailo_pci_driver =
+{
+    .name        = DRIVER_NAME,
+    .id_table    = hailo_pcie_id_table,
+    .probe       = hailo_pcie_probe,
+    .remove      = hailo_pcie_remove,
+    .driver      = {
+        .pm = &hailo_pcie_pm_ops,
+    },
+    .err_handler = &hailo_pcie_err_handlers,
+};
+
+MODULE_DEVICE_TABLE (pci, hailo_pcie_id_table);
+
+static int hailo_pcie_register_chrdev(unsigned int major, const char *name)
+{
+    int allocated_major;
+
+    allocated_major = register_chrdev(major, name, &hailo_pcie_fops);
+
+    chardev_class = class_create_compat("hailo_chardev");
+
+    return allocated_major;
+}
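+
+// Passing 0 as 'major' to register_chrdev() asks the kernel to allocate a free major
+// number, which is returned on success (or a negative errno on failure).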
+
+static void hailo_pcie_unregister_chrdev(unsigned int major, const char *name)
+{
+ class_destroy(chardev_class);
+ unregister_chrdev(major, name);
+}
+
+static int __init hailo_pcie_module_init(void)
+{
+ int err;
+
+ pr_notice(DRIVER_NAME ": Init module. driver version %s\n", HAILO_DRV_VER);
+
+ if ( 0 > (char_major = hailo_pcie_register_chrdev(0, DRIVER_NAME)) )
+ {
+ pr_err(DRIVER_NAME ": Init Error, failed to call register_chrdev.\n");
+
+ return char_major;
+ }
+
+ if ( 0 != (err = pci_register_driver(&hailo_pci_driver)))
+ {
+ pr_err(DRIVER_NAME ": Init Error, failed to call pci_register_driver.\n");
+ hailo_pcie_unregister_chrdev(char_major, DRIVER_NAME);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit hailo_pcie_module_exit(void)
+{
+
+ pr_notice(DRIVER_NAME ": Exit module.\n");
+
+ // Unregister the driver from pci bus
+ pci_unregister_driver(&hailo_pci_driver);
+ hailo_pcie_unregister_chrdev(char_major, DRIVER_NAME);
+
+ pr_notice(DRIVER_NAME ": Hailo PCIe driver unloaded.\n");
+}
+
+
+module_init(hailo_pcie_module_init);
+module_exit(hailo_pcie_module_exit);
+
+module_param(o_dbg, int, S_IRUGO | S_IWUSR);
+
+module_param_named(no_power_mode, g_is_power_mode_enabled, invbool, S_IRUGO);
+MODULE_PARM_DESC(no_power_mode, "Disables automatic D0->D3 PCIe transactions");
+
+module_param(force_allocation_from_driver, int, S_IRUGO);
+MODULE_PARM_DESC(force_allocation_from_driver, "Determines whether to force buffer allocation from driver or userspace");
+
+module_param(force_desc_page_size, int, S_IRUGO);
+MODULE_PARM_DESC(force_desc_page_size, "Determines the maximum DMA descriptor page size (must be a power of 2)");
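+
+// Example (illustrative; the module/file name depends on the build system):
+//   insmod hailo_pci.ko force_desc_page_size=4096 no_power_mode=1
+// caps descriptor pages at 4 KiB and keeps the device from being dropped to D3 when idle.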
+
+MODULE_AUTHOR("Hailo Technologies Ltd.");
+MODULE_DESCRIPTION("Hailo PCIe driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(HAILO_DRV_VER);
+
--- /dev/null
+++ b/drivers/media/pci/hailo/src/pcie.h
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_PCI_PCIE_H_
+#define _HAILO_PCI_PCIE_H_
+
+#include "vdma/vdma.h"
+#include "hailo_ioctl_common.h"
+#include "pcie_common.h"
+#include "utils/fw_common.h"
+
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+
+#include <linux/ioctl.h>
+
+struct hailo_fw_control_info {
+    // ensures that only one fw control will be sent at a time
+ struct semaphore mutex;
+ // called from the interrupt handler to notify that a response is ready
+ struct completion completion;
+ // the command we are currently handling
+ struct hailo_fw_control command;
+};
+
+struct hailo_pcie_driver_down_info {
+ // called from the interrupt handler to notify that FW completed reset
+ struct completion reset_completed;
+};
+
+struct hailo_fw_boot {
+ // the filp that enabled interrupts for fw boot. the interrupt is enabled if this is not null
+ struct file *filp;
+ // called from the interrupt handler to notify that an interrupt was raised
+ struct completion completion;
+};
+
+
+// Context for each open file handle
+// TODO: store board and use as actual context
+struct hailo_file_context {
+ struct list_head open_files_list;
+ struct file *filp;
+ struct hailo_vdma_file_context vdma_context;
+ bool is_valid;
+};
+
+struct hailo_pcie_board {
+ struct list_head board_list;
+ struct pci_dev *pDev;
+ u32 board_index;
+ atomic_t ref_count;
+ struct list_head open_files_list;
+ struct hailo_pcie_resources pcie_resources;
+ struct hailo_fw_control_info fw_control;
+ struct hailo_pcie_driver_down_info driver_down;
+ struct semaphore mutex;
+ struct hailo_vdma_controller vdma;
+ spinlock_t notification_read_spinlock;
+ struct list_head notification_wait_list;
+ struct hailo_d2h_notification notification_cache;
+ struct hailo_d2h_notification notification_to_user;
+ struct hailo_memory_transfer_params memory_transfer_params;
+ u32 desc_max_page_size;
+ enum hailo_allocation_mode allocation_mode;
+ struct completion fw_loaded_completion;
+ bool interrupts_enabled;
+};
+
+bool power_mode_enabled(void);
+
+struct hailo_pcie_board* hailo_pcie_get_board_index(u32 index);
+void hailo_disable_interrupts(struct hailo_pcie_board *board);
+int hailo_enable_interrupts(struct hailo_pcie_board *board);
+
+#endif /* _HAILO_PCI_PCIE_H_ */
+
--- /dev/null
+++ b/drivers/media/pci/hailo/src/sysfs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "sysfs.h"
+#include "pcie.h"
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+static ssize_t board_location_show(struct device *dev, struct device_attribute *_attr,
+ char *buf)
+{
+ struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
+ const char *dev_info = pci_name(board->pDev);
+ return sprintf(buf, "%s", dev_info);
+}
+static DEVICE_ATTR_RO(board_location);
+
+static ssize_t device_id_show(struct device *dev, struct device_attribute *_attr,
+ char *buf)
+{
+ struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
+ return sprintf(buf, "%x:%x", board->pDev->vendor, board->pDev->device);
+}
+static DEVICE_ATTR_RO(device_id);
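+
+// Example (paths assume the "hailo_chardev" class and "hailo%d" node names from pcie.c;
+// values are illustrative):
+//   $ cat /sys/class/hailo_chardev/hailo0/board_location   # e.g. "0000:01:00.0"
+//   $ cat /sys/class/hailo_chardev/hailo0/device_id        # e.g. "1e60:2864"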
+
+static struct attribute *hailo_dev_attrs[] = {
+ &dev_attr_board_location.attr,
+ &dev_attr_device_id.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(hailo_dev);
+const struct attribute_group **g_hailo_dev_groups = hailo_dev_groups;
--- /dev/null
+++ b/drivers/media/pci/hailo/src/sysfs.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_PCI_SYSFS_H_
+#define _HAILO_PCI_SYSFS_H_
+
+#include <linux/sysfs.h>
+
+extern const struct attribute_group **g_hailo_dev_groups;
+
+#endif /* _HAILO_PCI_SYSFS_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/src/utils.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "hailo_pcie_version.h"
+#include "pcie.h"
+#include "utils.h"
+#include "utils/logs.h"
+
+
+void hailo_pcie_clear_notification_wait_list(struct hailo_pcie_board *pBoard, struct file *filp)
+{
+ struct hailo_notification_wait *cur = NULL, *next = NULL;
+ list_for_each_entry_safe(cur, next, &pBoard->notification_wait_list, notification_wait_list) {
+ if (cur->filp == filp) {
+ list_del_rcu(&cur->notification_wait_list);
+ synchronize_rcu();
+ kfree(cur);
+ }
+ }
+}
--- /dev/null
+++ b/drivers/media/pci/hailo/src/utils.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_PCI_UTILS_H_
+#define _HAILO_PCI_UTILS_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+
+#include "pcie.h"
+
+void hailo_pcie_clear_notification_wait_list(struct hailo_pcie_board *pBoard, struct file *filp);
+
+#endif /* _HAILO_PCI_UTILS_H_ */
--- /dev/null
+++ b/drivers/media/pci/hailo/utils/compact.h
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_PCI_COMPACT_H_
+#define _HAILO_PCI_COMPACT_H_
+
+#include <linux/version.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
+#define class_create_compat class_create
+#else
+#define class_create_compat(name) class_create(THIS_MODULE, name)
+#endif
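+
+// Note: kernel 6.4 dropped the 'owner' module argument from class_create(), hence the
+// two variants above.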
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
+#define pci_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
+#define get_user_pages_compact get_user_pages
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+#define get_user_pages_compact(start, nr_pages, gup_flags, pages) \
+ get_user_pages(start, nr_pages, gup_flags, pages, NULL)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+#define get_user_pages_compact(start, nr_pages, gup_flags, pages) \
+ get_user_pages(current, current->mm, start, nr_pages, gup_flags, pages, NULL)
+#else
+static inline long get_user_pages_compact(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages)
+{
+ int write = !!((gup_flags & FOLL_WRITE) == FOLL_WRITE);
+ int force = !!((gup_flags & FOLL_FORCE) == FOLL_FORCE);
+ return get_user_pages(current, current->mm, start, nr_pages, write, force,
+ pages, NULL);
+}
+#endif
+
+#ifndef _LINUX_MMAP_LOCK_H
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ down_read(&mm->mmap_sem);
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ up_read(&mm->mmap_sem);
+}
+#endif /* _LINUX_MMAP_LOCK_H */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
+#define sg_alloc_table_from_pages_segment_compat __sg_alloc_table_from_pages
+#else
+static inline struct scatterlist *sg_alloc_table_from_pages_segment_compat(struct sg_table *sgt,
+ struct page **pages, unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ struct scatterlist *prv, unsigned int left_pages,
+ gfp_t gfp_mask)
+{
+ int res = 0;
+
+ if (NULL != prv) {
+        // prv not supported
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (0 != left_pages) {
+ // Left pages not supported
+ return ERR_PTR(-EINVAL);
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+ res = sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, size, max_segment, gfp_mask);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ res = __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, max_segment, gfp_mask);
+#else
+ res = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, gfp_mask);
+#endif
+ if (res < 0) {
+ return ERR_PTR(res);
+ }
+
+ return sgt->sgl;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION( 5, 0, 0 )
+#define compatible_access_ok(a,b,c) access_ok(b, c)
+#else
+#define compatible_access_ok(a,b,c) access_ok(a, b, c)
+#endif
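+
+// Note: kernel 5.0 removed the first (type) argument from access_ok(), which is why
+// the compat macro discards parameter 'a' on newer kernels.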
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+#define PCI_DEVICE_DATA(vend, dev, data) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+ .driver_data = (kernel_ulong_t)(data)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+// On kernels < 4.12, kvmalloc/kvfree are not implemented. For simplicity, instead of implementing our own
+// kvmalloc/kvfree, we just use vmalloc and vfree (it may reduce allocation/access performance, but it's worth it).
+static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ (void)flags; //ignore
+ return vmalloc(n * size);
+}
+
+#define kvfree vfree
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
+static inline bool is_dma_capable(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+// Case for Raspberry Pi kernel versions 5.4.83 <=> 5.5.0 - bus_dma_mask was already renamed to bus_dma_limit
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (defined(HAILO_RASBERRY_PIE) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 83))
+ const u64 bus_dma_limit = dev->bus_dma_limit;
+#else
+ const u64 bus_dma_limit = dev->bus_dma_mask;
+#endif
+
+ return (dma_addr <= min_not_zero(*dev->dma_mask, bus_dma_limit));
+}
+#else
+static inline bool is_dma_capable(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ // Implementation of dma_capable from linux kernel
+ const u64 bus_dma_limit = (*dev->dma_mask + 1) & ~(*dev->dma_mask);
+ if (bus_dma_limit && size > bus_dma_limit) {
+ return false;
+ }
+
+ if ((dma_addr | (dma_addr + size - 1)) & ~(*dev->dma_mask)) {
+ return false;
+ }
+
+ return true;
+}
+#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
+
+#endif /* _HAILO_PCI_COMPACT_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/utils/fw_common.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_LINUX_COMMON_H_
+#define _HAILO_LINUX_COMMON_H_
+
+#include "hailo_ioctl_common.h"
+
+struct hailo_notification_wait {
+ struct list_head notification_wait_list;
+ int tgid;
+ struct file* filp;
+ struct completion notification_completion;
+ bool is_disabled;
+};
+
+#endif /* _HAILO_LINUX_COMMON_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/utils/logs.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "logs.h"
+
+int o_dbg = LOGLEVEL_NOTICE;
--- /dev/null
+++ b/drivers/media/pci/hailo/utils/logs.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _COMMON_LOGS_H_
+#define _COMMON_LOGS_H_
+
+#include <linux/kern_levels.h>
+
+// Should be used only by "module_param".
+// Specify the current debug level for the logs
+extern int o_dbg;
+
+
+// Logging, same interface as dev_*, uses o_dbg to filter
+// log messages
+#define hailo_printk(level, dev, fmt, ...) \
+ do { \
+ int __level = (level[1] - '0'); \
+ if (__level <= o_dbg) { \
+ dev_printk((level), dev, fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
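+
+// Note: each KERN_* level expands to a two-character prefix (KERN_SOH followed by an
+// ASCII digit), so level[1] - '0' recovers the numeric loglevel compared against o_dbg.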
+
+#define hailo_emerg(board, fmt, ...) hailo_printk(KERN_EMERG, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_alert(board, fmt, ...) hailo_printk(KERN_ALERT, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_crit(board, fmt, ...) hailo_printk(KERN_CRIT, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_err(board, fmt, ...) hailo_printk(KERN_ERR, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_warn(board, fmt, ...) hailo_printk(KERN_WARNING, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_notice(board, fmt, ...) hailo_printk(KERN_NOTICE, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_info(board, fmt, ...) hailo_printk(KERN_INFO, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+#define hailo_dbg(board, fmt, ...) hailo_printk(KERN_DEBUG, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
+
+#define hailo_dev_emerg(dev, fmt, ...) hailo_printk(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_alert(dev, fmt, ...) hailo_printk(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_crit(dev, fmt, ...) hailo_printk(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_err(dev, fmt, ...) hailo_printk(KERN_ERR, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_warn(dev, fmt, ...) hailo_printk(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_notice(dev, fmt, ...) hailo_printk(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_info(dev, fmt, ...) hailo_printk(KERN_INFO, dev, fmt, ##__VA_ARGS__)
+#define hailo_dev_dbg(dev, fmt, ...) hailo_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
+
+
+#endif //_COMMON_LOGS_H_
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/vdma/ioctl.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#include "ioctl.h"
+#include "memory.h"
+#include "utils/logs.h"
+#include "utils.h"
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+
+long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
+{
+ struct hailo_vdma_interrupts_enable_params input;
+ struct hailo_vdma_engine *engine = NULL;
+ u8 engine_index = 0;
+ u32 channels_bitmap = 0;
+
+ if (copy_from_user(&input, (void *)arg, sizeof(input))) {
+ hailo_dev_err(controller->dev, "copy_from_user fail\n");
+ return -ENOMEM;
+ }
+
+ // Validate params (ignoring engine_index >= controller->vdma_engines_count).
+ for_each_vdma_engine(controller, engine, engine_index) {
+ channels_bitmap = input.channels_bitmap_per_engine[engine_index];
+ if (0 != (channels_bitmap & engine->enabled_channels)) {
+ hailo_dev_err(controller->dev, "Trying to enable channels that are already enabled\n");
+ return -EINVAL;
+ }
+ }
+
+ for_each_vdma_engine(controller, engine, engine_index) {
+ channels_bitmap = input.channels_bitmap_per_engine[engine_index];
+ hailo_vdma_engine_enable_channel_interrupts(engine, channels_bitmap,
+ input.enable_timestamps_measure);
+ hailo_vdma_update_interrupts_mask(controller, engine_index);
+ hailo_dev_info(controller->dev, "Enabled interrupts for engine %u, channels bitmap 0x%x\n",
+ engine_index, channels_bitmap);
+ }
+
+ return 0;
+}
+
+long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
+{
+ struct hailo_vdma_interrupts_disable_params input;
+ struct hailo_vdma_engine *engine = NULL;
+ u8 engine_index = 0;
+ u32 channels_bitmap = 0;
+
+ if (copy_from_user(&input, (void*)arg, sizeof(input))) {
+ hailo_dev_err(controller->dev, "copy_from_user fail\n");
+ return -ENOMEM;
+ }
+
+ // Validate params (ignoring engine_index >= controller->vdma_engines_count).
+ for_each_vdma_engine(controller, engine, engine_index) {
+ channels_bitmap = input.channels_bitmap_per_engine[engine_index];
+ if (channels_bitmap != (channels_bitmap & engine->enabled_channels)) {
+ hailo_dev_err(controller->dev, "Trying to disable channels that were not enabled\n");
+ return -EINVAL;
+ }
+ }
+
+ for_each_vdma_engine(controller, engine, engine_index) {
+ channels_bitmap = input.channels_bitmap_per_engine[engine_index];
+ hailo_vdma_engine_interrupts_disable(controller, engine, engine_index,
+ channels_bitmap);
+ }
+
+ // Wake up threads waiting
+ wake_up_interruptible_all(&controller->interrupts_wq);
+
+ return 0;
+}
+
+static bool got_interrupt(struct hailo_vdma_controller *controller,
+ u32 channels_bitmap_per_engine[MAX_VDMA_ENGINES])
+{
+ struct hailo_vdma_engine *engine = NULL;
+ u8 engine_index = 0;
+ for_each_vdma_engine(controller, engine, engine_index) {
+ if (hailo_vdma_engine_got_interrupt(engine,
+ channels_bitmap_per_engine[engine_index])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void transfer_done(struct hailo_ongoing_transfer *transfer, void *opaque)
+{
+ u8 i = 0;
+ struct hailo_vdma_controller *controller = (struct hailo_vdma_controller *)opaque;
+ for (i = 0; i < transfer->buffers_count; i++) {
+ struct hailo_vdma_buffer *mapped_buffer = (struct hailo_vdma_buffer *)transfer->buffers[i].opaque;
+ hailo_vdma_buffer_sync_cyclic(controller, mapped_buffer, HAILO_SYNC_FOR_CPU,
+ transfer->buffers[i].offset, transfer->buffers[i].size);
+ }
+}
+
+long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
+ struct semaphore *mutex, bool *should_up_board_mutex)
+{
+ long err = 0;
+ struct hailo_vdma_interrupts_wait_params params = {0};
+ struct hailo_vdma_engine *engine = NULL;
+ bool bitmap_not_empty = false;
+ u8 engine_index = 0;
+ u32 irq_bitmap = 0;
+ unsigned long irq_saved_flags = 0;
+
+ if (copy_from_user(&params, (void*)arg, sizeof(params))) {
+ hailo_dev_err(controller->dev, "HAILO_VDMA_INTERRUPTS_WAIT, copy_from_user fail\n");
+ return -ENOMEM;
+ }
+
+ // We don't need to validate that channels_bitmap_per_engine are enabled -
+ // If the channel is not enabled we just return an empty interrupts list.
+
+ // Validate params (ignoring engine_index >= controller->vdma_engines_count).
+    // It is ok to wait on a disabled channel - the wait will just exit.
+ for_each_vdma_engine(controller, engine, engine_index) {
+ if (0 != params.channels_bitmap_per_engine[engine_index]) {
+ bitmap_not_empty = true;
+ }
+ }
+ if (!bitmap_not_empty) {
+ hailo_dev_err(controller->dev, "Got an empty bitmap for wait interrupts\n");
+ return -EINVAL;
+ }
+
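+    // Release the board mutex while sleeping so other ioctls can make progress;
+    // it is re-acquired below once an interrupt (or a signal) arrives.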
+ up(mutex);
+ err = wait_event_interruptible(controller->interrupts_wq,
+ got_interrupt(controller, params.channels_bitmap_per_engine));
+ if (err < 0) {
+ hailo_dev_info(controller->dev,
+ "wait channel interrupts failed with err=%ld (process was interrupted or killed)\n", err);
+ *should_up_board_mutex = false;
+ return err;
+ }
+
+ if (down_interruptible(mutex)) {
+ hailo_dev_info(controller->dev, "down_interruptible error (process was interrupted or killed)\n");
+ *should_up_board_mutex = false;
+ return -ERESTARTSYS;
+ }
+
+ params.channels_count = 0;
+ for_each_vdma_engine(controller, engine, engine_index) {
+
+ spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
+ irq_bitmap = hailo_vdma_engine_read_interrupts(engine,
+ params.channels_bitmap_per_engine[engine->index]);
+ spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
+
+ err = hailo_vdma_engine_fill_irq_data(&params, engine, irq_bitmap,
+ transfer_done, controller);
+ if (err < 0) {
+            hailo_dev_err(controller->dev, "Failed to fill irq data, err %ld\n", err);
+ return err;
+ }
+ }
+
+ if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+        return -EFAULT;
+ }
+
+ return 0;
+}
+
+static uintptr_t hailo_get_next_vdma_handle(struct hailo_vdma_file_context *context)
+{
+    // Note: The kernel right-shifts the 'offset' param of the user-space mmap call by PAGE_SHIFT bits
+    // and stores the result in 'vm_area_struct.vm_pgoff'. We pass the desc_handle to mmap in the offset
+    // param, so handles are generated pre-shifted left by PAGE_SHIFT to survive that. See also 'mmap function'.
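+    // Illustrative example (assuming PAGE_SHIFT == 12): the first handle returned is
+    // 1 << 12 == 0x1000. Userspace passes 0x1000 as the mmap offset, the kernel stores
+    // vm_pgoff = 0x1000 >> 12 == 1, and shifting vm_pgoff back left recovers the handle.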
+ uintptr_t next_handle = 0;
+ next_handle = atomic_inc_return(&context->last_vdma_handle);
+ return (next_handle << PAGE_SHIFT);
+}
+
+long hailo_vdma_buffer_map_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_vdma_buffer_map_params buf_info;
+ struct hailo_vdma_buffer *mapped_buffer = NULL;
+ enum dma_data_direction direction = DMA_NONE;
+ struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
+
+ if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ hailo_dev_info(controller->dev, "address %px tgid %d size: %zu\n",
+ buf_info.user_address, current->tgid, buf_info.size);
+
+ direction = get_dma_direction(buf_info.data_direction);
+ if (DMA_NONE == direction) {
+ hailo_dev_err(controller->dev, "invalid data direction %d\n", buf_info.data_direction);
+ return -EINVAL;
+ }
+
+ low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, buf_info.allocated_buffer_handle);
+
+ mapped_buffer = hailo_vdma_buffer_map(controller->dev,
+ buf_info.user_address, buf_info.size, direction, low_memory_buffer);
+ if (IS_ERR(mapped_buffer)) {
+ hailo_dev_err(controller->dev, "failed map buffer %px\n",
+ buf_info.user_address);
+ return PTR_ERR(mapped_buffer);
+ }
+
+ mapped_buffer->handle = atomic_inc_return(&context->last_vdma_user_buffer_handle);
+ buf_info.mapped_handle = mapped_buffer->handle;
+ if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+ hailo_vdma_buffer_put(mapped_buffer);
+ return -EFAULT;
+ }
+
+ list_add(&mapped_buffer->mapped_user_buffer_list, &context->mapped_user_buffer_list);
+ hailo_dev_info(controller->dev, "buffer %px (handle %zu) is mapped\n",
+ buf_info.user_address, buf_info.mapped_handle);
+ return 0;
+}
+
+long hailo_vdma_buffer_unmap_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_vdma_buffer *mapped_buffer = NULL;
+ struct hailo_vdma_buffer_unmap_params buffer_unmap_params;
+
+ if (copy_from_user(&buffer_unmap_params, (void __user*)arg, sizeof(buffer_unmap_params))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ hailo_dev_info(controller->dev, "unmap user buffer handle %zu\n", buffer_unmap_params.mapped_handle);
+
+ mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, buffer_unmap_params.mapped_handle);
+ if (mapped_buffer == NULL) {
+ hailo_dev_warn(controller->dev, "buffer handle %zu not found\n", buffer_unmap_params.mapped_handle);
+ return -EINVAL;
+ }
+
+ list_del(&mapped_buffer->mapped_user_buffer_list);
+ hailo_vdma_buffer_put(mapped_buffer);
+ return 0;
+}
+
+long hailo_vdma_buffer_sync_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
+{
+ struct hailo_vdma_buffer_sync_params sync_info = {};
+ struct hailo_vdma_buffer *mapped_buffer = NULL;
+
+ if (copy_from_user(&sync_info, (void __user*)arg, sizeof(sync_info))) {
+ hailo_dev_err(controller->dev, "copy_from_user fail\n");
+ return -EFAULT;
+ }
+
+ if (!(mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, sync_info.handle))) {
+ hailo_dev_err(controller->dev, "buffer handle %zu doesn't exist\n", sync_info.handle);
+ return -EINVAL;
+ }
+
+ if ((sync_info.sync_type != HAILO_SYNC_FOR_CPU) && (sync_info.sync_type != HAILO_SYNC_FOR_DEVICE)) {
+ hailo_dev_err(controller->dev, "Invalid sync_type given for vdma buffer sync.\n");
+ return -EINVAL;
+ }
+
+ if (sync_info.offset + sync_info.count > mapped_buffer->size) {
+ hailo_dev_err(controller->dev, "Invalid offset/count given for vdma buffer sync. offset %zu count %zu buffer size %u\n",
+ sync_info.offset, sync_info.count, mapped_buffer->size);
+ return -EINVAL;
+ }
+
+ hailo_vdma_buffer_sync(controller, mapped_buffer, sync_info.sync_type,
+ sync_info.offset, sync_info.count);
+ return 0;
+}
+
+long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_desc_list_create_params params;
+ struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
+ uintptr_t next_handle = 0;
+ long err = -EINVAL;
+
+ if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy_from_user fail\n");
+ return -EFAULT;
+ }
+
+ if (params.is_circular && !is_powerof2(params.desc_count)) {
+        hailo_dev_err(controller->dev, "Invalid desc count given: %zu, circular descriptors count must be a power of 2\n",
+ params.desc_count);
+ return -EINVAL;
+ }
+
+ if (!is_powerof2(params.desc_page_size)) {
+        hailo_dev_err(controller->dev, "Invalid desc page size given: %u\n",
+ params.desc_page_size);
+ return -EINVAL;
+ }
+
+ hailo_dev_info(controller->dev,
+ "Create desc list desc_count: %zu desc_page_size: %u\n",
+ params.desc_count, params.desc_page_size);
+
+ descriptors_buffer = kzalloc(sizeof(*descriptors_buffer), GFP_KERNEL);
+ if (NULL == descriptors_buffer) {
+ hailo_dev_err(controller->dev, "Failed to allocate buffer for descriptors list struct\n");
+ return -ENOMEM;
+ }
+
+ next_handle = hailo_get_next_vdma_handle(context);
+
+ err = hailo_desc_list_create(controller->dev, params.desc_count,
+ params.desc_page_size, next_handle, params.is_circular,
+ descriptors_buffer);
+ if (err < 0) {
+ hailo_dev_err(controller->dev, "failed to allocate descriptors buffer\n");
+ kfree(descriptors_buffer);
+ return err;
+ }
+
+ list_add(&descriptors_buffer->descriptors_buffer_list, &context->descriptors_buffer_list);
+
+ // Note: The physical address is required for CONTEXT_SWITCH firmware controls
+ BUILD_BUG_ON(sizeof(params.dma_address) < sizeof(descriptors_buffer->dma_address));
+ params.dma_address = descriptors_buffer->dma_address;
+ params.desc_handle = descriptors_buffer->handle;
+
+    if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+ list_del(&descriptors_buffer->descriptors_buffer_list);
+ hailo_desc_list_release(controller->dev, descriptors_buffer);
+ kfree(descriptors_buffer);
+ return -EFAULT;
+ }
+
+    hailo_dev_info(controller->dev, "Created desc list, handle 0x%llx\n",
+ (u64)params.desc_handle);
+ return 0;
+}
+
+long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_desc_list_release_params params;
+ struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
+
+ if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy_from_user fail\n");
+ return -EFAULT;
+ }
+
+ descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.desc_handle);
+ if (descriptors_buffer == NULL) {
+        hailo_dev_warn(controller->dev, "desc handle %llu not found\n", (unsigned long long)params.desc_handle);
+ return -EINVAL;
+ }
+
+ list_del(&descriptors_buffer->descriptors_buffer_list);
+ hailo_desc_list_release(controller->dev, descriptors_buffer);
+ kfree(descriptors_buffer);
+ return 0;
+}
+
+long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_desc_list_bind_vdma_buffer_params configure_info;
+ struct hailo_vdma_buffer *mapped_buffer = NULL;
+ struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
+ struct hailo_vdma_mapped_transfer_buffer transfer_buffer = {0};
+
+ if (copy_from_user(&configure_info, (void __user*)arg, sizeof(configure_info))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+ hailo_dev_info(controller->dev, "config buffer_handle=%zu desc_handle=%llu starting_desc=%u\n",
+ configure_info.buffer_handle, (u64)configure_info.desc_handle, configure_info.starting_desc);
+
+ mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, configure_info.buffer_handle);
+ descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, configure_info.desc_handle);
+ if (mapped_buffer == NULL || descriptors_buffer == NULL) {
+ hailo_dev_err(controller->dev, "invalid user/descriptors buffer\n");
+ return -EFAULT;
+ }
+
+    if (configure_info.buffer_size > mapped_buffer->size) {
+        hailo_dev_err(controller->dev, "invalid buffer size\n");
+        return -EINVAL;
+    }
+
+ transfer_buffer.sg_table = &mapped_buffer->sg_table;
+ transfer_buffer.size = configure_info.buffer_size;
+ transfer_buffer.offset = configure_info.buffer_offset;
+
+ return hailo_vdma_program_descriptors_list(
+ controller->hw,
+ &descriptors_buffer->desc_list,
+ configure_info.starting_desc,
+ &transfer_buffer,
+ configure_info.channel_index
+ );
+}
+
+long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_allocate_low_memory_buffer_params buf_info = {0};
+ struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
+ long err = -EINVAL;
+
+ if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ low_memory_buffer = kzalloc(sizeof(*low_memory_buffer), GFP_KERNEL);
+ if (NULL == low_memory_buffer) {
+ hailo_dev_err(controller->dev, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = hailo_vdma_low_memory_buffer_alloc(buf_info.buffer_size, low_memory_buffer);
+ if (err < 0) {
+ kfree(low_memory_buffer);
+        hailo_dev_err(controller->dev, "failed to allocate buffer from driver\n");
+ return err;
+ }
+
+ // Get handle for allocated buffer
+ low_memory_buffer->handle = hailo_get_next_vdma_handle(context);
+
+ list_add(&low_memory_buffer->vdma_low_memory_buffer_list, &context->vdma_low_memory_buffer_list);
+
+ buf_info.buffer_handle = low_memory_buffer->handle;
+ if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+ list_del(&low_memory_buffer->vdma_low_memory_buffer_list);
+ hailo_vdma_low_memory_buffer_free(low_memory_buffer);
+ kfree(low_memory_buffer);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
+ struct hailo_free_low_memory_buffer_params params = {0};
+
+ if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, params.buffer_handle);
+ if (NULL == low_memory_buffer) {
+ hailo_dev_warn(controller->dev, "vdma buffer handle %lx not found\n", params.buffer_handle);
+ return -EINVAL;
+ }
+
+ list_del(&low_memory_buffer->vdma_low_memory_buffer_list);
+ hailo_vdma_low_memory_buffer_free(low_memory_buffer);
+ kfree(low_memory_buffer);
+ return 0;
+}
+
+long hailo_mark_as_in_use(struct hailo_vdma_controller *controller, unsigned long arg, struct file *filp)
+{
+ struct hailo_mark_as_in_use_params params = {0};
+
+    // If the device is already used by this FD, return false to indicate it's free for this caller
+ if (filp == controller->used_by_filp) {
+ params.in_use = false;
+ } else if (NULL != controller->used_by_filp) {
+ params.in_use = true;
+ } else {
+ controller->used_by_filp = filp;
+ params.in_use = false;
+ }
+
+ if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+long hailo_vdma_continuous_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
+{
+ struct hailo_allocate_continuous_buffer_params buf_info = {0};
+ struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
+ long err = -EINVAL;
+ size_t aligned_buffer_size = 0;
+
+ if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ continuous_buffer = kzalloc(sizeof(*continuous_buffer), GFP_KERNEL);
+ if (NULL == continuous_buffer) {
+ hailo_dev_err(controller->dev, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ // We use PAGE_ALIGN to support mmap
+ aligned_buffer_size = PAGE_ALIGN(buf_info.buffer_size);
+ err = hailo_vdma_continuous_buffer_alloc(controller->dev, aligned_buffer_size, continuous_buffer);
+ if (err < 0) {
+ kfree(continuous_buffer);
+ return err;
+ }
+
+ continuous_buffer->handle = hailo_get_next_vdma_handle(context);
+ list_add(&continuous_buffer->continuous_buffer_list, &context->continuous_buffer_list);
+
+ buf_info.buffer_handle = continuous_buffer->handle;
+ buf_info.dma_address = continuous_buffer->dma_address;
+ if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+ list_del(&continuous_buffer->continuous_buffer_list);
+ hailo_vdma_continuous_buffer_free(controller->dev, continuous_buffer);
+ kfree(continuous_buffer);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+long hailo_vdma_continuous_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
+{
+ struct hailo_free_continuous_buffer_params params;
+ struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
+
+ if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ continuous_buffer = hailo_vdma_find_continuous_buffer(context, params.buffer_handle);
+ if (NULL == continuous_buffer) {
+ hailo_dev_warn(controller->dev, "vdma buffer handle %lx not found\n", params.buffer_handle);
+ return -EINVAL;
+ }
+
+ list_del(&continuous_buffer->continuous_buffer_list);
+ hailo_vdma_continuous_buffer_free(controller->dev, continuous_buffer);
+ kfree(continuous_buffer);
+ return 0;
+}
+
+long hailo_vdma_interrupts_read_timestamps_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
+{
+ struct hailo_vdma_interrupts_read_timestamp_params *params = &controller->read_interrupt_timestamps_params;
+ struct hailo_vdma_engine *engine = NULL;
+ int err = -EINVAL;
+
+ hailo_dev_dbg(controller->dev, "Start read interrupt timestamps ioctl\n");
+
+ if (copy_from_user(params, (void __user*)arg, sizeof(*params))) {
+ hailo_dev_err(controller->dev, "copy_from_user fail\n");
+        return -EFAULT;
+ }
+
+ if (params->engine_index >= controller->vdma_engines_count) {
+        hailo_dev_err(controller->dev, "Invalid engine %u\n", params->engine_index);
+ return -EINVAL;
+ }
+ engine = &controller->vdma_engines[params->engine_index];
+
+ err = hailo_vdma_engine_read_timestamps(engine, params);
+ if (err < 0) {
+        hailo_dev_err(controller->dev, "Failed to read engine interrupts for %u:%u\n",
+ params->engine_index, params->channel_index);
+ return err;
+ }
+
+ if (copy_to_user((void __user*)arg, params, sizeof(*params))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+        return -EFAULT;
+ }
+
+ return 0;
+}
+
+long hailo_vdma_launch_transfer_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg)
+{
+ struct hailo_vdma_launch_transfer_params params;
+ struct hailo_vdma_engine *engine = NULL;
+ struct hailo_vdma_channel *channel = NULL;
+ struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
+ struct hailo_vdma_mapped_transfer_buffer mapped_transfer_buffers[ARRAY_SIZE(params.buffers)] = {0};
+ int ret = -EINVAL;
+ u8 i = 0;
+
+ if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy from user fail\n");
+ return -EFAULT;
+ }
+
+ if (params.engine_index >= controller->vdma_engines_count) {
+        hailo_dev_err(controller->dev, "Invalid engine %u\n", params.engine_index);
+ return -EINVAL;
+ }
+ engine = &controller->vdma_engines[params.engine_index];
+
+ if (params.channel_index >= ARRAY_SIZE(engine->channels)) {
+        hailo_dev_err(controller->dev, "Invalid channel %u\n", params.channel_index);
+ return -EINVAL;
+ }
+ channel = &engine->channels[params.channel_index];
+
+ if (params.buffers_count > ARRAY_SIZE(params.buffers)) {
+ hailo_dev_err(controller->dev, "too many buffers %u\n", params.buffers_count);
+ return -EINVAL;
+ }
+
+ descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.desc_handle);
+ if (descriptors_buffer == NULL) {
+ hailo_dev_err(controller->dev, "invalid descriptors list handle\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < params.buffers_count; i++) {
+ struct hailo_vdma_buffer *mapped_buffer =
+ hailo_vdma_find_mapped_user_buffer(context, params.buffers[i].mapped_buffer_handle);
+ if (mapped_buffer == NULL) {
+ hailo_dev_err(controller->dev, "invalid user buffer\n");
+ return -EFAULT;
+ }
+
+ if (params.buffers[i].size > mapped_buffer->size) {
+ hailo_dev_err(controller->dev, "Syncing size %u while buffer size is %u\n",
+ params.buffers[i].size, mapped_buffer->size);
+ return -EINVAL;
+ }
+
+ if (params.buffers[i].offset > mapped_buffer->size) {
+ hailo_dev_err(controller->dev, "Syncing offset %u while buffer size is %u\n",
+ params.buffers[i].offset, mapped_buffer->size);
+ return -EINVAL;
+ }
+
+        // Syncing the buffer for the device transfers its ownership from the host to the device.
+        // We sync even for D2H transfers, since the host may have written to the buffer between
+        // the time it was mapped and this async transfer.
+ hailo_vdma_buffer_sync_cyclic(controller, mapped_buffer, HAILO_SYNC_FOR_DEVICE,
+ params.buffers[i].offset, params.buffers[i].size);
+
+ mapped_transfer_buffers[i].sg_table = &mapped_buffer->sg_table;
+ mapped_transfer_buffers[i].size = params.buffers[i].size;
+ mapped_transfer_buffers[i].offset = params.buffers[i].offset;
+ mapped_transfer_buffers[i].opaque = mapped_buffer;
+ }
+
+ ret = hailo_vdma_launch_transfer(
+ controller->hw,
+ channel,
+ &descriptors_buffer->desc_list,
+ params.starting_desc,
+ params.buffers_count,
+ mapped_transfer_buffers,
+ params.should_bind,
+ params.first_interrupts_domain,
+ params.last_interrupts_domain,
+ params.is_debug
+ );
+ if (ret < 0) {
+        hailo_dev_err(controller->dev, "Failed to launch transfer, err %d\n", ret);
+ return ret;
+ }
+
+ params.descs_programed = ret;
+
+ if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
+ hailo_dev_err(controller->dev, "copy_to_user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/vdma/ioctl.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#ifndef _HAILO_VDMA_IOCTL_H_
+#define _HAILO_VDMA_IOCTL_H_
+
+#include "vdma/vdma.h"
+
+long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
+ struct semaphore *mutex, bool *should_up_board_mutex);
+
+long hailo_vdma_buffer_map_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_vdma_buffer_unmap_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_vdma_buffer_sync_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+
+long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+
+long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+
+long hailo_mark_as_in_use(struct hailo_vdma_controller *controller, unsigned long arg, struct file *filp);
+
+long hailo_vdma_continuous_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+long hailo_vdma_continuous_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
+
+long hailo_vdma_interrupts_read_timestamps_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
+
+long hailo_vdma_launch_transfer_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned long arg);
+
+#endif /* _HAILO_VDMA_IOCTL_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/vdma/memory.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#define pr_fmt(fmt) "hailo: " fmt
+
+#include "memory.h"
+#include "utils/compact.h"
+
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+
+
+#define SGL_MAX_SEGMENT_SIZE (0x10000)
+// See linux/mm.h
+#define MMIO_AND_NO_PAGES_VMA_MASK (VM_IO | VM_PFNMAP)
+
+static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
+ struct sg_table *sgt);
+static int prepare_sg_table(struct sg_table *sg_table, void __user* user_address, u32 size,
+ struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
+static void clear_sg_table(struct sg_table *sgt);
+
+struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
+ void __user *user_address, size_t size, enum dma_data_direction direction,
+ struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
+{
+ int ret = -EINVAL;
+ struct hailo_vdma_buffer *mapped_buffer = NULL;
+ struct sg_table sgt = {0};
+ struct vm_area_struct *vma = NULL;
+ bool is_mmio = false;
+
+ mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
+ if (NULL == mapped_buffer) {
+ dev_err(dev, "memory alloc failed\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING)) {
+ vma = find_vma(current->mm, (uintptr_t)user_address);
+ if (NULL == vma) {
+ dev_err(dev, "no vma for virt_addr/size = 0x%08lx/0x%08zx\n", (uintptr_t)user_address, size);
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+
+ if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) &&
+ (MMIO_AND_NO_PAGES_VMA_MASK == (vma->vm_flags & MMIO_AND_NO_PAGES_VMA_MASK))) {
+ // user_address represents memory mapped I/O and isn't backed by 'struct page' (only by pure pfn)
+ if (NULL != low_mem_driver_allocated_buffer) {
+            // low_mem_driver_allocated_buffer is backed by regular 'struct page' addresses, just in low memory
+ dev_err(dev, "low_mem_driver_allocated_buffer shouldn't be provided with an mmio address\n");
+ ret = -EINVAL;
+ goto free_buffer_struct;
+ }
+
+ ret = map_mmio_address(user_address, size, vma, &sgt);
+ if (ret < 0) {
+ dev_err(dev, "failed to map mmio address %d\n", ret);
+ goto free_buffer_struct;
+ }
+
+ is_mmio = true;
+ } else {
+ // user_address is a standard 'struct page' backed memory address
+ ret = prepare_sg_table(&sgt, user_address, size, low_mem_driver_allocated_buffer);
+ if (ret < 0) {
+ dev_err(dev, "failed to set sg list for user buffer %d\n", ret);
+ goto free_buffer_struct;
+ }
+ sgt.nents = dma_map_sg(dev, sgt.sgl, sgt.orig_nents, direction);
+ if (0 == sgt.nents) {
+ dev_err(dev, "failed to map sg list for user buffer\n");
+ ret = -ENXIO;
+ goto clear_sg_table;
+ }
+ }
+
+ kref_init(&mapped_buffer->kref);
+ mapped_buffer->device = dev;
+ mapped_buffer->user_address = user_address;
+ mapped_buffer->size = size;
+ mapped_buffer->data_direction = direction;
+ mapped_buffer->sg_table = sgt;
+ mapped_buffer->is_mmio = is_mmio;
+
+ return mapped_buffer;
+
+clear_sg_table:
+ clear_sg_table(&sgt);
+free_buffer_struct:
+ kfree(mapped_buffer);
+cleanup:
+ return ERR_PTR(ret);
+}
+
+static void unmap_buffer(struct kref *kref)
+{
+ struct hailo_vdma_buffer *buf = container_of(kref, struct hailo_vdma_buffer, kref);
+
+ if (!buf->is_mmio) {
+ dma_unmap_sg(buf->device, buf->sg_table.sgl, buf->sg_table.orig_nents, buf->data_direction);
+ }
+
+ clear_sg_table(&buf->sg_table);
+ kfree(buf);
+}
+
+void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf)
+{
+ kref_get(&buf->kref);
+}
+
+void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf)
+{
+ kref_put(&buf->kref, unmap_buffer);
+}
+
+static void vdma_sync_entire_buffer(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type)
+{
+ if (sync_type == HAILO_SYNC_FOR_CPU) {
+ dma_sync_sg_for_cpu(controller->dev, mapped_buffer->sg_table.sgl, mapped_buffer->sg_table.nents,
+ mapped_buffer->data_direction);
+ } else {
+ dma_sync_sg_for_device(controller->dev, mapped_buffer->sg_table.sgl, mapped_buffer->sg_table.nents,
+ mapped_buffer->data_direction);
+ }
+}
+
+typedef void (*dma_sync_single_callback)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+// Sync 'size' bytes of the mapped buffer, starting at 'offset'
+static void vdma_sync_buffer_interval(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_buffer *mapped_buffer,
+ size_t offset, size_t size, enum hailo_vdma_buffer_sync_type sync_type)
+{
+ size_t sync_start_offset = offset;
+ size_t sync_end_offset = offset + size;
+ dma_sync_single_callback dma_sync_single = (sync_type == HAILO_SYNC_FOR_CPU) ?
+ dma_sync_single_for_cpu :
+ dma_sync_single_for_device;
+ struct scatterlist* sg_entry = NULL;
+ size_t current_iter_offset = 0;
+ int i = 0;
+
+ for_each_sg(mapped_buffer->sg_table.sgl, sg_entry, mapped_buffer->sg_table.nents, i) {
+        // Check if the intervals [current_iter_offset, current_iter_offset + sg_dma_len(sg_entry)] and
+        // [sync_start_offset, sync_end_offset] intersect. If offset isn't at the start of an sg_entry, we still want to sync it.
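+        // Illustrative example: with two 4 KiB sg entries covering [0x0, 0x2000), a sync
+        // of offset=0x800 size=0x1000 spans [0x800, 0x1800), intersects both entries, and
+        // therefore syncs both in full even though neither boundary is entry-aligned.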
+ if (max(sync_start_offset, current_iter_offset) <= min(sync_end_offset, current_iter_offset + sg_dma_len(sg_entry))) {
+ dma_sync_single(controller->dev, sg_dma_address(sg_entry), sg_dma_len(sg_entry),
+ mapped_buffer->data_direction);
+ }
+
+ current_iter_offset += sg_dma_len(sg_entry);
+ }
+}
+
+void hailo_vdma_buffer_sync(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
+ size_t offset, size_t size)
+{
+ if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) && mapped_buffer->is_mmio) {
+ // MMIO buffers don't need to be sync'd
+ return;
+ }
+
+ if ((offset == 0) && (size == mapped_buffer->size)) {
+ vdma_sync_entire_buffer(controller, mapped_buffer, sync_type);
+ } else {
+ vdma_sync_buffer_interval(controller, mapped_buffer, offset, size, sync_type);
+ }
+}
+
+// Similar to hailo_vdma_buffer_sync, but allows a circular (wrap-around) sync of the buffer.
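+// Illustrative example: for a 4 KiB mapped buffer, a cyclic sync with offset = 3 KiB
+// and size = 2 KiB is split into a sync of [3 KiB, 4 KiB) followed by a wrapped sync
+// of [0 KiB, 1 KiB).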
+void hailo_vdma_buffer_sync_cyclic(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
+ size_t offset, size_t size)
+{
+ size_t size_to_end = min(size, mapped_buffer->size - offset);
+
+ hailo_vdma_buffer_sync(controller, mapped_buffer, sync_type, offset, size_to_end);
+
+ if (size_to_end < size) {
+ hailo_vdma_buffer_sync(controller, mapped_buffer, sync_type, 0, size - size_to_end);
+ }
+}
+
+struct hailo_vdma_buffer* hailo_vdma_find_mapped_user_buffer(struct hailo_vdma_file_context *context,
+ size_t buffer_handle)
+{
+ struct hailo_vdma_buffer *cur = NULL;
+ list_for_each_entry(cur, &context->mapped_user_buffer_list, mapped_user_buffer_list) {
+ if (cur->handle == buffer_handle) {
+ return cur;
+ }
+ }
+ return NULL;
+}
+
+void hailo_vdma_clear_mapped_user_buffer_list(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller)
+{
+ struct hailo_vdma_buffer *cur = NULL, *next = NULL;
+ list_for_each_entry_safe(cur, next, &context->mapped_user_buffer_list, mapped_user_buffer_list) {
+ list_del(&cur->mapped_user_buffer_list);
+ hailo_vdma_buffer_put(cur);
+ }
+}
+
+
+int hailo_desc_list_create(struct device *dev, u32 descriptors_count, u16 desc_page_size,
+ uintptr_t desc_handle, bool is_circular, struct hailo_descriptors_list_buffer *descriptors)
+{
+ size_t buffer_size = 0;
+    const u64 align = VDMA_DESCRIPTOR_LIST_ALIGN; // First addr must be aligned on 64 KB (from the VDMA registers documentation)
+
+ buffer_size = descriptors_count * sizeof(struct hailo_vdma_descriptor);
+ buffer_size = ALIGN(buffer_size, align);
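+    // Illustrative example (assuming a 16-byte descriptor): 1024 descriptors need 16 KiB,
+    // which ALIGN() rounds up to a single 64 KiB allocation satisfying the VDMA alignment
+    // requirement above.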
+
+ descriptors->kernel_address = dma_alloc_coherent(dev, buffer_size,
+ &descriptors->dma_address, GFP_KERNEL | __GFP_ZERO);
+ if (descriptors->kernel_address == NULL) {
+        dev_err(dev, "Failed to allocate descriptors list, desc_count 0x%x, buffer_size 0x%zx. This failure means there is not a sufficient amount of CMA memory "
+            "(contiguous physical memory) and is usually caused by a lack of general system memory. Please check that you have sufficient memory.\n",
+            descriptors_count, buffer_size);
+ return -ENOMEM;
+ }
+
+ descriptors->buffer_size = buffer_size;
+ descriptors->handle = desc_handle;
+
+ descriptors->desc_list.desc_list = descriptors->kernel_address;
+ descriptors->desc_list.desc_count = descriptors_count;
+ descriptors->desc_list.desc_page_size = desc_page_size;
+ descriptors->desc_list.is_circular = is_circular;
+
+ return 0;
+}
+
+void hailo_desc_list_release(struct device *dev, struct hailo_descriptors_list_buffer *descriptors)
+{
+ dma_free_coherent(dev, descriptors->buffer_size, descriptors->kernel_address, descriptors->dma_address);
+}
+
+struct hailo_descriptors_list_buffer* hailo_vdma_find_descriptors_buffer(struct hailo_vdma_file_context *context,
+ uintptr_t desc_handle)
+{
+ struct hailo_descriptors_list_buffer *cur = NULL;
+ list_for_each_entry(cur, &context->descriptors_buffer_list, descriptors_buffer_list) {
+ if (cur->handle == desc_handle) {
+ return cur;
+ }
+ }
+ return NULL;
+}
+
+void hailo_vdma_clear_descriptors_buffer_list(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller)
+{
+ struct hailo_descriptors_list_buffer *cur = NULL, *next = NULL;
+ list_for_each_entry_safe(cur, next, &context->descriptors_buffer_list, descriptors_buffer_list) {
+ list_del(&cur->descriptors_buffer_list);
+ hailo_desc_list_release(controller->dev, cur);
+ kfree(cur);
+ }
+}
+
+int hailo_vdma_low_memory_buffer_alloc(size_t size, struct hailo_vdma_low_memory_buffer *low_memory_buffer)
+{
+ int ret = -EINVAL;
+ void *kernel_address = NULL;
+ size_t pages_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t num_allocated = 0, i = 0;
+ void **pages = NULL;
+
+ pages = kcalloc(pages_count, sizeof(*pages), GFP_KERNEL);
+ if (NULL == pages) {
+ pr_err("Failed to allocate pages for buffer (size %zu)\n", size);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ for (num_allocated = 0; num_allocated < pages_count; num_allocated++) {
+        // The __GFP_DMA32 flag limits the allocation to the lowest 4 GB of physical memory, guaranteeing
+        // DMA operations will not need bounce buffers on certain architectures (e.g. 32-bit DMA enabled architectures)
+        kernel_address = (void*)__get_free_page(GFP_KERNEL | __GFP_DMA32);
+        if (NULL == kernel_address) {
+            pr_err("Failed to allocate a page (%zu bytes)\n", (size_t)PAGE_SIZE);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ pages[num_allocated] = kernel_address;
+ }
+
+ low_memory_buffer->pages_count = pages_count;
+ low_memory_buffer->pages_address = pages;
+
+ return 0;
+
+cleanup:
+ if (NULL != pages) {
+ for (i = 0; i < num_allocated; i++) {
+ free_page((long unsigned)pages[i]);
+ }
+
+ kfree(pages);
+ }
+
+ return ret;
+}
+
+void hailo_vdma_low_memory_buffer_free(struct hailo_vdma_low_memory_buffer *low_memory_buffer)
+{
+ size_t i = 0;
+ if (NULL == low_memory_buffer) {
+ return;
+ }
+
+ for (i = 0; i < low_memory_buffer->pages_count; i++) {
+ free_page((long unsigned)low_memory_buffer->pages_address[i]);
+ }
+
+ kfree(low_memory_buffer->pages_address);
+}
+
+struct hailo_vdma_low_memory_buffer* hailo_vdma_find_low_memory_buffer(struct hailo_vdma_file_context *context,
+ uintptr_t buf_handle)
+{
+ struct hailo_vdma_low_memory_buffer *cur = NULL;
+ list_for_each_entry(cur, &context->vdma_low_memory_buffer_list, vdma_low_memory_buffer_list) {
+ if (cur->handle == buf_handle) {
+ return cur;
+ }
+ }
+
+ return NULL;
+}
+
+void hailo_vdma_clear_low_memory_buffer_list(struct hailo_vdma_file_context *context)
+{
+ struct hailo_vdma_low_memory_buffer *cur = NULL, *next = NULL;
+ list_for_each_entry_safe(cur, next, &context->vdma_low_memory_buffer_list, vdma_low_memory_buffer_list) {
+ list_del(&cur->vdma_low_memory_buffer_list);
+ hailo_vdma_low_memory_buffer_free(cur);
+ kfree(cur);
+ }
+}
+
+int hailo_vdma_continuous_buffer_alloc(struct device *dev, size_t size,
+ struct hailo_vdma_continuous_buffer *continuous_buffer)
+{
+ dma_addr_t dma_address = 0;
+ void *kernel_address = NULL;
+
+ kernel_address = dma_alloc_coherent(dev, size, &dma_address, GFP_KERNEL);
+ if (NULL == kernel_address) {
+        dev_warn(dev, "Failed to allocate continuous buffer, size 0x%zx. This failure means there is not a sufficient amount of CMA memory "
+            "(contiguous physical memory) and is usually caused by a lack of general system memory. Please check that you have sufficient memory.\n", size);
+ return -ENOMEM;
+ }
+
+ continuous_buffer->kernel_address = kernel_address;
+ continuous_buffer->dma_address = dma_address;
+ continuous_buffer->size = size;
+ return 0;
+}
+
+void hailo_vdma_continuous_buffer_free(struct device *dev,
+ struct hailo_vdma_continuous_buffer *continuous_buffer)
+{
+ dma_free_coherent(dev, continuous_buffer->size, continuous_buffer->kernel_address,
+ continuous_buffer->dma_address);
+}
+
+struct hailo_vdma_continuous_buffer* hailo_vdma_find_continuous_buffer(struct hailo_vdma_file_context *context,
+ uintptr_t buf_handle)
+{
+ struct hailo_vdma_continuous_buffer *cur = NULL;
+ list_for_each_entry(cur, &context->continuous_buffer_list, continuous_buffer_list) {
+ if (cur->handle == buf_handle) {
+ return cur;
+ }
+ }
+
+ return NULL;
+}
+
+void hailo_vdma_clear_continuous_buffer_list(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller)
+{
+ struct hailo_vdma_continuous_buffer *cur = NULL, *next = NULL;
+ list_for_each_entry_safe(cur, next, &context->continuous_buffer_list, continuous_buffer_list) {
+ list_del(&cur->continuous_buffer_list);
+ hailo_vdma_continuous_buffer_free(controller->dev, cur);
+ kfree(cur);
+ }
+}
+
+// Assumes the provided user_address belongs to the vma and that MMIO_AND_NO_PAGES_VMA_MASK bits are set under
+// vma->vm_flags. This is validated in hailo_vdma_buffer_map, and won't be checked here
+static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
+ struct sg_table *sgt)
+{
+ int ret = -EINVAL;
+ unsigned long i = 0;
+ unsigned long pfn = 0;
+ unsigned long next_pfn = 0;
+ phys_addr_t phys_addr = 0;
+ dma_addr_t mmio_dma_address = 0;
+ const uintptr_t virt_addr = (uintptr_t)user_address;
+    const u32 vma_size = vma->vm_end - vma->vm_start; // vm_end is exclusive
+ const uintptr_t num_pages = PFN_UP(virt_addr + size) - PFN_DOWN(virt_addr);
+
+ // Check that the vma that was marked as MMIO_AND_NO_PAGES_VMA_MASK is big enough
+ if (vma_size < size) {
+ pr_err("vma (%u bytes) smaller than provided buffer (%u bytes)\n", vma_size, size);
+ return -EINVAL;
+ }
+
+ // Get the physical address of user_address
+ ret = follow_pfn(vma, virt_addr, &pfn);
+ if (ret) {
+ pr_err("follow_pfn failed with %d\n", ret);
+ return ret;
+ }
+ phys_addr = __pfn_to_phys(pfn) + offset_in_page(virt_addr);
+
+ // Make sure the physical memory is contiguous
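+    // Illustrative example: a two-page buffer starting at pfn 0x1000 must have its second
+    // page at pfn 0x1001; any gap means the region cannot be described by the single
+    // page-less scatterlist entry built below.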
+ for (i = 1; i < num_pages; ++i) {
+ ret = follow_pfn(vma, virt_addr + (i << PAGE_SHIFT), &next_pfn);
+ if (ret < 0) {
+ pr_err("follow_pfn failed with %d\n", ret);
+ return ret;
+ }
+ if (next_pfn != pfn + 1) {
+ pr_err("non-contiguous physical memory\n");
+ return -EFAULT;
+ }
+ pfn = next_pfn;
+ }
+
+ // phys_addr to dma
+ // TODO: need dma_map_resource here? doesn't work currently (we get dma_mapping_error on the returned dma addr)
+ // (HRT-12521)
+ mmio_dma_address = (dma_addr_t)phys_addr;
+
+ // Create a page-less scatterlist.
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret < 0) {
+ return ret;
+ }
+
+ sg_assign_page(sgt->sgl, NULL);
+ sg_dma_address(sgt->sgl) = mmio_dma_address;
+ sg_dma_len(sgt->sgl) = size;
+
+ return 0;
+}
+
+static int prepare_sg_table(struct sg_table *sg_table, void __user *user_address, u32 size,
+ struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
+{
+ int ret = -EINVAL;
+ int pinned_pages = 0;
+ size_t npages = 0;
+ struct page **pages = NULL;
+ int i = 0;
+ struct scatterlist *sg_alloc_res = NULL;
+
+ npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ return -ENOMEM;
+ }
+
+ // Check whether mapping user allocated buffer or driver allocated low memory buffer
+ if (NULL == low_mem_driver_allocated_buffer) {
+ mmap_read_lock(current->mm);
+ pinned_pages = get_user_pages_compact((unsigned long)user_address,
+ npages, FOLL_WRITE | FOLL_FORCE, pages);
+ mmap_read_unlock(current->mm);
+
+ if (pinned_pages < 0) {
+ pr_err("get_user_pages failed with %d\n", pinned_pages);
+ ret = pinned_pages;
+ goto exit;
+ } else if (pinned_pages != npages) {
+ pr_err("Pinned %d out of %zu\n", pinned_pages, npages);
+ ret = -EINVAL;
+ goto release_pages;
+ }
+ } else {
+ // Check to make sure in case user provides wrong buffer
+ if (npages != low_mem_driver_allocated_buffer->pages_count) {
+            pr_err("Received wrong number of pages to map: got %zu, expected %zu\n",
+                npages, low_mem_driver_allocated_buffer->pages_count);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = virt_to_page(low_mem_driver_allocated_buffer->pages_address[i]);
+ get_page(pages[i]);
+ }
+ }
+
+ sg_alloc_res = sg_alloc_table_from_pages_segment_compat(sg_table, pages, npages,
+ 0, size, SGL_MAX_SEGMENT_SIZE, NULL, 0, GFP_KERNEL);
+ if (IS_ERR(sg_alloc_res)) {
+ ret = PTR_ERR(sg_alloc_res);
+        pr_err("sg table alloc failed (err %d)\n", ret);
+ goto release_pages;
+ }
+
+ ret = 0;
+ goto exit;
+release_pages:
+ for (i = 0; i < pinned_pages; i++) {
+ if (!PageReserved(pages[i])) {
+ SetPageDirty(pages[i]);
+ }
+ put_page(pages[i]);
+ }
+exit:
+ kvfree(pages);
+ return ret;
+}
+
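+// Release an sg table built by prepare_sg_table() or map_mmio_address(): page-backed
+// entries are marked dirty (the device may have written to them) before being unpinned,
+// while page-less MMIO entries are skipped.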
+static void clear_sg_table(struct sg_table *sgt)
+{
+ struct sg_page_iter iter;
+ struct page *page = NULL;
+
+ for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+ page = sg_page_iter_page(&iter);
+ if (page) {
+ if (!PageReserved(page)) {
+ SetPageDirty(page);
+ }
+ put_page(page);
+ }
+ }
+
+ sg_free_table(sgt);
+}
--- /dev/null
+++ b/drivers/media/pci/hailo/vdma/memory.h
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+/**
+ * vDMA memory utility (including allocation and mappings)
+ */
+
+#ifndef _HAILO_VDMA_MEMORY_H_
+#define _HAILO_VDMA_MEMORY_H_
+
+#include "vdma/vdma.h"
+
+struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
+ void __user *user_address, size_t size, enum dma_data_direction direction,
+ struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
+void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf);
+void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf);
+
+void hailo_vdma_buffer_sync(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
+ size_t offset, size_t size);
+void hailo_vdma_buffer_sync_cyclic(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
+ size_t offset, size_t size);
+
+struct hailo_vdma_buffer* hailo_vdma_find_mapped_user_buffer(struct hailo_vdma_file_context *context,
+ size_t buffer_handle);
+void hailo_vdma_clear_mapped_user_buffer_list(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller);
+
+int hailo_desc_list_create(struct device *dev, u32 descriptors_count, u16 desc_page_size,
+ uintptr_t desc_handle, bool is_circular, struct hailo_descriptors_list_buffer *descriptors);
+void hailo_desc_list_release(struct device *dev, struct hailo_descriptors_list_buffer *descriptors);
+struct hailo_descriptors_list_buffer* hailo_vdma_find_descriptors_buffer(struct hailo_vdma_file_context *context,
+ uintptr_t desc_handle);
+void hailo_vdma_clear_descriptors_buffer_list(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller);
+
+int hailo_vdma_low_memory_buffer_alloc(size_t size, struct hailo_vdma_low_memory_buffer *low_memory_buffer);
+void hailo_vdma_low_memory_buffer_free(struct hailo_vdma_low_memory_buffer *low_memory_buffer);
+struct hailo_vdma_low_memory_buffer* hailo_vdma_find_low_memory_buffer(struct hailo_vdma_file_context *context,
+ uintptr_t buf_handle);
+void hailo_vdma_clear_low_memory_buffer_list(struct hailo_vdma_file_context *context);
+
+int hailo_vdma_continuous_buffer_alloc(struct device *dev, size_t size,
+ struct hailo_vdma_continuous_buffer *continuous_buffer);
+void hailo_vdma_continuous_buffer_free(struct device *dev,
+ struct hailo_vdma_continuous_buffer *continuous_buffer);
+struct hailo_vdma_continuous_buffer* hailo_vdma_find_continuous_buffer(struct hailo_vdma_file_context *context,
+ uintptr_t buf_handle);
+void hailo_vdma_clear_continuous_buffer_list(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller);
+#endif /* _HAILO_VDMA_MEMORY_H_ */
\ No newline at end of file
--- /dev/null
+++ b/drivers/media/pci/hailo/vdma/vdma.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+
+#define pr_fmt(fmt) "hailo: " fmt
+
+#include "vdma.h"
+#include "memory.h"
+#include "ioctl.h"
+#include "utils/logs.h"
+
+#include <linux/sched.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+#include <linux/dma-map-ops.h>
+#else
+#include <linux/dma-mapping.h>
+#endif
+
+
+static struct hailo_vdma_engine* init_vdma_engines(struct device *dev,
+ struct hailo_resource *channel_registers_per_engine, size_t engines_count)
+{
+ struct hailo_vdma_engine *engines = NULL;
+ u8 i = 0;
+
+ engines = devm_kmalloc_array(dev, engines_count, sizeof(*engines), GFP_KERNEL);
+ if (NULL == engines) {
+ dev_err(dev, "Failed allocating vdma engines\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < engines_count; i++) {
+ hailo_vdma_engine_init(&engines[i], i, &channel_registers_per_engine[i]);
+ }
+
+ return engines;
+}
+
+static int hailo_set_dma_mask(struct device *dev)
+{
+ int err = -EINVAL;
+    /* Check and configure the DMA mask */
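+    // Try the widest mask first and fall back to narrower ones; the first mask the
+    // platform accepts bounds the physical addresses the device may DMA to or from.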
+ if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))) {
+ dev_notice(dev, "Probing: Enabled 64 bit dma\n");
+ } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))) {
+ dev_notice(dev, "Probing: Enabled 48 bit dma\n");
+ } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)))) {
+ dev_notice(dev, "Probing: Enabled 40 bit dma\n");
+ } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)))) {
+ dev_notice(dev, "Probing: Enabled 36 bit dma\n");
+ } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))) {
+ dev_notice(dev, "Probing: Enabled 32 bit dma\n");
+ } else {
+ dev_err(dev, "Probing: Error enabling dma %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int hailo_vdma_controller_init(struct hailo_vdma_controller *controller,
+ struct device *dev, struct hailo_vdma_hw *vdma_hw,
+ struct hailo_vdma_controller_ops *ops,
+ struct hailo_resource *channel_registers_per_engine, size_t engines_count)
+{
+ int err = 0;
+ controller->hw = vdma_hw;
+ controller->ops = ops;
+ controller->dev = dev;
+
+ controller->vdma_engines_count = engines_count;
+ controller->vdma_engines = init_vdma_engines(dev, channel_registers_per_engine, engines_count);
+ if (IS_ERR(controller->vdma_engines)) {
+        dev_err(dev, "Failed to initialize vdma engines\n");
+ return PTR_ERR(controller->vdma_engines);
+ }
+
+ controller->used_by_filp = NULL;
+ spin_lock_init(&controller->interrupts_lock);
+ init_waitqueue_head(&controller->interrupts_wq);
+
+    /* Check and configure the DMA mask */
+ err = hailo_set_dma_mask(dev);
+ if (0 > err) {
+ return err;
+ }
+
+ if (get_dma_ops(controller->dev)) {
+ hailo_dev_notice(controller->dev, "Probing: Using specialized dma_ops=%ps", get_dma_ops(controller->dev));
+ }
+
+ return 0;
+}
+
+void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context)
+{
+ atomic_set(&context->last_vdma_user_buffer_handle, 0);
+ INIT_LIST_HEAD(&context->mapped_user_buffer_list);
+
+ atomic_set(&context->last_vdma_handle, 0);
+ INIT_LIST_HEAD(&context->descriptors_buffer_list);
+ INIT_LIST_HEAD(&context->vdma_low_memory_buffer_list);
+ INIT_LIST_HEAD(&context->continuous_buffer_list);
+}
+
+void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
+ size_t engine_index)
+{
+ struct hailo_vdma_engine *engine = &controller->vdma_engines[engine_index];
+ controller->ops->update_channel_interrupts(controller, engine_index, engine->enabled_channels);
+}
+
+void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap)
+{
+ unsigned long irq_saved_flags = 0;
+    // In case of FLR the device is being torn down, so controller->dev (and the vdma registers) is NULL
+ const bool is_device_up = (NULL != controller->dev);
+
+ hailo_vdma_engine_disable_channel_interrupts(engine, channels_bitmap);
+ if (is_device_up) {
+ hailo_vdma_update_interrupts_mask(controller, engine_index);
+ }
+
+ spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
+ hailo_vdma_engine_clear_channel_interrupts(engine, channels_bitmap);
+ spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
+
+ hailo_dev_info(controller->dev, "Disabled interrupts for engine %u, channels bitmap 0x%x\n",
+ engine_index, channels_bitmap);
+}
+
+void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller, struct file *filp)
+{
+ size_t engine_index = 0;
+ struct hailo_vdma_engine *engine = NULL;
+ const u32 channels_bitmap = 0xFFFFFFFF; // disable all channel interrupts
+
+ if (filp == controller->used_by_filp) {
+ for_each_vdma_engine(controller, engine, engine_index) {
+ hailo_vdma_engine_interrupts_disable(controller, engine, engine_index, channels_bitmap);
+ }
+ }
+
+ hailo_vdma_clear_mapped_user_buffer_list(context, controller);
+ hailo_vdma_clear_descriptors_buffer_list(context, controller);
+ hailo_vdma_clear_low_memory_buffer_list(context);
+ hailo_vdma_clear_continuous_buffer_list(context, controller);
+
+ if (filp == controller->used_by_filp) {
+ controller->used_by_filp = NULL;
+ }
+}
+
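+// Interrupt notification path: records per-channel timestamps, latches the channel
+// interrupt bits under interrupts_lock, and wakes any thread sleeping in
+// hailo_vdma_interrupts_wait_ioctl().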
+void hailo_vdma_irq_handler(struct hailo_vdma_controller *controller,
+ size_t engine_index, u32 channels_bitmap)
+{
+ unsigned long irq_saved_flags = 0;
+ struct hailo_vdma_engine *engine = NULL;
+
+ BUG_ON(engine_index >= controller->vdma_engines_count);
+ engine = &controller->vdma_engines[engine_index];
+
+ hailo_vdma_engine_push_timestamps(engine, channels_bitmap);
+
+ spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
+ hailo_vdma_engine_set_channel_interrupts(engine, channels_bitmap);
+ spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
+
+ wake_up_interruptible_all(&controller->interrupts_wq);
+}
+
+long hailo_vdma_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex)
+{
+ switch (cmd) {
+ case HAILO_VDMA_INTERRUPTS_ENABLE:
+ return hailo_vdma_interrupts_enable_ioctl(controller, arg);
+ case HAILO_VDMA_INTERRUPTS_DISABLE:
+ return hailo_vdma_interrupts_disable_ioctl(controller, arg);
+ case HAILO_VDMA_INTERRUPTS_WAIT:
+ return hailo_vdma_interrupts_wait_ioctl(controller, arg, mutex, should_up_board_mutex);
+ case HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS:
+ return hailo_vdma_interrupts_read_timestamps_ioctl(controller, arg);
+ case HAILO_VDMA_BUFFER_MAP:
+ return hailo_vdma_buffer_map_ioctl(context, controller, arg);
+ case HAILO_VDMA_BUFFER_UNMAP:
+ return hailo_vdma_buffer_unmap_ioctl(context, controller, arg);
+ case HAILO_VDMA_BUFFER_SYNC:
+ return hailo_vdma_buffer_sync_ioctl(context, controller, arg);
+ case HAILO_DESC_LIST_CREATE:
+ return hailo_desc_list_create_ioctl(context, controller, arg);
+ case HAILO_DESC_LIST_RELEASE:
+ return hailo_desc_list_release_ioctl(context, controller, arg);
+ case HAILO_DESC_LIST_BIND_VDMA_BUFFER:
+ return hailo_desc_list_bind_vdma_buffer(context, controller, arg);
+ case HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC:
+ return hailo_vdma_low_memory_buffer_alloc_ioctl(context, controller, arg);
+ case HAILO_VDMA_LOW_MEMORY_BUFFER_FREE:
+ return hailo_vdma_low_memory_buffer_free_ioctl(context, controller, arg);
+ case HAILO_MARK_AS_IN_USE:
+ return hailo_mark_as_in_use(controller, arg, filp);
+ case HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC:
+ return hailo_vdma_continuous_buffer_alloc_ioctl(context, controller, arg);
+ case HAILO_VDMA_CONTINUOUS_BUFFER_FREE:
+ return hailo_vdma_continuous_buffer_free_ioctl(context, controller, arg);
+ case HAILO_VDMA_LAUNCH_TRANSFER:
+ return hailo_vdma_launch_transfer_ioctl(context, controller, arg);
+ default:
+ hailo_dev_err(controller->dev, "Invalid vDMA ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
+ return -ENOTTY;
+ }
+}
+
+static int desc_list_mmap(struct hailo_vdma_controller *controller,
+ struct hailo_descriptors_list_buffer *vdma_descriptors_buffer, struct vm_area_struct *vma)
+{
+ int err = 0;
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+
+ if (vsize > vdma_descriptors_buffer->buffer_size) {
+        hailo_dev_err(controller->dev, "Requested size to map (%lx) is larger than the descriptor list size (%x)\n",
+ vsize, vdma_descriptors_buffer->buffer_size);
+ return -EINVAL;
+ }
+
+ err = dma_mmap_coherent(controller->dev, vma, vdma_descriptors_buffer->kernel_address,
+ vdma_descriptors_buffer->dma_address, vsize);
+ if (err != 0) {
+        hailo_dev_err(controller->dev, "Failed to mmap descriptors, err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int low_memory_buffer_mmap(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_low_memory_buffer *vdma_buffer, struct vm_area_struct *vma)
+{
+ int err = 0;
+ size_t i = 0;
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ unsigned long orig_vm_start = vma->vm_start;
+ unsigned long orig_vm_end = vma->vm_end;
+ unsigned long page_fn = 0;
+
+ if (vsize != vdma_buffer->pages_count * PAGE_SIZE) {
+ hailo_dev_err(controller->dev, "mmap size should be %lu (given %lu)\n",
+ vdma_buffer->pages_count * PAGE_SIZE, vsize);
+ return -EINVAL;
+ }
+
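+    // The driver pages are not physically contiguous, so the mapping is built one page
+    // at a time: the vma is temporarily narrowed to a single-page window for each
+    // remap_pfn_range() call and restored to its original bounds afterwards.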
+    for (i = 0; i < vdma_buffer->pages_count; i++) {
+ if (i > 0) {
+ vma->vm_start = vma->vm_end;
+ }
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+
+        page_fn = virt_to_phys(vdma_buffer->pages_address[i]) >> PAGE_SHIFT;
+ err = remap_pfn_range(vma, vma->vm_start, page_fn, PAGE_SIZE, vma->vm_page_prot);
+
+ if (err != 0) {
+            hailo_dev_err(controller->dev, "fops_mmap failed mapping kernel page, err %d\n", err);
+ return err;
+ }
+ }
+
+ vma->vm_start = orig_vm_start;
+ vma->vm_end = orig_vm_end;
+
+ return 0;
+}
+
+static int continuous_buffer_mmap(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_continuous_buffer *buffer, struct vm_area_struct *vma)
+{
+ int err = 0;
+ const unsigned long vsize = vma->vm_end - vma->vm_start;
+
+ if (vsize > buffer->size) {
+        hailo_dev_err(controller->dev, "mmap size should be at most %zu (given %lu)\n",
+ buffer->size, vsize);
+ return -EINVAL;
+ }
+
+ err = dma_mmap_coherent(controller->dev, vma, buffer->kernel_address,
+ buffer->dma_address, vsize);
+ if (err < 0) {
+        hailo_dev_err(controller->dev, "vdma_mmap failed dma_mmap_coherent, err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ struct vm_area_struct *vma, uintptr_t vdma_handle)
+{
+ struct hailo_descriptors_list_buffer *vdma_descriptors_buffer = NULL;
+ struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
+ struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
+
+ hailo_dev_info(controller->dev, "Map vdma_handle %llu\n", (u64)vdma_handle);
+ if (NULL != (vdma_descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, vdma_handle))) {
+ return desc_list_mmap(controller, vdma_descriptors_buffer, vma);
+ }
+ else if (NULL != (low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, vdma_handle))) {
+ return low_memory_buffer_mmap(controller, low_memory_buffer, vma);
+ }
+ else if (NULL != (continuous_buffer = hailo_vdma_find_continuous_buffer(context, vdma_handle))) {
+ return continuous_buffer_mmap(controller, continuous_buffer, vma);
+ }
+ else {
+        hailo_dev_err(controller->dev, "Can't mmap vdma handle %llu (does not exist)\n", (u64)vdma_handle);
+ return -EINVAL;
+ }
+}
+
+enum dma_data_direction get_dma_direction(enum hailo_dma_data_direction hailo_direction)
+{
+ switch (hailo_direction) {
+ case HAILO_DMA_BIDIRECTIONAL:
+ return DMA_BIDIRECTIONAL;
+ case HAILO_DMA_TO_DEVICE:
+ return DMA_TO_DEVICE;
+ case HAILO_DMA_FROM_DEVICE:
+ return DMA_FROM_DEVICE;
+ default:
+ pr_err("Invalid hailo direction %d\n", hailo_direction);
+ return DMA_NONE;
+ }
+}
--- /dev/null
+++ b/drivers/media/pci/hailo/vdma/vdma.h
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
+ **/
+/**
+ * Hailo vdma engine definitions
+ */
+
+#ifndef _HAILO_VDMA_VDMA_H_
+#define _HAILO_VDMA_VDMA_H_
+
+#include "hailo_ioctl_common.h"
+#include "hailo_resource.h"
+#include "vdma_common.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#define VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
+ (((channel_index) << 5) + 0x0) : (((channel_index) << 5) + 0x10))
+#define VDMA_CHANNEL_CONTROL_REG_ADDRESS(vdma_registers, channel_index, direction) \
+ ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction))
+
+#define VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
+ (((channel_index) << 5) + 0x4) : (((channel_index) << 5) + 0x14))
+#define VDMA_CHANNEL_NUM_PROC_ADDRESS(vdma_registers, channel_index, direction) \
+ ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction))
+
+
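+// A user-space buffer mapped for vDMA, tracked in the per-context
+// mapped_user_buffer_list and kref-counted to keep the mapping alive while shared.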
+struct hailo_vdma_buffer {
+ struct list_head mapped_user_buffer_list;
+ size_t handle;
+
+ struct kref kref;
+ struct device *device;
+
+ void __user *user_address;
+ u32 size;
+ enum dma_data_direction data_direction;
+ struct sg_table sg_table;
+
+    // If this flag is set, the buffer pointed to by sg_table is not backed by
+    // 'struct page' (only by a raw pfn). In that case, accessing the page, or
+    // calling APIs that access the page (e.g. dma_sync_sg_for_cpu), is not
+    // allowed.
+ bool is_mmio;
+};
+
+// Continuous buffer that holds a descriptor list.
+struct hailo_descriptors_list_buffer {
+ struct list_head descriptors_buffer_list;
+ uintptr_t handle;
+ void *kernel_address;
+ dma_addr_t dma_address;
+ u32 buffer_size;
+ struct hailo_vdma_descriptors_list desc_list;
+};
+
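+// Buffer assembled from individually allocated kernel pages rather than one
+// contiguous allocation; mmapped to user space one page at a time.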
+struct hailo_vdma_low_memory_buffer {
+ struct list_head vdma_low_memory_buffer_list;
+ uintptr_t handle;
+ size_t pages_count;
+ void **pages_address;
+};
+
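+// Physically contiguous DMA buffer; mmapped to user space with dma_mmap_coherent().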
+struct hailo_vdma_continuous_buffer {
+ struct list_head continuous_buffer_list;
+ uintptr_t handle;
+ void *kernel_address;
+ dma_addr_t dma_address;
+ size_t size;
+};
+
+struct hailo_vdma_controller;
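+// Transport-specific callbacks; update_channel_interrupts propagates an
+// engine's enabled-channel interrupt bitmap to the device.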
+struct hailo_vdma_controller_ops {
+ void (*update_channel_interrupts)(struct hailo_vdma_controller *controller, size_t engine_index,
+ u32 channels_bitmap);
+};
+
+struct hailo_vdma_controller {
+ struct hailo_vdma_hw *hw;
+ struct hailo_vdma_controller_ops *ops;
+ struct device *dev;
+
+ size_t vdma_engines_count;
+ struct hailo_vdma_engine *vdma_engines;
+
+ spinlock_t interrupts_lock;
+ wait_queue_head_t interrupts_wq;
+
+ struct file *used_by_filp;
+
+ // Putting big IOCTL structures here to avoid stack allocation.
+ struct hailo_vdma_interrupts_read_timestamp_params read_interrupt_timestamps_params;
+};
+
+#define for_each_vdma_engine(controller, engine, engine_index) \
+ _for_each_element_array(controller->vdma_engines, controller->vdma_engines_count, \
+ engine, engine_index)
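+// Iterate 'engine' (and its 'engine_index') over all vdma engines of
+// 'controller', e.g. for_each_vdma_engine(controller, engine, index) { ... }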
+
+struct hailo_vdma_file_context {
+ atomic_t last_vdma_user_buffer_handle;
+ struct list_head mapped_user_buffer_list;
+
+    // last_vdma_handle serves as the handle for both vdma descriptor lists and
+    // vdma buffers - there will be no collisions between the two
+ atomic_t last_vdma_handle;
+ struct list_head descriptors_buffer_list;
+ struct list_head vdma_low_memory_buffer_list;
+ struct list_head continuous_buffer_list;
+};
+
+
+int hailo_vdma_controller_init(struct hailo_vdma_controller *controller,
+ struct device *dev, struct hailo_vdma_hw *vdma_hw,
+ struct hailo_vdma_controller_ops *ops,
+ struct hailo_resource *channel_registers_per_engine, size_t engines_count);
+
+void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
+ size_t engine_index);
+
+void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
+ struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap);
+
+void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context);
+void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
+ struct hailo_vdma_controller *controller, struct file *filp);
+
+void hailo_vdma_irq_handler(struct hailo_vdma_controller *controller, size_t engine_index,
+ u32 channels_bitmap);
+
+// TODO: reduce params count
+long hailo_vdma_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex);
+
+int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
+ struct vm_area_struct *vma, uintptr_t vdma_handle);
+
+enum dma_data_direction get_dma_direction(enum hailo_dma_data_direction hailo_direction);
+void hailo_vdma_disable_vdma_channels(struct hailo_vdma_controller *controller, const bool should_close_channels);
+
+#endif /* _HAILO_VDMA_VDMA_H_ */