Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_amba.c | 9
-rw-r--r--  drivers/android/binder.c | 157
-rw-r--r--  drivers/android/binder_alloc.c | 44
-rw-r--r--  drivers/android/binder_alloc.h | 22
-rw-r--r--  drivers/char/bsr.c | 5
-rw-r--r--  drivers/char/misc.c | 3
-rw-r--r--  drivers/counter/104-quad-8.c | 2
-rw-r--r--  drivers/extcon/Kconfig | 12
-rw-r--r--  drivers/extcon/Makefile | 1
-rw-r--r--  drivers/extcon/extcon-arizona.c | 33
-rw-r--r--  drivers/extcon/extcon-fsa9480.c | 395
-rw-r--r--  drivers/firmware/google/coreboot_table.h | 11
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c | 14
-rw-r--r--  drivers/firmware/google/memconsole-coreboot.c | 28
-rw-r--r--  drivers/firmware/google/memconsole.c | 9
-rw-r--r--  drivers/firmware/google/vpd.c | 14
-rw-r--r--  drivers/firmware/google/vpd_decode.c | 2
-rw-r--r--  drivers/fpga/Kconfig | 6
-rw-r--r--  drivers/fpga/dfl-fme-mgr.c | 4
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 17
-rw-r--r--  drivers/fsi/cf-fsi-fw.h | 2
-rw-r--r--  drivers/fsi/fsi-core.c | 32
-rw-r--r--  drivers/fsi/fsi-occ.c | 15
-rw-r--r--  drivers/fsi/fsi-sbefifo.c | 4
-rw-r--r--  drivers/hwmon/occ/common.c | 4
-rw-r--r--  drivers/hwmon/occ/common.h | 1
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 1
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c | 40
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.h | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 78
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 8
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c | 49
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 40
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 36
-rw-r--r--  drivers/hwtracing/coresight/coresight-platform.c | 815
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 43
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 118
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 43
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 80
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 96
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 24
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 164
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 297
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 150
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r--  drivers/memory/Kconfig | 2
-rw-r--r--  drivers/memory/jz4780-nemc.c | 26
-rw-r--r--  drivers/misc/Kconfig | 32
-rw-r--r--  drivers/misc/Makefile | 2
-rw-r--r--  drivers/misc/altera-stapl/Kconfig | 1
-rw-r--r--  drivers/misc/c2port/Kconfig | 2
-rw-r--r--  drivers/misc/cb710/Kconfig | 1
-rw-r--r--  drivers/misc/cxl/Kconfig | 3
-rw-r--r--  drivers/misc/echo/Kconfig | 1
-rw-r--r--  drivers/misc/eeprom/ee1004.c | 43
-rw-r--r--  drivers/misc/eeprom/idt_89hpesx.c | 6
-rw-r--r--  drivers/misc/fsa9480.c | 547
-rw-r--r--  drivers/misc/genwqe/Kconfig | 1
-rw-r--r--  drivers/misc/habanalabs/asid.c | 2
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 10
-rw-r--r--  drivers/misc/habanalabs/context.c | 11
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 54
-rw-r--r--  drivers/misc/habanalabs/device.c | 189
-rw-r--r--  drivers/misc/habanalabs/firmware_if.c | 51
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 635
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 16
-rw-r--r--  drivers/misc/habanalabs/goya/goya_security.c | 16
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 93
-rw-r--r--  drivers/misc/habanalabs/habanalabs_drv.c | 66
-rw-r--r--  drivers/misc/habanalabs/habanalabs_ioctl.c | 11
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 2
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h | 418
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/memory.c | 13
-rw-r--r--  drivers/misc/habanalabs/mmu.c | 20
-rw-r--r--  drivers/misc/habanalabs/pci.c | 10
-rw-r--r--  drivers/misc/habanalabs/sysfs.c | 4
-rw-r--r--  drivers/misc/isl29003.c | 4
-rw-r--r--  drivers/misc/lis3lv02d/Kconfig | 2
-rw-r--r--  drivers/misc/lkdtm/Makefile | 3
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 66
-rw-r--r--  drivers/misc/lkdtm/core.c | 1
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 1
-rw-r--r--  drivers/misc/mei/debugfs.c | 184
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.c | 11
-rw-r--r--  drivers/misc/mic/scif/scif_main.c | 1
-rw-r--r--  drivers/misc/ocxl/Kconfig | 1
-rw-r--r--  drivers/misc/ocxl/context.c | 9
-rw-r--r--  drivers/misc/ocxl/link.c | 28
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c | 2
-rw-r--r--  drivers/misc/tsl2550.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 489
-rw-r--r--  drivers/misc/vmw_vmci/vmci_context.c | 80
-rw-r--r--  drivers/misc/vmw_vmci/vmci_handle_array.c | 38
-rw-r--r--  drivers/misc/vmw_vmci/vmci_handle_array.h | 29
-rw-r--r--  drivers/misc/xilinx_sdfec.c | 345
-rw-r--r--  drivers/mux/Kconfig | 12
-rw-r--r--  drivers/mux/mmio.c | 6
-rw-r--r--  drivers/nvmem/Kconfig | 9
-rw-r--r--  drivers/nvmem/Makefile | 2
-rw-r--r--  drivers/nvmem/imx-ocotp-scu.c | 161
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 52
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/slimbus/core.c | 5
-rw-r--r--  drivers/slimbus/qcom-ctrl.c | 4
-rw-r--r--  drivers/slimbus/stream.c | 12
-rw-r--r--  drivers/soundwire/bus.c | 6
-rw-r--r--  drivers/soundwire/cadence_master.c | 30
-rw-r--r--  drivers/soundwire/intel.c | 17
-rw-r--r--  drivers/soundwire/intel.h | 2
-rw-r--r--  drivers/soundwire/intel_init.c | 25
-rw-r--r--  drivers/soundwire/mipi_disco.c | 35
-rw-r--r--  drivers/soundwire/stream.c | 8
-rw-r--r--  drivers/w1/slaves/w1_ds2413.c | 65
-rw-r--r--  drivers/w1/slaves/w1_ds2805.c | 6
122 files changed, 4767 insertions, 2246 deletions
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index 8159f0a669b8..49b781a9cd97 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -21,6 +21,15 @@
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
+ {"ARMHC500", 0}, /* ARM CoreSight ETM4x */
+ {"ARMHC501", 0}, /* ARM CoreSight ETR */
+ {"ARMHC502", 0}, /* ARM CoreSight STM */
+ {"ARMHC503", 0}, /* ARM CoreSight Debug */
+ {"ARMHC979", 0}, /* ARM CoreSight TPIU */
+ {"ARMHC97C", 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */
+ {"ARMHC98D", 0}, /* ARM CoreSight Dynamic Replicator */
+ {"ARMHC9CA", 0}, /* ARM CoreSight CATU */
+ {"ARMHC9FF", 0}, /* ARM CoreSight Dynamic Funnel */
{"", 0},
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bc26b5511f0a..38a59a630cd4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2059,10 +2059,9 @@ static size_t binder_get_object(struct binder_proc *proc,
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
- !IS_ALIGNED(offset, sizeof(u32)))
+ binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size))
return 0;
- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
- offset, read_size);
/* Ok, now see if we read a complete object. */
hdr = &object->hdr;
@@ -2131,8 +2130,10 @@ static struct binder_buffer_object *binder_validate_ptr(
return NULL;
buffer_offset = start_offset + sizeof(binder_size_t) * index;
- binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
- b, buffer_offset, sizeof(object_offset));
+ if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ b, buffer_offset,
+ sizeof(object_offset)))
+ return NULL;
object_size = binder_get_object(proc, b, object_offset, object);
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
@@ -2212,10 +2213,12 @@ static bool binder_validate_fixup(struct binder_proc *proc,
return false;
last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
buffer_offset = objects_start_offset +
- sizeof(binder_size_t) * last_bbo->parent,
- binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
- b, buffer_offset,
- sizeof(last_obj_offset));
+ sizeof(binder_size_t) * last_bbo->parent;
+ if (binder_alloc_copy_from_buffer(&proc->alloc,
+ &last_obj_offset,
+ b, buffer_offset,
+ sizeof(last_obj_offset)))
+ return false;
}
return (fixup_offset >= last_min_offset);
}
@@ -2301,15 +2304,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
- size_t object_size;
+ size_t object_size = 0;
struct binder_object object;
binder_size_t object_offset;
- binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
- buffer, buffer_offset,
- sizeof(object_offset));
- object_size = binder_get_object(proc, buffer,
- object_offset, &object);
+ if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ buffer, buffer_offset,
+ sizeof(object_offset)))
+ object_size = binder_get_object(proc, buffer,
+ object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)object_offset, buffer->data_size);
@@ -2432,15 +2435,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
for (fd_index = 0; fd_index < fda->num_fds;
fd_index++) {
u32 fd;
+ int err;
binder_size_t offset = fda_offset +
fd_index * sizeof(fd);
- binder_alloc_copy_from_buffer(&proc->alloc,
- &fd,
- buffer,
- offset,
- sizeof(fd));
- binder_deferred_fd_close(fd);
+ err = binder_alloc_copy_from_buffer(
+ &proc->alloc, &fd, buffer,
+ offset, sizeof(fd));
+ WARN_ON(err);
+ if (!err)
+ binder_deferred_fd_close(fd);
}
} break;
default:
@@ -2683,11 +2687,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
int ret;
binder_size_t offset = fda_offset + fdi * sizeof(fd);
- binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
- ret = binder_translate_fd(fd, offset, t, thread,
- in_reply_to);
+ ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &fd, t->buffer,
+ offset, sizeof(fd));
+ if (!ret)
+ ret = binder_translate_fd(fd, offset, t, thread,
+ in_reply_to);
if (ret < 0)
return ret;
}
@@ -2740,8 +2745,12 @@ static int binder_fixup_parent(struct binder_transaction *t,
}
buffer_offset = bp->parent_offset +
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
- binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
- &bp->buffer, sizeof(bp->buffer));
+ if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
+ &bp->buffer, sizeof(bp->buffer))) {
+ binder_user_error("%d:%d got transaction with invalid parent offset\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
return 0;
}
@@ -3160,15 +3169,20 @@ static void binder_transaction(struct binder_proc *proc,
goto err_binder_alloc_buf_failed;
}
if (secctx) {
+ int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, buf_offset,
- secctx, secctx_sz);
+ err = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, buf_offset,
+ secctx, secctx_sz);
+ if (err) {
+ t->security_ctx = 0;
+ WARN_ON(1);
+ }
security_release_secctx(secctx, secctx_sz);
secctx = NULL;
}
@@ -3234,11 +3248,16 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_object object;
binder_size_t object_offset;
- binder_alloc_copy_from_buffer(&target_proc->alloc,
- &object_offset,
- t->buffer,
- buffer_offset,
- sizeof(object_offset));
+ if (binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &object_offset,
+ t->buffer,
+ buffer_offset,
+ sizeof(object_offset))) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
object_size = binder_get_object(target_proc, t->buffer,
object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
@@ -3262,15 +3281,17 @@ static void binder_transaction(struct binder_proc *proc,
fp = to_flat_binder_object(hdr);
ret = binder_translate_binder(fp, t, thread);
- if (ret < 0) {
+
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fp, sizeof(*fp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- fp, sizeof(*fp));
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -3278,15 +3299,16 @@ static void binder_transaction(struct binder_proc *proc,
fp = to_flat_binder_object(hdr);
ret = binder_translate_handle(fp, t, thread);
- if (ret < 0) {
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fp, sizeof(*fp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- fp, sizeof(*fp));
} break;
case BINDER_TYPE_FD: {
@@ -3296,16 +3318,17 @@ static void binder_transaction(struct binder_proc *proc,
int ret = binder_translate_fd(fp->fd, fd_offset, t,
thread, in_reply_to);
- if (ret < 0) {
+ fp->pad_binder = 0;
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fp, sizeof(*fp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- fp->pad_binder = 0;
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- fp, sizeof(*fp));
} break;
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
@@ -3393,15 +3416,16 @@ static void binder_transaction(struct binder_proc *proc,
num_valid,
last_fixup_obj_off,
last_fixup_min_off);
- if (ret < 0) {
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ bp, sizeof(*bp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- bp, sizeof(*bp));
last_fixup_obj_off = object_offset;
last_fixup_min_off = 0;
} break;
@@ -4140,20 +4164,27 @@ static int binder_apply_fd_fixups(struct binder_proc *proc,
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
fd_install(fd, fixup->file);
fixup->file = NULL;
- binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
- fixup->offset, &fd,
- sizeof(u32));
+ if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
+ fixup->offset, &fd,
+ sizeof(u32))) {
+ ret = -EINVAL;
+ break;
+ }
}
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
if (fixup->file) {
fput(fixup->file);
} else if (ret) {
u32 fd;
-
- binder_alloc_copy_from_buffer(&proc->alloc, &fd,
- t->buffer, fixup->offset,
- sizeof(fd));
- binder_deferred_fd_close(fd);
+ int err;
+
+ err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
+ t->buffer,
+ fixup->offset,
+ sizeof(fd));
+ WARN_ON(err);
+ if (!err)
+ binder_deferred_fd_close(fd);
}
list_del(&fixup->fixup_entry);
kfree(fixup);
@@ -4268,6 +4299,8 @@ retry:
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -4276,8 +4309,6 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
- kfree(w);
- binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index ce5603c2291c..6d79a1b0d446 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1119,15 +1119,16 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
return 0;
}
-static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
- bool to_buffer,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- void *ptr,
- size_t bytes)
+static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+ bool to_buffer,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *ptr,
+ size_t bytes)
{
/* All copies must be 32-bit aligned and 32-bit size */
- BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+ if (!check_buffer(alloc, buffer, buffer_offset, bytes))
+ return -EINVAL;
while (bytes) {
unsigned long size;
@@ -1155,25 +1156,26 @@ static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
ptr = ptr + size;
buffer_offset += size;
}
+ return 0;
}
-void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- void *src,
- size_t bytes)
+int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes)
{
- binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
- src, bytes);
+ return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+ src, bytes);
}
-void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
- void *dest,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- size_t bytes)
+int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes)
{
- binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
- dest, bytes);
+ return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+ dest, bytes);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 71bfa95f8e09..db9c1b984695 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -159,17 +159,17 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
const void __user *from,
size_t bytes);
-void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- void *src,
- size_t bytes);
-
-void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
- void *dest,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- size_t bytes);
+int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes);
+
+int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes);
#endif /* _LINUX_BINDER_ALLOC_H */
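
The header change above turns the buffer-copy helpers from void into int, so an out-of-range or misaligned offset is reported to the caller instead of hitting BUG_ON(). The binder.c hunks earlier in this diff all follow the same calling convention, which reduces to a minimal sketch like this (error value chosen for illustration):

	binder_size_t object_offset;

	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  buffer, buffer_offset,
					  sizeof(object_offset)))
		return -EINVAL;	/* reject the transaction instead of crashing */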
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 167e7370d43a..e5e5333f302d 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -134,7 +134,7 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-static int bsr_open(struct inode * inode, struct file * filp)
+static int bsr_open(struct inode *inode, struct file *filp)
{
struct cdev *cdev = inode->i_cdev;
struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
@@ -309,7 +309,8 @@ static int __init bsr_init(void)
goto out_err_2;
}
- if ((ret = bsr_create_devs(np)) < 0) {
+ ret = bsr_create_devs(np);
+ if (ret < 0) {
np = NULL;
goto out_err_3;
}
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 53cfe574d8d4..f6a147427029 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -226,6 +226,7 @@ int misc_register(struct miscdevice *misc)
mutex_unlock(&misc_mtx);
return err;
}
+EXPORT_SYMBOL(misc_register);
/**
* misc_deregister - unregister a miscellaneous device
@@ -249,8 +250,6 @@ void misc_deregister(struct miscdevice *misc)
clear_bit(i, misc_minors);
mutex_unlock(&misc_mtx);
}
-
-EXPORT_SYMBOL(misc_register);
EXPORT_SYMBOL(misc_deregister);
static char *misc_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 4fa2931dcb7b..00b113f4b958 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -833,7 +833,7 @@ static int quad8_action_get(struct counter_device *counter,
return 0;
}
-const struct counter_ops quad8_ops = {
+static const struct counter_ops quad8_ops = {
.signal_read = quad8_signal_read,
.count_read = quad8_count_read,
.count_write = quad8_count_write,
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6f5af4196b8d..fa1804460e8c 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -37,6 +37,18 @@ config EXTCON_AXP288
Say Y here to enable support for USB peripheral detection
and USB MUX switching by X-Power AXP288 PMIC.
+config EXTCON_FSA9480
+ tristate "FSA9480 EXTCON Support"
+ depends on INPUT && I2C
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Fairchild Semiconductor
+ FSA9480 microUSB switch and accessory detector chip. The FSA9480 is a USB
+ port accessory detector and switch. The FSA9480 is fully controlled using
+ I2C and enables USB data, stereo and mono audio, video, microphone
+ and UART data to use a common connector port.
+
config EXTCON_GPIO
tristate "GPIO extcon support"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index d3941a735df3..52096fd8a216 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -8,6 +8,7 @@ extcon-core-objs += extcon.o devres.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
+obj-$(CONFIG_EXTCON_FSA9480) += extcon-fsa9480.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index bb6434726c7a..7e9f4c9ee87d 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -326,10 +326,12 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
arizona_extcon_pulse_micbias(info);
- regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
- &change);
- if (!change) {
+ ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
+ &change);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to enable micd: %d\n", ret);
+ } else if (!change) {
regulator_disable(info->micvdd);
pm_runtime_put_autosuspend(info->dev);
}
@@ -341,12 +343,14 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
- bool change;
+ bool change = false;
int ret;
- regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA, 0,
- &change);
+ ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0,
+ &change);
+ if (ret < 0)
+ dev_err(arizona->dev, "Failed to disable micd: %d\n", ret);
ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
@@ -1718,12 +1722,15 @@ static int arizona_extcon_remove(struct platform_device *pdev)
struct arizona *arizona = info->arizona;
int jack_irq_rise, jack_irq_fall;
bool change;
+ int ret;
- regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA, 0,
- &change);
-
- if (change) {
+ ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0,
+ &change);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
+ ret);
+ } else if (change) {
regulator_disable(info->micvdd);
pm_runtime_put(info->dev);
}
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
new file mode 100644
index 000000000000..350fb34abfa0
--- /dev/null
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * extcon-fsa9480.c - Fairchild Semiconductor FSA9480 extcon driver
+ *
+ * Copyright (c) 2019 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Loosely based on old fsa9480 misc-device driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/extcon-provider.h>
+#include <linux/irqdomain.h>
+#include <linux/regmap.h>
+
+/* FSA9480 I2C registers */
+#define FSA9480_REG_DEVID 0x01
+#define FSA9480_REG_CTRL 0x02
+#define FSA9480_REG_INT1 0x03
+#define FSA9480_REG_INT2 0x04
+#define FSA9480_REG_INT1_MASK 0x05
+#define FSA9480_REG_INT2_MASK 0x06
+#define FSA9480_REG_ADC 0x07
+#define FSA9480_REG_TIMING1 0x08
+#define FSA9480_REG_TIMING2 0x09
+#define FSA9480_REG_DEV_T1 0x0a
+#define FSA9480_REG_DEV_T2 0x0b
+#define FSA9480_REG_BTN1 0x0c
+#define FSA9480_REG_BTN2 0x0d
+#define FSA9480_REG_CK 0x0e
+#define FSA9480_REG_CK_INT1 0x0f
+#define FSA9480_REG_CK_INT2 0x10
+#define FSA9480_REG_CK_INTMASK1 0x11
+#define FSA9480_REG_CK_INTMASK2 0x12
+#define FSA9480_REG_MANSW1 0x13
+#define FSA9480_REG_MANSW2 0x14
+#define FSA9480_REG_END 0x15
+
+/* Control */
+#define CON_SWITCH_OPEN (1 << 4)
+#define CON_RAW_DATA (1 << 3)
+#define CON_MANUAL_SW (1 << 2)
+#define CON_WAIT (1 << 1)
+#define CON_INT_MASK (1 << 0)
+#define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \
+ CON_MANUAL_SW | CON_WAIT)
+
+/* Device Type 1 */
+#define DEV_USB_OTG 7
+#define DEV_DEDICATED_CHG 6
+#define DEV_USB_CHG 5
+#define DEV_CAR_KIT 4
+#define DEV_UART 3
+#define DEV_USB 2
+#define DEV_AUDIO_2 1
+#define DEV_AUDIO_1 0
+
+#define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB)
+#define DEV_T1_UART_MASK (DEV_UART)
+#define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG)
+
+/* Device Type 2 */
+#define DEV_AV 14
+#define DEV_TTY 13
+#define DEV_PPD 12
+#define DEV_JIG_UART_OFF 11
+#define DEV_JIG_UART_ON 10
+#define DEV_JIG_USB_OFF 9
+#define DEV_JIG_USB_ON 8
+
+#define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON)
+#define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
+#define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \
+ DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
+
+/*
+ * Manual Switch
+ * D- [7:5] / D+ [4:2]
+ * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO
+ */
+#define SW_VAUDIO ((4 << 5) | (4 << 2))
+#define SW_UART ((3 << 5) | (3 << 2))
+#define SW_AUDIO ((2 << 5) | (2 << 2))
+#define SW_DHOST ((1 << 5) | (1 << 2))
+#define SW_AUTO ((0 << 5) | (0 << 2))
+
+/* Interrupt 1 */
+#define INT1_MASK (0xff << 0)
+#define INT_DETACH (1 << 1)
+#define INT_ATTACH (1 << 0)
+
+/* Interrupt 2 mask */
+#define INT2_MASK (0x1f << 0)
+
+/* Timing Set 1 */
+#define TIMING1_ADC_500MS (0x6 << 0)
+
+struct fsa9480_usbsw {
+ struct device *dev;
+ struct regmap *regmap;
+ struct extcon_dev *edev;
+ u16 cable;
+};
+
+static const unsigned int fsa9480_extcon_cable[] = {
+ EXTCON_USB_HOST,
+ EXTCON_USB,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_ACA,
+ EXTCON_JACK_LINE_OUT,
+ EXTCON_JACK_VIDEO_OUT,
+ EXTCON_JIG,
+
+ EXTCON_NONE,
+};
+
+static const u64 cable_types[] = {
+ [DEV_USB_OTG] = BIT_ULL(EXTCON_USB_HOST),
+ [DEV_DEDICATED_CHG] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_DCP),
+ [DEV_USB_CHG] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_SDP),
+ [DEV_CAR_KIT] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_SDP)
+ | BIT_ULL(EXTCON_JACK_LINE_OUT),
+ [DEV_UART] = BIT_ULL(EXTCON_JIG),
+ [DEV_USB] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_CHG_USB_SDP),
+ [DEV_AUDIO_2] = BIT_ULL(EXTCON_JACK_LINE_OUT),
+ [DEV_AUDIO_1] = BIT_ULL(EXTCON_JACK_LINE_OUT),
+ [DEV_AV] = BIT_ULL(EXTCON_JACK_LINE_OUT)
+ | BIT_ULL(EXTCON_JACK_VIDEO_OUT),
+ [DEV_TTY] = BIT_ULL(EXTCON_JIG),
+ [DEV_PPD] = BIT_ULL(EXTCON_JACK_LINE_OUT) | BIT_ULL(EXTCON_CHG_USB_ACA),
+ [DEV_JIG_UART_OFF] = BIT_ULL(EXTCON_JIG),
+ [DEV_JIG_UART_ON] = BIT_ULL(EXTCON_JIG),
+ [DEV_JIG_USB_OFF] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_JIG),
+ [DEV_JIG_USB_ON] = BIT_ULL(EXTCON_USB) | BIT_ULL(EXTCON_JIG),
+};
+
+/* Define regmap configuration of FSA9480 for I2C communication */
+static bool fsa9480_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case FSA9480_REG_INT1_MASK:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const struct regmap_config fsa9480_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = fsa9480_volatile_reg,
+ .max_register = FSA9480_REG_END,
+};
+
+static int fsa9480_write_reg(struct fsa9480_usbsw *usbsw, int reg, int value)
+{
+ int ret;
+
+ ret = regmap_write(usbsw->regmap, reg, value);
+ if (ret < 0)
+ dev_err(usbsw->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int fsa9480_read_reg(struct fsa9480_usbsw *usbsw, int reg)
+{
+ int ret, val;
+
+ ret = regmap_read(usbsw->regmap, reg, &val);
+ if (ret < 0) {
+ dev_err(usbsw->dev, "%s: err %d\n", __func__, ret);
+ return ret;
+ }
+
+ return val;
+}
+
+static int fsa9480_read_irq(struct fsa9480_usbsw *usbsw, int *value)
+{
+ u8 regs[2];
+ int ret;
+
+ ret = regmap_bulk_read(usbsw->regmap, FSA9480_REG_INT1, regs, 2);
+ if (ret < 0)
+ dev_err(usbsw->dev, "%s: err %d\n", __func__, ret);
+
+ *value = regs[1] << 8 | regs[0];
+ return ret;
+}
+
+static void fsa9480_handle_change(struct fsa9480_usbsw *usbsw,
+ u16 mask, bool attached)
+{
+ while (mask) {
+ int dev = fls64(mask) - 1;
+ u64 cables = cable_types[dev];
+
+ while (cables) {
+ int cable = fls64(cables) - 1;
+
+ extcon_set_state_sync(usbsw->edev, cable, attached);
+ cables &= ~BIT_ULL(cable);
+ }
+
+ mask &= ~BIT_ULL(dev);
+ }
+}
+
+static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw)
+{
+ int val1, val2;
+ u16 val;
+
+ val1 = fsa9480_read_reg(usbsw, FSA9480_REG_DEV_T1);
+ val2 = fsa9480_read_reg(usbsw, FSA9480_REG_DEV_T2);
+ if (val1 < 0 || val2 < 0) {
+ dev_err(usbsw->dev, "%s: failed to read registers", __func__);
+ return;
+ }
+ val = val2 << 8 | val1;
+
+ dev_info(usbsw->dev, "dev1: 0x%x, dev2: 0x%x\n", val1, val2);
+
+ /* handle detached cables first */
+ fsa9480_handle_change(usbsw, usbsw->cable & ~val, false);
+
+ /* then handle attached ones */
+ fsa9480_handle_change(usbsw, val & ~usbsw->cable, true);
+
+ usbsw->cable = val;
+}
+
+static irqreturn_t fsa9480_irq_handler(int irq, void *data)
+{
+ struct fsa9480_usbsw *usbsw = data;
+ int intr = 0;
+
+ /* clear interrupt */
+ fsa9480_read_irq(usbsw, &intr);
+ if (!intr)
+ return IRQ_NONE;
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw);
+
+ return IRQ_HANDLED;
+}
+
+static int fsa9480_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct fsa9480_usbsw *info;
+ int ret;
+
+ if (!client->irq) {
+ dev_err(&client->dev, "no interrupt provided\n");
+ return -EINVAL;
+ }
+
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->dev = &client->dev;
+
+ i2c_set_clientdata(client, info);
+
+ /* External connector */
+ info->edev = devm_extcon_dev_allocate(info->dev,
+ fsa9480_extcon_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(info->dev, "failed to allocate memory for extcon\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ ret = devm_extcon_dev_register(info->dev, info->edev);
+ if (ret) {
+ dev_err(info->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ info->regmap = devm_regmap_init_i2c(client, &fsa9480_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(info->dev, "failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* ADC Detect Time: 500ms */
+ fsa9480_write_reg(info, FSA9480_REG_TIMING1, TIMING1_ADC_500MS);
+
+ /* configure automatic switching */
+ fsa9480_write_reg(info, FSA9480_REG_CTRL, CON_MASK);
+
+ /* unmask interrupt (attach/detach only) */
+ fsa9480_write_reg(info, FSA9480_REG_INT1_MASK,
+ INT1_MASK & ~(INT_ATTACH | INT_DETACH));
+ fsa9480_write_reg(info, FSA9480_REG_INT2_MASK, INT2_MASK);
+
+ ret = devm_request_threaded_irq(info->dev, client->irq, NULL,
+ fsa9480_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "fsa9480", info);
+ if (ret) {
+ dev_err(info->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ device_init_wakeup(info->dev, true);
+ fsa9480_detect_dev(info);
+
+ return 0;
+}
+
+static int fsa9480_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fsa9480_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev) && client->irq)
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int fsa9480_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev) && client->irq)
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsa9480_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fsa9480_suspend, fsa9480_resume)
+};
+
+static const struct i2c_device_id fsa9480_id[] = {
+ { "fsa9480", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, fsa9480_id);
+
+static const struct of_device_id fsa9480_of_match[] = {
+ { .compatible = "fcs,fsa9480", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fsa9480_of_match);
+
+static struct i2c_driver fsa9480_i2c_driver = {
+ .driver = {
+ .name = "fsa9480",
+ .pm = &fsa9480_pm_ops,
+ .of_match_table = fsa9480_of_match,
+ },
+ .probe = fsa9480_probe,
+ .remove = fsa9480_remove,
+ .id_table = fsa9480_id,
+};
+
+static int __init fsa9480_module_init(void)
+{
+ return i2c_add_driver(&fsa9480_i2c_driver);
+}
+subsys_initcall(fsa9480_module_init);
+
+static void __exit fsa9480_module_exit(void)
+{
+ i2c_del_driver(&fsa9480_i2c_driver);
+}
+module_exit(fsa9480_module_exit);
+
+MODULE_DESCRIPTION("Fairchild Semiconductor FSA9480 extcon driver");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_LICENSE("GPL");
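
For reference, the detection path in this new driver is a double bitmask walk: each set bit in the combined DEV_T1/DEV_T2 word indexes cable_types[], and each set bit in that entry is an extcon cable ID to update. A standalone sketch of that walk for a hypothetical plain-USB attach (illustrative only; edev stands in for the driver's extcon device):

	u16 attached = BIT(DEV_USB);		/* bit 2 of the DEV_T1/T2 word */

	while (attached) {
		int dev = fls64(attached) - 1;		/* -> DEV_USB */
		u64 cables = cable_types[dev];		/* EXTCON_USB | EXTCON_CHG_USB_SDP */

		while (cables) {
			int cable = fls64(cables) - 1;

			extcon_set_state_sync(edev, cable, true);
			cables &= ~BIT_ULL(cable);
		}
		attached &= ~BIT_ULL(dev);
	}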
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 1f63b3034b17..7b7b4a6eedda 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -12,7 +12,7 @@
#ifndef __COREBOOT_TABLE_H
#define __COREBOOT_TABLE_H
-#include <linux/io.h>
+#include <linux/device.h>
/* Coreboot table header structure */
struct coreboot_table_header {
@@ -83,4 +83,13 @@ int coreboot_driver_register(struct coreboot_driver *driver);
/* Unregister a driver that uses the data from a coreboot table. */
void coreboot_driver_unregister(struct coreboot_driver *driver);
+/* module_coreboot_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_coreboot_driver(__coreboot_driver) \
+ module_driver(__coreboot_driver, coreboot_driver_register, \
+ coreboot_driver_unregister)
+
#endif /* __COREBOOT_TABLE_H */
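
The hunks that follow convert the in-tree coreboot drivers to this macro; a hypothetical new driver would reduce to the sketch below (driver name and tag are made up for illustration):

	static struct coreboot_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
		.drv	= {
			.name = "foo",
		},
		.tag	= CB_TAG_FOO,	/* hypothetical tag */
	};
	module_coreboot_driver(foo_driver);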
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 7e67b651e4ac..916f26adc595 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -89,19 +89,7 @@ static struct coreboot_driver framebuffer_driver = {
},
.tag = CB_TAG_FRAMEBUFFER,
};
-
-static int __init coreboot_framebuffer_init(void)
-{
- return coreboot_driver_register(&framebuffer_driver);
-}
-
-static void coreboot_framebuffer_exit(void)
-{
- coreboot_driver_unregister(&framebuffer_driver);
-}
-
-module_init(coreboot_framebuffer_init);
-module_exit(coreboot_framebuffer_exit);
+module_coreboot_driver(framebuffer_driver);
MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index ac90e8536565..fd7f0fbec07e 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -8,6 +8,7 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -26,7 +27,7 @@ struct cbmem_cons {
#define CURSOR_MASK ((1 << 28) - 1)
#define OVERFLOW (1 << 31)
-static struct cbmem_cons __iomem *cbmem_console;
+static struct cbmem_cons *cbmem_console;
static u32 cbmem_console_size;
/*
@@ -67,7 +68,7 @@ static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
static int memconsole_probe(struct coreboot_device *dev)
{
- struct cbmem_cons __iomem *tmp_cbmc;
+ struct cbmem_cons *tmp_cbmc;
tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr,
sizeof(*tmp_cbmc), MEMREMAP_WB);
@@ -77,13 +78,13 @@ static int memconsole_probe(struct coreboot_device *dev)
/* Read size only once to prevent overrun attack through /dev/mem. */
cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
- cbmem_console = memremap(dev->cbmem_ref.cbmem_addr,
+ cbmem_console = devm_memremap(&dev->dev, dev->cbmem_ref.cbmem_addr,
cbmem_console_size + sizeof(*cbmem_console),
MEMREMAP_WB);
memunmap(tmp_cbmc);
- if (!cbmem_console)
- return -ENOMEM;
+ if (IS_ERR(cbmem_console))
+ return PTR_ERR(cbmem_console);
memconsole_setup(memconsole_coreboot_read);
@@ -94,9 +95,6 @@ static int memconsole_remove(struct coreboot_device *dev)
{
memconsole_exit();
- if (cbmem_console)
- memunmap(cbmem_console);
-
return 0;
}
@@ -108,19 +106,7 @@ static struct coreboot_driver memconsole_driver = {
},
.tag = CB_TAG_CBMEM_CONSOLE,
};
-
-static void coreboot_memconsole_exit(void)
-{
- coreboot_driver_unregister(&memconsole_driver);
-}
-
-static int __init coreboot_memconsole_init(void)
-{
- return coreboot_driver_register(&memconsole_driver);
-}
-
-module_exit(coreboot_memconsole_exit);
-module_init(coreboot_memconsole_init);
+module_coreboot_driver(memconsole_driver);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index fe5aa740c34d..44d314ad69e4 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -7,21 +7,22 @@
* Copyright 2017 Google Inc.
*/
-#include <linux/init.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include "memconsole.h"
-static ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
-
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
+ ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
+
+ memconsole_read_func = bin_attr->private;
if (WARN_ON_ONCE(!memconsole_read_func))
return -EIO;
+
return memconsole_read_func(buf, pos, count);
}
@@ -32,7 +33,7 @@ static struct bin_attribute memconsole_bin_attr = {
void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
{
- memconsole_read_func = read_func;
+ memconsole_bin_attr.private = read_func;
}
EXPORT_SYMBOL(memconsole_setup);
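
The change above replaces the file-scope function pointer with per-attribute state: the callback now travels in bin_attribute.private and is fetched inside the read handler. A minimal sketch of that pattern outside this driver (names are illustrative):

	static ssize_t foo_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t pos, size_t count)
	{
		ssize_t (*read_func)(char *, loff_t, size_t) = attr->private;

		return read_func ? read_func(buf, pos, count) : -EIO;
	}

	static struct bin_attribute foo_attr = {
		.attr = { .name = "foo", .mode = 0444 },
		.read = foo_read,
	};

	/* at setup time: foo_attr.private = some_read_func; */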
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index fd5212c395c0..0739f3b70347 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -316,19 +316,7 @@ static struct coreboot_driver vpd_driver = {
},
.tag = CB_TAG_VPD,
};
-
-static int __init coreboot_vpd_init(void)
-{
- return coreboot_driver_register(&vpd_driver);
-}
-
-static void __exit coreboot_vpd_exit(void)
-{
- coreboot_driver_unregister(&vpd_driver);
-}
-
-module_init(coreboot_vpd_init);
-module_exit(coreboot_vpd_exit);
+module_coreboot_driver(vpd_driver);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index c62fa7063a7c..92e3258552fc 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -7,8 +7,6 @@
* Copyright 2017 Google Inc.
*/
-#include <linux/export.h>
-
#include "vpd_decode.h"
static int vpd_decode_len(const s32 max_len, const u8 *in,
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 8072c195d831..474f304ec109 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -26,9 +26,9 @@ config FPGA_MGR_SOCFPGA_A10
FPGA manager driver support for Altera Arria10 SoCFPGA.
config ALTERA_PR_IP_CORE
- tristate "Altera Partial Reconfiguration IP Core"
- help
- Core driver support for Altera Partial Reconfiguration IP component
+ tristate "Altera Partial Reconfiguration IP Core"
+ help
+ Core driver support for Altera Partial Reconfiguration IP component
config ALTERA_PR_IP_CORE_PLAT
tristate "Platform support of Altera Partial Reconfiguration IP Core"
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index 76f37709dd1a..b3f7eee3c93f 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -30,8 +30,8 @@
#define FME_PR_STS 0x10
#define FME_PR_DATA 0x18
#define FME_PR_ERR 0x20
-#define FME_PR_INTFC_ID_H 0xA8
-#define FME_PR_INTFC_ID_L 0xB0
+#define FME_PR_INTFC_ID_L 0xA8
+#define FME_PR_INTFC_ID_H 0xB0
/* FME PR Control Register Bitfield */
#define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index d9ca9554844a..3c71dc3faaf5 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -74,6 +74,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
struct dfl_fme *fme;
unsigned long minsz;
void *buf = NULL;
+ size_t length;
int ret = 0;
u64 v;
@@ -85,9 +86,6 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
if (port_pr.argsz < minsz || port_pr.flags)
return -EINVAL;
- if (!IS_ALIGNED(port_pr.buffer_size, 4))
- return -EINVAL;
-
/* get fme header region */
fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
FME_FEATURE_ID_HEADER);
@@ -103,7 +101,13 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
port_pr.buffer_size))
return -EFAULT;
- buf = vmalloc(port_pr.buffer_size);
+ /*
+ * align PR buffer per PR bandwidth, as HW ignores the extra padding
+ * data automatically.
+ */
+ length = ALIGN(port_pr.buffer_size, 4);
+
+ buf = vmalloc(length);
if (!buf)
return -ENOMEM;
@@ -140,7 +144,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
fpga_image_info_free(region->info);
info->buf = buf;
- info->count = port_pr.buffer_size;
+ info->count = length;
info->region_id = port_pr.port_id;
region->info = info;
@@ -159,9 +163,6 @@ unlock_exit:
mutex_unlock(&pdata->lock);
free_exit:
vfree(buf);
- if (copy_to_user((void __user *)arg, &port_pr, minsz))
- return -EFAULT;
-
return ret;
}
diff --git a/drivers/fsi/cf-fsi-fw.h b/drivers/fsi/cf-fsi-fw.h
index 712df0461911..1118eaf7ee39 100644
--- a/drivers/fsi/cf-fsi-fw.h
+++ b/drivers/fsi/cf-fsi-fw.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __CF_FSI_FW_H
#define __CF_FSI_FW_H
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 1d83f3ba478b..1f76740f33b6 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -1029,6 +1029,14 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
}
+ rc = fsi_slave_set_smode(slave);
+ if (rc) {
+ dev_warn(&master->dev,
+ "can't set smode on slave:%02x:%02x %d\n",
+ link, id, rc);
+ goto err_free;
+ }
+
/* Allocate a minor in the FSI space */
rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
&slave->cdev_idx);
@@ -1040,17 +1048,14 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
rc = cdev_device_add(&slave->cdev, &slave->dev);
if (rc) {
dev_err(&slave->dev, "Error %d creating slave device\n", rc);
- goto err_free;
+ goto err_free_ida;
}
- rc = fsi_slave_set_smode(slave);
- if (rc) {
- dev_warn(&master->dev,
- "can't set smode on slave:%02x:%02x %d\n",
- link, id, rc);
- kfree(slave);
- return -ENODEV;
- }
+ /* Now that we have the cdev registered with the core, any fatal
+ * failures beyond this point will need to clean up through
+ * cdev_device_del(). Fortunately though, nothing past here is fatal.
+ */
+
if (master->link_config)
master->link_config(master, link,
slave->t_send_delay,
@@ -1067,10 +1072,13 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
dev_dbg(&master->dev, "failed during slave scan with: %d\n",
rc);
- return rc;
+ return 0;
- err_free:
- put_device(&slave->dev);
+err_free_ida:
+ fsi_free_minor(slave->dev.devt);
+err_free:
+ of_node_put(slave->dev.of_node);
+ kfree(slave);
return rc;
}
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index a2301cea1cbb..7da9c81759ac 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -412,6 +412,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS);
struct occ *occ = dev_get_drvdata(dev);
struct occ_response *resp = response;
+ u8 seq_no;
u16 resp_data_length;
unsigned long start;
int rc;
@@ -426,6 +427,8 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
mutex_lock(&occ->occ_lock);
+ /* Extract the seq_no from the command (first byte) */
+ seq_no = *(const u8 *)request;
rc = occ_putsram(occ, OCC_SRAM_CMD_ADDR, request, req_len);
if (rc)
goto done;
@@ -441,11 +444,17 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
if (rc)
goto done;
- if (resp->return_status == OCC_RESP_CMD_IN_PRG) {
+ if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
+ resp->seq_no != seq_no) {
rc = -ETIMEDOUT;
- if (time_after(jiffies, start + timeout))
- break;
+ if (time_after(jiffies, start + timeout)) {
+ dev_err(occ->dev, "resp timeout status=%02x "
+ "resp seq_no=%d our seq_no=%d\n",
+ resp->return_status, resp->seq_no,
+ seq_no);
+ goto done;
+ }
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(wait_time);
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index d92f5b87c251..f54df9ebc8b3 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -289,11 +289,11 @@ static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
case SBE_STATE_UNKNOWN:
return -ESHUTDOWN;
+ case SBE_STATE_DMT:
+ return -EBUSY;
case SBE_STATE_IPLING:
case SBE_STATE_ISTEP:
case SBE_STATE_MPIPL:
- case SBE_STATE_DMT:
- return -EBUSY;
case SBE_STATE_RUNTIME:
case SBE_STATE_DUMP: /* Not sure about that one */
break;
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index d593517af5c2..a7d2b16dd702 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -124,12 +124,12 @@ struct extended_sensor {
static int occ_poll(struct occ *occ)
{
int rc;
- u16 checksum = occ->poll_cmd_data + 1;
+ u16 checksum = occ->poll_cmd_data + occ->seq_no + 1;
u8 cmd[8];
struct occ_poll_response_header *header;
/* big endian */
- cmd[0] = 0; /* sequence number */
+ cmd[0] = occ->seq_no++; /* sequence number */
cmd[1] = 0; /* cmd type */
cmd[2] = 0; /* data length msb */
cmd[3] = 1; /* data length lsb */
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index fc13f3c73c47..67e6968b8978 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -95,6 +95,7 @@ struct occ {
struct occ_sensors sensors;
int powr_sample_time_us; /* average power sample time */
+ u8 seq_no;
u8 poll_cmd_data; /* to perform OCC poll command */
int (*send_cmd)(struct occ *occ, u8 *cmd);
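
With the new seq_no field, the poll command's checksum becomes the byte-wise sum of the header (seq_no + 0 + 0 + 1 + poll_cmd_data), which is what the occ_poll() hunk computes up front. A sketch of how the 8-byte command would be assembled; the checksum placement in bytes 5-6 is an assumption here, since those lines are outside the hunk shown:

	u8 cmd[8] = { 0 };
	u16 checksum = occ->poll_cmd_data + occ->seq_no + 1;

	cmd[0] = occ->seq_no++;		/* later matched against resp->seq_no */
	cmd[1] = 0;			/* cmd type: poll */
	cmd[2] = 0;			/* data length msb */
	cmd[3] = 1;			/* data length lsb */
	cmd[4] = occ->poll_cmd_data;
	cmd[5] = checksum >> 8;		/* assumed big-endian checksum */
	cmd[6] = checksum & 0xff;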
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 5487d4a1abc2..14638db4991d 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
+ depends on OF || ACPI
select ARM_AMBA
select PERF_EVENTS
help
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 3b435aa42af5..3c0ac421e211 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,8 +2,7 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o
-obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o coresight-platform.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
coresight-tmc-etf.o \
coresight-tmc-etr.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 4ea68a3522e9..16ebf38a9f66 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -28,6 +28,8 @@
#define catu_dbg(x, ...) do {} while (0)
#endif
+DEFINE_CORESIGHT_DEVLIST(catu_devs, "catu");
+
struct catu_etr_buf {
struct tmc_sg_table *catu_table;
dma_addr_t sladdr;
@@ -328,19 +330,18 @@ static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
struct etr_buf *etr_buf, int node, void **pages)
{
struct coresight_device *csdev;
- struct device *catu_dev;
struct tmc_sg_table *catu_table;
struct catu_etr_buf *catu_buf;
csdev = tmc_etr_get_catu_device(tmc_drvdata);
if (!csdev)
return -ENODEV;
- catu_dev = csdev->dev.parent;
catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
if (!catu_buf)
return -ENOMEM;
- catu_table = catu_init_sg_table(catu_dev, node, etr_buf->size, pages);
+ catu_table = catu_init_sg_table(&csdev->dev, node,
+ etr_buf->size, pages);
if (IS_ERR(catu_table)) {
kfree(catu_buf);
return PTR_ERR(catu_table);
@@ -409,13 +410,14 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
int rc;
u32 control, mode;
struct etr_buf *etr_buf = data;
+ struct device *dev = &drvdata->csdev->dev;
if (catu_wait_for_ready(drvdata))
- dev_warn(drvdata->dev, "Timeout while waiting for READY\n");
+ dev_warn(dev, "Timeout while waiting for READY\n");
control = catu_read_control(drvdata);
if (control & BIT(CATU_CONTROL_ENABLE)) {
- dev_warn(drvdata->dev, "CATU is already enabled\n");
+ dev_warn(dev, "CATU is already enabled\n");
return -EBUSY;
}
@@ -441,7 +443,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
catu_write_irqen(drvdata, 0);
catu_write_mode(drvdata, mode);
catu_write_control(drvdata, control);
- dev_dbg(drvdata->dev, "Enabled in %s mode\n",
+ dev_dbg(dev, "Enabled in %s mode\n",
(mode == CATU_MODE_PASS_THROUGH) ?
"Pass through" :
"Translate");
@@ -462,15 +464,16 @@ static int catu_enable(struct coresight_device *csdev, void *data)
static int catu_disable_hw(struct catu_drvdata *drvdata)
{
int rc = 0;
+ struct device *dev = &drvdata->csdev->dev;
catu_write_control(drvdata, 0);
coresight_disclaim_device_unlocked(drvdata->base);
if (catu_wait_for_ready(drvdata)) {
- dev_info(drvdata->dev, "Timeout while waiting for READY\n");
+ dev_info(dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
}
- dev_dbg(drvdata->dev, "Disabled\n");
+ dev_dbg(dev, "Disabled\n");
return rc;
}
@@ -502,17 +505,11 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_desc catu_desc;
struct coresight_platform_data *pdata = NULL;
struct device *dev = &adev->dev;
- struct device_node *np = dev->of_node;
void __iomem *base;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out;
- }
- dev->platform_data = pdata;
- }
+ catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
+ if (!catu_desc.name)
+ return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata) {
@@ -520,7 +517,6 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
goto out;
}
- drvdata->dev = dev;
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(base)) {
@@ -547,6 +543,13 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
goto out;
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
+ dev->platform_data = pdata;
+
drvdata->base = base;
catu_desc.pdata = pdata;
catu_desc.dev = dev;
@@ -554,6 +557,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
catu_desc.ops = &catu_ops;
+
drvdata->csdev = coresight_register(&catu_desc);
if (IS_ERR(drvdata->csdev))
ret = PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 1d2ad183fd92..80ceee3c739c 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -61,7 +61,6 @@
#define CATU_IRQEN_OFF 0x0
struct catu_drvdata {
- struct device *dev;
void __iomem *base;
struct coresight_device *csdev;
int irq;
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index e8819d750938..58bfd6319f65 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -572,14 +572,16 @@ static int debug_probe(struct amba_device *adev, const struct amba_id *id)
struct device *dev = &adev->dev;
struct debug_drvdata *drvdata;
struct resource *res = &adev->res;
- struct device_node *np = adev->dev.of_node;
int ret;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;
+ drvdata->cpu = coresight_get_cpu(dev);
+ if (drvdata->cpu < 0)
+ return drvdata->cpu;
+
if (per_cpu(debug_drvdata, drvdata->cpu)) {
dev_err(dev, "CPU%d drvdata has already been initialized\n",
drvdata->cpu);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 4ee4c80a4354..3810290e6d07 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -63,10 +63,11 @@
#define ETB_FFSR_BIT 1
#define ETB_FRAME_SIZE_WORDS 4
+DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");
+
/**
* struct etb_drvdata - specifics associated to an ETB component
* @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
* @atclk: optional clock for the core parts of the ETB.
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
@@ -81,7 +82,6 @@
*/
struct etb_drvdata {
void __iomem *base;
- struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
struct miscdevice miscdev;
@@ -227,7 +227,6 @@ out:
static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
int ret;
- struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
switch (mode) {
case CS_MODE_SYSFS:
@@ -244,13 +243,14 @@ static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
if (ret)
return ret;
- dev_dbg(drvdata->dev, "ETB enabled\n");
+ dev_dbg(&csdev->dev, "ETB enabled\n");
return 0;
}
static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
u32 ffcr;
+ struct device *dev = &drvdata->csdev->dev;
CS_UNLOCK(drvdata->base);
@@ -263,7 +263,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
- dev_err(drvdata->dev,
+ dev_err(dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -271,7 +271,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
- dev_err(drvdata->dev,
+ dev_err(dev,
"timeout while waiting for Formatter to Stop\n");
}
@@ -286,6 +286,7 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
+ struct device *dev = &drvdata->csdev->dev;
CS_UNLOCK(drvdata->base);
@@ -295,10 +296,10 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
if (frame_off) {
- dev_err(drvdata->dev,
+ dev_err(dev,
"write_ptr: %lu not aligned to formatter frame size\n",
(unsigned long)write_ptr);
- dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
+ dev_err(dev, "frameoff: %lu, frame_endoff: %lu\n",
(unsigned long)frame_off, (unsigned long)frame_endoff);
write_ptr += frame_endoff;
}
@@ -365,7 +366,7 @@ static int etb_disable(struct coresight_device *csdev)
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "ETB disabled\n");
+ dev_dbg(&csdev->dev, "ETB disabled\n");
return 0;
}
@@ -373,12 +374,10 @@ static void *etb_alloc_buffer(struct coresight_device *csdev,
struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- int node, cpu = event->cpu;
+ int node;
struct cs_buffers *buf;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
+ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
if (!buf)
@@ -460,7 +459,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
* chance to fix things.
*/
if (write_ptr % ETB_FRAME_SIZE_WORDS) {
- dev_err(drvdata->dev,
+ dev_err(&csdev->dev,
"write_ptr: %lu not aligned to formatter frame size\n",
(unsigned long)write_ptr);
@@ -512,7 +511,13 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
lost = true;
}
- if (lost)
+ /*
+ * Don't set the TRUNCATED flag in snapshot mode because 1) the
+ * captured buffer is expected to be truncated and 2) a full buffer
+ * prevents the event from being re-enabled by the perf core,
+ * resulting in stale data being sent to user space.
+ */
+ if (!buf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
/* finally tell HW where we want to start reading from */
@@ -548,13 +553,14 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
/*
- * In snapshot mode we have to update the handle->head to point
- * to the new location.
+ * In snapshot mode we simply increment the head by the number of bytes
+ * that were written. The user space function cs_etm_find_snapshot() will
+ * figure out how many bytes to get from the AUX buffer based on the
+ * position of the head.
*/
- if (buf->snapshot) {
- handle->head = (cur * PAGE_SIZE) + offset;
- to_read = buf->nr_pages << PAGE_SHIFT;
- }
+ if (buf->snapshot)
+ handle->head += to_read;
+
__etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
out:
@@ -587,7 +593,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "ETB dumped\n");
+ dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}
static int etb_open(struct inode *inode, struct file *file)
@@ -598,7 +604,7 @@ static int etb_open(struct inode *inode, struct file *file)
if (local_cmpxchg(&drvdata->reading, 0, 1))
return -EBUSY;
- dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+ dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
return 0;
}
@@ -608,6 +614,7 @@ static ssize_t etb_read(struct file *file, char __user *data,
u32 depth;
struct etb_drvdata *drvdata = container_of(file->private_data,
struct etb_drvdata, miscdev);
+ struct device *dev = &drvdata->csdev->dev;
etb_dump(drvdata);
@@ -616,13 +623,14 @@ static ssize_t etb_read(struct file *file, char __user *data,
len = depth * 4 - *ppos;
if (copy_to_user(data, drvdata->buf + *ppos, len)) {
- dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ dev_dbg(dev,
+ "%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
*ppos += len;
- dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
+ dev_dbg(dev, "%s: %zu bytes copied, %d bytes left\n",
__func__, len, (int)(depth * 4 - *ppos));
return len;
}
@@ -633,7 +641,7 @@ static int etb_release(struct inode *inode, struct file *file)
struct etb_drvdata, miscdev);
local_set(&drvdata->reading, 0);
- dev_dbg(drvdata->dev, "%s: released\n", __func__);
+ dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
return 0;
}
@@ -724,20 +732,15 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
struct etb_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
+ desc.name = coresight_alloc_device_name(&etb_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &adev->dev;
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -768,6 +771,11 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
/* This device is not associated with a session */
drvdata->pid = -1;
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &etb_cs_ops;
@@ -778,7 +786,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- drvdata->miscdev.name = pdata->name;
+ drvdata->miscdev.name = desc.name;
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
drvdata->miscdev.fops = &etb_fops;
ret = misc_register(&drvdata->miscdev);
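The ETB hunks above drop the per-driver name taken from pdata in favour of a framework-allocated one. A minimal sketch of that naming pattern, assuming the DEFINE_CORESIGHT_DEVLIST()/coresight_alloc_device_name() helpers this series introduces (names here are illustrative):

#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/errno.h>

/* One shared list per device class; names come out as "example0", "example1", ... */
DEFINE_CORESIGHT_DEVLIST(example_devs, "example");

static int example_set_name(struct device *dev, struct coresight_desc *desc)
{
	/* Allocate a unique indexed name and associate it with this device. */
	desc->name = coresight_alloc_device_name(&example_devs, dev);
	if (!desc->name)
		return -ENOMEM;
	return 0;
}

The same name is then reused for the misc device, which is why the miscdev entry above switches from pdata->name to desc.name.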
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 3c6294432748..5c1ca0df5cb0 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -523,7 +523,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
unsigned long hash;
const char *name;
struct device *pmu_dev = etm_pmu.dev;
- struct device *pdev = csdev->dev.parent;
+ struct device *dev = &csdev->dev;
struct dev_ext_attribute *ea;
if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
@@ -536,15 +536,15 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
if (!etm_perf_up)
return -EPROBE_DEFER;
- ea = devm_kzalloc(pdev, sizeof(*ea), GFP_KERNEL);
+ ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
if (!ea)
return -ENOMEM;
- name = dev_name(pdev);
+ name = dev_name(dev);
/* See function coresight_get_sink_by_id() to know where this is used */
hash = hashlen_hash(hashlen_string(NULL, name));
- ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
+ ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!ea->attr.attr.name)
return -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 79e1ad860d8a..f3ab96eaf44e 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -208,7 +208,6 @@ struct etm_config {
/**
* struct etm_drvdata - specifics associated to an ETM component
* @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
* @atclk: optional clock for the core parts of the ETM.
* @csdev: component vitals needed by the framework.
* @spinlock: only one at a time pls.
@@ -232,7 +231,6 @@ struct etm_config {
*/
struct etm_drvdata {
void __iomem *base;
- struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
@@ -260,7 +258,7 @@ static inline void etm_writel(struct etm_drvdata *drvdata,
{
if (drvdata->use_cp14) {
if (etm_writel_cp14(off, val)) {
- dev_err(drvdata->dev,
+ dev_err(&drvdata->csdev->dev,
"invalid CP14 access to ETM reg: %#x", off);
}
} else {
@@ -274,7 +272,7 @@ static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
if (drvdata->use_cp14) {
if (etm_readl_cp14(off, &val)) {
- dev_err(drvdata->dev,
+ dev_err(&drvdata->csdev->dev,
"invalid CP14 access to ETM reg: %#x", off);
}
} else {
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 75487b3fad86..e8c7649f123e 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -48,7 +48,7 @@ static ssize_t etmsr_show(struct device *dev,
unsigned long flags, val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- pm_runtime_get_sync(drvdata->dev);
+ pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -56,7 +56,7 @@ static ssize_t etmsr_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
+ pm_runtime_put(dev->parent);
return sprintf(buf, "%#lx\n", val);
}
@@ -131,7 +131,7 @@ static ssize_t mode_store(struct device *dev,
if (config->mode & ETM_MODE_STALL) {
if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
- dev_warn(drvdata->dev, "stall mode not supported\n");
+ dev_warn(dev, "stall mode not supported\n");
ret = -EINVAL;
goto err_unlock;
}
@@ -141,7 +141,7 @@ static ssize_t mode_store(struct device *dev,
if (config->mode & ETM_MODE_TIMESTAMP) {
if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
- dev_warn(drvdata->dev, "timestamp not supported\n");
+ dev_warn(dev, "timestamp not supported\n");
ret = -EINVAL;
goto err_unlock;
}
@@ -945,7 +945,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
goto out;
}
- pm_runtime_get_sync(drvdata->dev);
+ pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -953,7 +953,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
+ pm_runtime_put(dev->parent);
out:
return sprintf(buf, "%#lx\n", val);
}
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index be302ec5f66b..e2cb6873c3f2 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -165,7 +165,7 @@ static void etm_set_prog(struct etm_drvdata *drvdata)
*/
isb();
if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
- dev_err(drvdata->dev,
+ dev_err(&drvdata->csdev->dev,
"%s: timeout observed when probing at offset %#x\n",
__func__, ETMSR);
}
@@ -184,7 +184,7 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
*/
isb();
if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
- dev_err(drvdata->dev,
+ dev_err(&drvdata->csdev->dev,
"%s: timeout observed when probing at offset %#x\n",
__func__, ETMSR);
}
@@ -425,7 +425,7 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
done:
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
+ dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
return rc;
}
@@ -455,14 +455,16 @@ int etm_get_trace_id(struct etm_drvdata *drvdata)
{
unsigned long flags;
int trace_id = -1;
+ struct device *etm_dev;
if (!drvdata)
goto out;
+ etm_dev = drvdata->csdev->dev.parent;
if (!local_read(&drvdata->mode))
return drvdata->traceid;
- pm_runtime_get_sync(drvdata->dev);
+ pm_runtime_get_sync(etm_dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -471,7 +473,7 @@ int etm_get_trace_id(struct etm_drvdata *drvdata)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
+ pm_runtime_put(etm_dev);
out:
return trace_id;
@@ -526,7 +528,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
if (!ret)
- dev_dbg(drvdata->dev, "ETM tracing enabled\n");
+ dev_dbg(&csdev->dev, "ETM tracing enabled\n");
return ret;
}
@@ -581,7 +583,8 @@ static void etm_disable_hw(void *info)
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
+ dev_dbg(&drvdata->csdev->dev,
+ "cpu: %d disable smp call done\n", drvdata->cpu);
}
static void etm_disable_perf(struct coresight_device *csdev)
@@ -628,7 +631,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
- dev_dbg(drvdata->dev, "ETM tracing disabled\n");
+ dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
static void etm_disable(struct coresight_device *csdev,
@@ -788,22 +791,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
struct etm_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
-
- adev->dev.platform_data = pdata;
- drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
- }
-
- drvdata->dev = &adev->dev;
+ drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14");
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -822,7 +815,13 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
}
- drvdata->cpu = pdata ? pdata->cpu : 0;
+ drvdata->cpu = coresight_get_cpu(dev);
+ if (drvdata->cpu < 0)
+ return drvdata->cpu;
+
+ desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
+ if (!desc.name)
+ return -ENOMEM;
cpus_read_lock();
etmdrvdata[drvdata->cpu] = drvdata;
@@ -852,6 +851,13 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
etm_init_trace_id(drvdata);
etm_set_default(&drvdata->config);
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_arch_supported;
+ }
+ adev->dev.platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc.ops = &etm_cs_ops;
@@ -871,7 +877,8 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id));
+ dev_info(&drvdata->csdev->dev,
+ "%s initialized\n", (char *)coresight_get_uci_data(id));
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 8bb0092c7ec2..7bcac8896fc1 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -88,6 +88,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
struct etmv4_config *config = &drvdata->config;
+ struct device *etm_dev = &drvdata->csdev->dev;
CS_UNLOCK(drvdata->base);
@@ -102,7 +103,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
/* wait for TRCSTATR.IDLE to go up */
if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
- dev_err(drvdata->dev,
+ dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
@@ -184,13 +185,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
/* wait for TRCSTATR.IDLE to go back down to '0' */
if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
- dev_err(drvdata->dev,
+ dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
done:
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
+ dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
return rc;
}
@@ -400,7 +401,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
if (!ret)
- dev_dbg(drvdata->dev, "ETM tracing enabled\n");
+ dev_dbg(&csdev->dev, "ETM tracing enabled\n");
return ret;
}
@@ -461,7 +462,8 @@ static void etm4_disable_hw(void *info)
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
+ dev_dbg(&drvdata->csdev->dev,
+ "cpu: %d disable smp call done\n", drvdata->cpu);
}
static int etm4_disable_perf(struct coresight_device *csdev,
@@ -511,7 +513,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
- dev_dbg(drvdata->dev, "ETM tracing disabled\n");
+ dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
static void etm4_disable(struct coresight_device *csdev,
@@ -1082,20 +1084,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
struct etmv4_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
-
- drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -1107,7 +1100,13 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->cpu = pdata ? pdata->cpu : 0;
+ drvdata->cpu = coresight_get_cpu(dev);
+ if (drvdata->cpu < 0)
+ return drvdata->cpu;
+
+ desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
+ if (!desc.name)
+ return -ENOMEM;
cpus_read_lock();
etmdrvdata[drvdata->cpu] = drvdata;
@@ -1138,6 +1137,13 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err_arch_supported;
+ }
+ adev->dev.platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc.ops = &etm4_cs_ops;
@@ -1157,7 +1163,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "CPU%d: ETM v%d.%d initialized\n",
+ dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
if (boot_enable) {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 52786e9d8926..4523f10ddd0f 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -284,7 +284,6 @@ struct etmv4_config {
/**
* struct etm4_drvdata - specifics associated to an ETM component
* @base: Memory mapped base address for this component.
- * @dev: The device entity associated to this component.
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
* @mode: This tracer's mode, i.e sysFS, Perf or disabled.
@@ -340,7 +339,6 @@ struct etmv4_config {
*/
struct etmv4_drvdata {
void __iomem *base;
- struct device *dev;
struct coresight_device *csdev;
spinlock_t spinlock;
local_t mode;
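With the dev field removed from the drvdata structures, the diffs above consistently route diagnostics through the coresight device and runtime PM through its parent. A short illustrative sketch of that convention (not part of the patch):

#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct example_drvdata {
	struct coresight_device *csdev;
};

static void example_report(struct example_drvdata *drvdata)
{
	/* Messages are now tagged with the coresight device name... */
	dev_dbg(&drvdata->csdev->dev, "component enabled\n");

	/* ...while runtime PM still targets the parent AMBA/platform device. */
	pm_runtime_get_sync(drvdata->csdev->dev.parent);
	pm_runtime_put(drvdata->csdev->dev.parent);
}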
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 16b0c0e1e43a..fa97cb9ab4f9 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -29,17 +29,17 @@
#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
#define FUNNEL_ENSx_MASK 0xff
+DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
+
/**
* struct funnel_drvdata - specifics associated to a funnel component
* @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
* @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
*/
struct funnel_drvdata {
void __iomem *base;
- struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
unsigned long priority;
@@ -80,7 +80,7 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
rc = dynamic_funnel_enable_hw(drvdata, inport);
if (!rc)
- dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
+ dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
@@ -110,7 +110,7 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, inport);
- dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
+ dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
@@ -165,11 +165,11 @@ static ssize_t funnel_ctrl_show(struct device *dev,
u32 val;
struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
- pm_runtime_get_sync(drvdata->dev);
+ pm_runtime_get_sync(dev->parent);
val = get_funnel_ctrl_hw(drvdata);
- pm_runtime_put(drvdata->dev);
+ pm_runtime_put(dev->parent);
return sprintf(buf, "%#x\n", val);
}
@@ -189,23 +189,19 @@ static int funnel_probe(struct device *dev, struct resource *res)
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
struct coresight_desc desc = { 0 };
- struct device_node *np = dev->of_node;
-
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- dev->platform_data = pdata;
- }
- if (of_device_is_compatible(np, "arm,coresight-funnel"))
+ if (is_of_node(dev_fwnode(dev)) &&
+ of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n");
+ desc.name = coresight_alloc_device_name(&funnel_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = dev;
drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -229,6 +225,13 @@ static int funnel_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out_disable_clk;
+ }
+ dev->platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
@@ -241,6 +244,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
pm_runtime_put(dev);
+ ret = 0;
out_disable_clk:
if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
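The ret = 0 assignment added before the cleanup label is what lets the success path fall through without disabling the trace clock. A self-contained sketch of that probe tail, using illustrative names rather than the funnel's own:

#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

struct example_drvdata {
	struct clk *atclk;
	struct coresight_device *csdev;
};

static int example_probe_tail(struct device *dev, struct example_drvdata *drvdata,
			      struct coresight_desc *desc)
{
	int ret;

	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out_disable_clk;
	}

	pm_runtime_put(dev);
	ret = 0;

out_disable_clk:
	/* Only undo the optional clock on failure; success keeps it running. */
	if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);
	return ret;
}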
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
new file mode 100644
index 000000000000..cf580ffbc27c
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <linux/cpumask.h>
+#include <asm/smp_plat.h>
+
+#include "coresight-priv.h"
+/*
+ * coresight_alloc_conns: Allocate a connection record for each output
+ * port of the device.
+ */
+static int coresight_alloc_conns(struct device *dev,
+ struct coresight_platform_data *pdata)
+{
+ if (pdata->nr_outport) {
+ pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
+ sizeof(*pdata->conns),
+ GFP_KERNEL);
+ if (!pdata->conns)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int coresight_device_fwnode_match(struct device *dev, void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode;
+}
+
+static struct device *
+coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct device *dev = NULL;
+
+ /*
+ * If we have a non-configurable replicator, it will be found on the
+ * platform bus.
+ */
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, coresight_device_fwnode_match);
+ if (dev)
+ return dev;
+
+ /*
+ * We have a configurable component - cycle through the AMBA bus
+ * looking for the device that matches the endpoint node.
+ */
+ return bus_find_device(&amba_bustype, NULL,
+ fwnode, coresight_device_fwnode_match);
+}
+
+#ifdef CONFIG_OF
+static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
+{
+ return of_property_read_bool(ep, "slave-mode");
+}
+
+static void of_coresight_get_ports_legacy(const struct device_node *node,
+ int *nr_inport, int *nr_outport)
+{
+ struct device_node *ep = NULL;
+ int in = 0, out = 0;
+
+ do {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ if (of_coresight_legacy_ep_is_input(ep))
+ in++;
+ else
+ out++;
+
+ } while (ep);
+
+ *nr_inport = in;
+ *nr_outport = out;
+}
+
+static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
+{
+ struct device_node *parent = of_graph_get_port_parent(ep);
+
+ /*
+ * Skip one level up to the real device node if we
+ * are using the new bindings.
+ */
+ if (of_node_name_eq(parent, "in-ports") ||
+ of_node_name_eq(parent, "out-ports"))
+ parent = of_get_next_parent(parent);
+
+ return parent;
+}
+
+static inline struct device_node *
+of_coresight_get_input_ports_node(const struct device_node *node)
+{
+ return of_get_child_by_name(node, "in-ports");
+}
+
+static inline struct device_node *
+of_coresight_get_output_ports_node(const struct device_node *node)
+{
+ return of_get_child_by_name(node, "out-ports");
+}
+
+static inline int
+of_coresight_count_ports(struct device_node *port_parent)
+{
+ int i = 0;
+ struct device_node *ep = NULL;
+
+ while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
+ i++;
+ return i;
+}
+
+static void of_coresight_get_ports(const struct device_node *node,
+ int *nr_inport, int *nr_outport)
+{
+ struct device_node *input_ports = NULL, *output_ports = NULL;
+
+ input_ports = of_coresight_get_input_ports_node(node);
+ output_ports = of_coresight_get_output_ports_node(node);
+
+ if (input_ports || output_ports) {
+ if (input_ports) {
+ *nr_inport = of_coresight_count_ports(input_ports);
+ of_node_put(input_ports);
+ }
+ if (output_ports) {
+ *nr_outport = of_coresight_count_ports(output_ports);
+ of_node_put(output_ports);
+ }
+ } else {
+ /* Fall back to legacy DT bindings parsing */
+ of_coresight_get_ports_legacy(node, nr_inport, nr_outport);
+ }
+}
+
+static int of_coresight_get_cpu(struct device *dev)
+{
+ int cpu;
+ struct device_node *dn;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ dn = of_parse_phandle(dev->of_node, "cpu", 0);
+ if (!dn)
+ return -ENODEV;
+
+ cpu = of_cpu_node_to_id(dn);
+ of_node_put(dn);
+
+ return cpu;
+}
+
+/*
+ * of_coresight_parse_endpoint : Parse the given output endpoint @ep
+ * and fill the connection information in @conn
+ *
+ * Parses the local port, remote device name and the remote port.
+ *
+ * Returns :
+ * 1 - If the parsing is successful and a connection record
+ * was created for an output connection.
+ * 0 - If the parsing completed without any fatal errors.
+ * -Errno - Fatal error, abort the scanning.
+ */
+static int of_coresight_parse_endpoint(struct device *dev,
+ struct device_node *ep,
+ struct coresight_connection *conn)
+{
+ int ret = 0;
+ struct of_endpoint endpoint, rendpoint;
+ struct device_node *rparent = NULL;
+ struct device_node *rep = NULL;
+ struct device *rdev = NULL;
+ struct fwnode_handle *rdev_fwnode;
+
+ do {
+ /* Parse the local port details */
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ break;
+ /*
+ * Get a handle on the remote endpoint and the device it is
+ * attached to.
+ */
+ rep = of_graph_get_remote_endpoint(ep);
+ if (!rep)
+ break;
+ rparent = of_coresight_get_port_parent(rep);
+ if (!rparent)
+ break;
+ if (of_graph_parse_endpoint(rep, &rendpoint))
+ break;
+
+ rdev_fwnode = of_fwnode_handle(rparent);
+ /* If the remote device is not available, defer probing */
+ rdev = coresight_find_device_by_fwnode(rdev_fwnode);
+ if (!rdev) {
+ ret = -EPROBE_DEFER;
+ break;
+ }
+
+ conn->outport = endpoint.port;
+ /*
+ * Hold the refcount to the target device. This could be
+ * released via:
+ * 1) coresight_release_platform_data() if the probe fails or
+ * this device is unregistered.
+ * 2) While removing the target device via
+ * coresight_remove_match()
+ */
+ conn->child_fwnode = fwnode_handle_get(rdev_fwnode);
+ conn->child_port = rendpoint.port;
+ /* Connection record updated */
+ ret = 1;
+ } while (0);
+
+ of_node_put(rparent);
+ of_node_put(rep);
+ put_device(rdev);
+
+ return ret;
+}
+
+static int of_get_coresight_platform_data(struct device *dev,
+ struct coresight_platform_data *pdata)
+{
+ int ret = 0;
+ struct coresight_connection *conn;
+ struct device_node *ep = NULL;
+ const struct device_node *parent = NULL;
+ bool legacy_binding = false;
+ struct device_node *node = dev->of_node;
+
+ /* Get the number of input and output port for this component */
+ of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
+
+ /* If there are no output connections, we are done */
+ if (!pdata->nr_outport)
+ return 0;
+
+ ret = coresight_alloc_conns(dev, pdata);
+ if (ret)
+ return ret;
+
+ parent = of_coresight_get_output_ports_node(node);
+ /*
+ * If the DT uses obsoleted bindings, the ports are listed
+ * under the device and we need to filter out the input
+ * ports.
+ */
+ if (!parent) {
+ legacy_binding = true;
+ parent = node;
+ dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
+ }
+
+ conn = pdata->conns;
+
+ /* Iterate through each output port to discover topology */
+ while ((ep = of_graph_get_next_endpoint(parent, ep))) {
+ /*
+ * Legacy binding mixes input/output ports under the
+ * same parent. So, skip the input ports if we are dealing
+ * with the legacy binding, as they are processed along with their
+ * connected output ports.
+ */
+ if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
+ continue;
+
+ ret = of_coresight_parse_endpoint(dev, ep, conn);
+ switch (ret) {
+ case 1:
+ conn++; /* Fall through */
+ case 0:
+ break;
+ default:
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#else
+static inline int
+of_get_coresight_platform_data(struct device *dev,
+ struct coresight_platform_data *pdata)
+{
+ return -ENOENT;
+}
+
+static inline int of_coresight_get_cpu(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+
+#include <acpi/actypes.h>
+#include <acpi/processor.h>
+
+/* ACPI Graph _DSD UUID : "ab02a46b-74c7-45a2-bd68-f7d344ef2153" */
+static const guid_t acpi_graph_uuid = GUID_INIT(0xab02a46b, 0x74c7, 0x45a2,
+ 0xbd, 0x68, 0xf7, 0xd3,
+ 0x44, 0xef, 0x21, 0x53);
+/* Coresight ACPI Graph UUID : "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd" */
+static const guid_t coresight_graph_uuid = GUID_INIT(0x3ecbc8b6, 0x1d0e, 0x4fb3,
+ 0x81, 0x07, 0xe6, 0x27,
+ 0xf8, 0x05, 0xc6, 0xcd);
+#define ACPI_CORESIGHT_LINK_SLAVE 0
+#define ACPI_CORESIGHT_LINK_MASTER 1
+
+static inline bool is_acpi_guid(const union acpi_object *obj)
+{
+ return (obj->type == ACPI_TYPE_BUFFER) && (obj->buffer.length == 16);
+}
+
+/*
+ * acpi_guid_matches - Checks if the given object is a GUID object and
+ * that it matches the supplied GUID.
+ */
+static inline bool acpi_guid_matches(const union acpi_object *obj,
+ const guid_t *guid)
+{
+ return is_acpi_guid(obj) &&
+ guid_equal((guid_t *)obj->buffer.pointer, guid);
+}
+
+static inline bool is_acpi_dsd_graph_guid(const union acpi_object *obj)
+{
+ return acpi_guid_matches(obj, &acpi_graph_uuid);
+}
+
+static inline bool is_acpi_coresight_graph_guid(const union acpi_object *obj)
+{
+ return acpi_guid_matches(obj, &coresight_graph_uuid);
+}
+
+static inline bool is_acpi_coresight_graph(const union acpi_object *obj)
+{
+ const union acpi_object *graphid, *guid, *links;
+
+ if (obj->type != ACPI_TYPE_PACKAGE ||
+ obj->package.count < 3)
+ return false;
+
+ graphid = &obj->package.elements[0];
+ guid = &obj->package.elements[1];
+ links = &obj->package.elements[2];
+
+ if (graphid->type != ACPI_TYPE_INTEGER ||
+ links->type != ACPI_TYPE_INTEGER)
+ return false;
+
+ return is_acpi_coresight_graph_guid(guid);
+}
+
+/*
+ * acpi_validate_dsd_graph - Make sure the given _DSD graph conforms
+ * to the ACPI _DSD Graph specification.
+ *
+ * ACPI Devices Graph property has the following format:
+ * {
+ * Revision - Integer, must be 0
+ * NumberOfGraphs - Integer, N indicating the following list.
+ * Graph[1],
+ * ...
+ * Graph[N]
+ * }
+ *
+ * And each Graph entry has the following format:
+ * {
+ * GraphID - Integer, identifying a graph the device belongs to.
+ * UUID - UUID identifying the specification that governs
+ * this graph. (e.g, see is_acpi_coresight_graph())
+ * NumberOfLinks - Number "N" of connections on this node of the graph.
+ * Links[1]
+ * ...
+ * Links[N]
+ * }
+ *
+ * Where each "Links" entry has the following format:
+ *
+ * {
+ * SourcePortAddress - Integer
+ * DestinationPortAddress - Integer
+ * DestinationDeviceName - Reference to another device
+ * ( --- CoreSight specific extensions below ---)
+ * DirectionOfFlow - Integer 1 for output(master)
+ * 0 for input(slave)
+ * }
+ *
+ * e.g:
+ * For a Funnel device
+ *
+ * Device(MFUN) {
+ * ...
+ *
+ * Name (_DSD, Package() {
+ * // DSD Package contains tuples of { Property_Type_UUID, Package() }
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), //Std. Property UUID
+ * Package() {
+ * Package(2) { "property-name", <property-value> }
+ * },
+ *
+ * ToUUID("ab02a46b-74c7-45a2-bd68-f7d344ef2153"), // ACPI Graph UUID
+ * Package() {
+ * 0, // Revision
+ * 1, // NumberOfGraphs.
+ * Package() { // Graph[0] Package
+ * 1, // GraphID
+ * // Coresight Graph UUID
+ * ToUUID("3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd"),
+ * 3, // NumberOfLinks aka ports
+ * // Link[0]: Output_0 -> Replicator:Input_0
+ * Package () { 0, 0, \_SB_.RPL0, 1 },
+ * // Link[1]: Input_0 <- Cluster0_Funnel0:Output_0
+ * Package () { 0, 0, \_SB_.CLU0.FUN0, 0 },
+ * // Link[2]: Input_1 <- Cluster1_Funnel0:Output_0
+ * Package () { 1, 0, \_SB_.CLU1.FUN0, 0 },
+ * } // End of Graph[0] Package
+ *
+ * }, // End of ACPI Graph Property
+ * })
+ */
+static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
+{
+ int i, n;
+ const union acpi_object *rev, *nr_graphs;
+
+ /* The graph must contain at least the Revision and Number of Graphs */
+ if (graph->package.count < 2)
+ return false;
+
+ rev = &graph->package.elements[0];
+ nr_graphs = &graph->package.elements[1];
+
+ if (rev->type != ACPI_TYPE_INTEGER ||
+ nr_graphs->type != ACPI_TYPE_INTEGER)
+ return false;
+
+ /* We only support revision 0 */
+ if (rev->integer.value != 0)
+ return false;
+
+ n = nr_graphs->integer.value;
+ /* CoreSight devices are only part of a single Graph */
+ if (n != 1)
+ return false;
+
+ /* Make sure the ACPI graph package has right number of elements */
+ if (graph->package.count != (n + 2))
+ return false;
+
+ /*
+ * Each entry must be a graph package with at least 3 members :
+ * { GraphID, UUID, NumberOfLinks(n), Links[.],... }
+ */
+ for (i = 2; i < n + 2; i++) {
+ const union acpi_object *obj = &graph->package.elements[i];
+
+ if (obj->type != ACPI_TYPE_PACKAGE ||
+ obj->package.count < 3)
+ return false;
+ }
+
+ return true;
+}
+
+/* acpi_get_dsd_graph - Find the _DSD Graph property for the given device. */
+const union acpi_object *
+acpi_get_dsd_graph(struct acpi_device *adev)
+{
+ int i;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ acpi_status status;
+ const union acpi_object *dsd;
+
+ status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL,
+ &buf, ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return NULL;
+
+ dsd = buf.pointer;
+
+ /*
+ * _DSD property consists of tuples { Prop_UUID, Package() }
+ * Iterate through all the packages and find the Graph.
+ */
+ for (i = 0; i + 1 < dsd->package.count; i += 2) {
+ const union acpi_object *guid, *package;
+
+ guid = &dsd->package.elements[i];
+ package = &dsd->package.elements[i + 1];
+
+ /* All _DSD elements must have a UUID and a Package */
+ if (!is_acpi_guid(guid) || package->type != ACPI_TYPE_PACKAGE)
+ break;
+ /* Skip the non-Graph _DSD packages */
+ if (!is_acpi_dsd_graph_guid(guid))
+ continue;
+ if (acpi_validate_dsd_graph(package))
+ return package;
+ /* Invalid graph format, continue */
+ dev_warn(&adev->dev, "Invalid Graph _DSD property\n");
+ }
+
+ return NULL;
+}
+
+static inline bool
+acpi_validate_coresight_graph(const union acpi_object *cs_graph)
+{
+ int nlinks;
+
+ nlinks = cs_graph->package.elements[2].integer.value;
+ /*
+ * Graph must have the following fields :
+ * { GraphID, GraphUUID, NumberOfLinks, Links... }
+ */
+ if (cs_graph->package.count != (nlinks + 3))
+ return false;
+ /* The links are validated in acpi_coresight_parse_link() */
+ return true;
+}
+
+/*
+ * acpi_get_coresight_graph - Parse the device _DSD tables and find
+ * the Graph property matching the CoreSight Graphs.
+ *
+ * Returns the pointer to the CoreSight Graph Package when found. Otherwise
+ * returns NULL.
+ */
+const union acpi_object *
+acpi_get_coresight_graph(struct acpi_device *adev)
+{
+ const union acpi_object *graph_list, *graph;
+ int i, nr_graphs;
+
+ graph_list = acpi_get_dsd_graph(adev);
+ if (!graph_list)
+ return graph_list;
+
+ nr_graphs = graph_list->package.elements[1].integer.value;
+
+ for (i = 2; i < nr_graphs + 2; i++) {
+ graph = &graph_list->package.elements[i];
+ if (!is_acpi_coresight_graph(graph))
+ continue;
+ if (acpi_validate_coresight_graph(graph))
+ return graph;
+ /* Invalid graph format */
+ break;
+ }
+
+ return NULL;
+}
+
+/*
+ * acpi_coresight_parse_link - Parse the given Graph connection
+ * of the device and populate the coresight_connection for an output
+ * connection.
+ *
+ * CoreSight Graph specification mandates that the direction of the data
+ * flow must be specified in the link. i.e,
+ *
+ * SourcePortAddress, // Integer
+ * DestinationPortAddress, // Integer
+ * DestinationDeviceName, // Reference to another device
+ * DirectionOfFlow, // 1 for output(master), 0 for input(slave)
+ *
+ * Returns the direction of the data flow [ Input(slave) or Output(master) ]
+ * upon success.
+ * Returns a negative error number otherwise.
+ */
+static int acpi_coresight_parse_link(struct acpi_device *adev,
+ const union acpi_object *link,
+ struct coresight_connection *conn)
+{
+ int rc, dir;
+ const union acpi_object *fields;
+ struct acpi_device *r_adev;
+ struct device *rdev;
+
+ if (link->type != ACPI_TYPE_PACKAGE ||
+ link->package.count != 4)
+ return -EINVAL;
+
+ fields = link->package.elements;
+
+ if (fields[0].type != ACPI_TYPE_INTEGER ||
+ fields[1].type != ACPI_TYPE_INTEGER ||
+ fields[2].type != ACPI_TYPE_LOCAL_REFERENCE ||
+ fields[3].type != ACPI_TYPE_INTEGER)
+ return -EINVAL;
+
+ rc = acpi_bus_get_device(fields[2].reference.handle, &r_adev);
+ if (rc)
+ return rc;
+
+ dir = fields[3].integer.value;
+ if (dir == ACPI_CORESIGHT_LINK_MASTER) {
+ conn->outport = fields[0].integer.value;
+ conn->child_port = fields[1].integer.value;
+ rdev = coresight_find_device_by_fwnode(&r_adev->fwnode);
+ if (!rdev)
+ return -EPROBE_DEFER;
+ /*
+ * Hold the refcount to the target device. This could be
+ * released via:
+ * 1) coresight_release_platform_data() if the probe fails or
+ * this device is unregistered.
+ * 2) While removing the target device via
+ * coresight_remove_match().
+ */
+ conn->child_fwnode = fwnode_handle_get(&r_adev->fwnode);
+ }
+
+ return dir;
+}
+
+/*
+ * acpi_coresight_parse_graph - Parse the _DSD CoreSight graph
+ * connection information and populate the supplied coresight_platform_data
+ * instance.
+ */
+static int acpi_coresight_parse_graph(struct acpi_device *adev,
+ struct coresight_platform_data *pdata)
+{
+ int rc, i, nlinks;
+ const union acpi_object *graph;
+ struct coresight_connection *conns, *ptr;
+
+ pdata->nr_inport = pdata->nr_outport = 0;
+ graph = acpi_get_coresight_graph(adev);
+ if (!graph)
+ return -ENOENT;
+
+ nlinks = graph->package.elements[2].integer.value;
+ if (!nlinks)
+ return 0;
+
+ /*
+ * To avoid scanning the table twice (once for finding the number of
+ * output links and then later for parsing the output links),
+ * cache the links information in one go and then later copy
+ * it to the pdata.
+ */
+ conns = devm_kcalloc(&adev->dev, nlinks, sizeof(*conns), GFP_KERNEL);
+ if (!conns)
+ return -ENOMEM;
+ ptr = conns;
+ for (i = 0; i < nlinks; i++) {
+ const union acpi_object *link = &graph->package.elements[3 + i];
+ int dir;
+
+ dir = acpi_coresight_parse_link(adev, link, ptr);
+ if (dir < 0)
+ return dir;
+
+ if (dir == ACPI_CORESIGHT_LINK_MASTER) {
+ pdata->nr_outport++;
+ ptr++;
+ } else {
+ pdata->nr_inport++;
+ }
+ }
+
+ rc = coresight_alloc_conns(&adev->dev, pdata);
+ if (rc)
+ return rc;
+
+ /* Copy the connection information to the final location */
+ for (i = 0; i < pdata->nr_outport; i++)
+ pdata->conns[i] = conns[i];
+
+ devm_kfree(&adev->dev, conns);
+ return 0;
+}
+
+/*
+ * acpi_handle_to_logical_cpuid - Map a given acpi_handle to the
+ * logical CPU id of the corresponding CPU device.
+ *
+ * Returns the logical CPU id when found. Otherwise returns >= nr_cpu_ids.
+ */
+static int
+acpi_handle_to_logical_cpuid(acpi_handle handle)
+{
+ int i;
+ struct acpi_processor *pr;
+
+ for_each_possible_cpu(i) {
+ pr = per_cpu(processors, i);
+ if (pr && pr->handle == handle)
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * acpi_coresight_get_cpu - Find the logical CPU id of the CPU associated
+ * with this coresight device. With ACPI bindings, the CoreSight components
+ * are listed as child devices of the associated CPU.
+ *
+ * Returns the logical CPU id when found. Otherwise returns -ENODEV.
+ */
+static int acpi_coresight_get_cpu(struct device *dev)
+{
+ int cpu;
+ acpi_handle cpu_handle;
+ acpi_status status;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return -ENODEV;
+ status = acpi_get_parent(adev->handle, &cpu_handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ cpu = acpi_handle_to_logical_cpuid(cpu_handle);
+ if (cpu >= nr_cpu_ids)
+ return -ENODEV;
+ return cpu;
+}
+
+static int
+acpi_get_coresight_platform_data(struct device *dev,
+ struct coresight_platform_data *pdata)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -EINVAL;
+
+ return acpi_coresight_parse_graph(adev, pdata);
+}
+
+#else
+
+static inline int
+acpi_get_coresight_platform_data(struct device *dev,
+ struct coresight_platform_data *pdata)
+{
+ return -ENOENT;
+}
+
+static inline int acpi_coresight_get_cpu(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+int coresight_get_cpu(struct device *dev)
+{
+ if (is_of_node(dev->fwnode))
+ return of_coresight_get_cpu(dev);
+ else if (is_acpi_device_node(dev->fwnode))
+ return acpi_coresight_get_cpu(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(coresight_get_cpu);
+
+struct coresight_platform_data *
+coresight_get_platform_data(struct device *dev)
+{
+ int ret = -ENOENT;
+ struct coresight_platform_data *pdata = NULL;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (IS_ERR_OR_NULL(fwnode))
+ goto error;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (is_of_node(fwnode))
+ ret = of_get_coresight_platform_data(dev, pdata);
+ else if (is_acpi_device_node(fwnode))
+ ret = acpi_get_coresight_platform_data(dev, pdata);
+
+ if (!ret)
+ return pdata;
+error:
+ if (!IS_ERR_OR_NULL(pdata))
+ /* Cleanup the connection information */
+ coresight_release_platform_data(pdata);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(coresight_get_platform_data);
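Together, the two exported helpers above replace the old OF-only parsing with firmware-agnostic lookups. A minimal sketch of how the converted probes earlier in this series consume them (the function below is illustrative, not part of the patch):

#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_parse_firmware(struct device *dev,
				  struct coresight_desc *desc, int *cpu)
{
	struct coresight_platform_data *pdata;

	/* Valid for both DT- and ACPI-described components. */
	*cpu = coresight_get_cpu(dev);
	if (*cpu < 0)
		return *cpu;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	dev->platform_data = pdata;
	desc->pdata = pdata;
	return 0;
}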
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index e0684d06e9ee..8b07fe55395a 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -200,4 +200,8 @@ static inline void *coresight_get_uci_data(const struct amba_id *id)
return 0;
}
+void coresight_release_platform_data(struct coresight_platform_data *pdata);
+
+int coresight_device_fwnode_match(struct device *dev, void *fwnode);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 8c9ce74498e1..b7d6d59d56db 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -5,6 +5,7 @@
* Description: CoreSight Replicator driver
*/
+#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -22,17 +23,17 @@
#define REPLICATOR_IDFILTER0 0x000
#define REPLICATOR_IDFILTER1 0x004
+DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
+
/**
* struct replicator_drvdata - specifics associated to a replicator component
* @base: memory mapped base address for this component. Also indicates
* whether this one is programmable or not.
- * @dev: the device entity associated with this component
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
*/
struct replicator_drvdata {
void __iomem *base;
- struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
};
@@ -100,7 +101,7 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, inport, outport);
if (!rc)
- dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
+ dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
return rc;
}
@@ -139,7 +140,7 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
if (drvdata->base)
dynamic_replicator_disable(drvdata, inport, outport);
- dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
+ dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -179,24 +180,20 @@ static int replicator_probe(struct device *dev, struct resource *res)
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
- struct device_node *np = dev->of_node;
void __iomem *base;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- dev->platform_data = pdata;
- }
-
- if (of_device_is_compatible(np, "arm,coresight-replicator"))
+ if (is_of_node(dev_fwnode(dev)) &&
+ of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n");
+ desc.name = coresight_alloc_device_name(&replicator_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = dev;
drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -220,11 +217,19 @@ static int replicator_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out_disable_clk;
+ }
+ dev->platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
desc.pdata = dev->platform_data;
desc.dev = dev;
+
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
@@ -292,11 +297,19 @@ static const struct of_device_id static_replicator_match[] = {
{}
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id static_replicator_acpi_ids[] = {
+ {"ARMHC985", 0}, /* ARM CoreSight Static Replicator */
+ {}
+};
+#endif
+
static struct platform_driver static_replicator_driver = {
.probe = static_replicator_probe,
.driver = {
.name = "coresight-static-replicator",
- .of_match_table = static_replicator_match,
+ .of_match_table = of_match_ptr(static_replicator_match),
+ .acpi_match_table = ACPI_PTR(static_replicator_acpi_ids),
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
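The static replicator above is the first driver in the series to accept either firmware interface. A minimal sketch of a platform driver declaring dual DT/ACPI match tables; the compatible string and HID below are hypothetical:

#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-component" },	/* hypothetical */
	{}
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id example_acpi_ids[] = {
	{ "EXAM0001", 0 },				/* hypothetical HID */
	{}
};
#endif

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name			= "example-component",
		/* of_match_ptr()/ACPI_PTR() compile out the table the kernel lacks. */
		.of_match_table		= of_match_ptr(example_of_match),
		.acpi_match_table	= ACPI_PTR(example_acpi_ids),
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");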
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 9f8a844ed7aa..b908ca104645 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -16,6 +16,7 @@
* (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org>
*/
#include <asm/local.h>
+#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/bitmap.h>
#include <linux/clk.h>
@@ -107,10 +108,11 @@ struct channel_space {
unsigned long *guaranteed;
};
+DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm");
+
/**
* struct stm_drvdata - specifics associated to an STM component
* @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
* @atclk: optional clock for the core parts of the STM.
* @csdev: component vitals needed by the framework.
* @spinlock: only one at a time pls.
@@ -128,7 +130,6 @@ struct channel_space {
*/
struct stm_drvdata {
void __iomem *base;
- struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
@@ -205,13 +206,13 @@ static int stm_enable(struct coresight_device *csdev,
if (val)
return -EBUSY;
- pm_runtime_get_sync(drvdata->dev);
+ pm_runtime_get_sync(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
stm_enable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
- dev_dbg(drvdata->dev, "STM tracing enabled\n");
+ dev_dbg(&csdev->dev, "STM tracing enabled\n");
return 0;
}
@@ -271,10 +272,10 @@ static void stm_disable(struct coresight_device *csdev,
/* Wait until the engine has completely stopped */
coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
- pm_runtime_put(drvdata->dev);
+ pm_runtime_put(csdev->dev.parent);
local_set(&drvdata->mode, CS_MODE_DISABLED);
- dev_dbg(drvdata->dev, "STM tracing disabled\n");
+ dev_dbg(&csdev->dev, "STM tracing disabled\n");
}
}
@@ -685,14 +686,15 @@ static const struct attribute_group *coresight_stm_groups[] = {
NULL,
};
-static int stm_get_resource_byname(struct device_node *np,
- char *ch_base, struct resource *res)
+#ifdef CONFIG_OF
+static int of_stm_get_stimulus_area(struct device *dev, struct resource *res)
{
const char *name = NULL;
int index = 0, found = 0;
+ struct device_node *np = dev->of_node;
while (!of_property_read_string_index(np, "reg-names", index, &name)) {
- if (strcmp(ch_base, name)) {
+ if (strcmp("stm-stimulus-base", name)) {
index++;
continue;
}
@@ -707,6 +709,70 @@ static int stm_get_resource_byname(struct device_node *np,
return of_address_to_resource(np, index, res);
}
+#else
+static inline int of_stm_get_stimulus_area(struct device *dev,
+ struct resource *res)
+{
+ return -ENOENT;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int acpi_stm_get_stimulus_area(struct device *dev, struct resource *res)
+{
+ int rc;
+ bool found_base = false;
+ struct resource_entry *rent;
+ LIST_HEAD(res_list);
+
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return -ENODEV;
+ rc = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
+ if (rc < 0)
+ return rc;
+
+ /*
+ * The stimulus base for an STM device must be listed as the second memory
+ * resource, after the programming base address, as described in
+ * "Section 2.3 Resources" in ACPI for CoreSightTM 1.0 Platform Design
+ * document (DEN0067).
+ */
+ rc = -ENOENT;
+ list_for_each_entry(rent, &res_list, node) {
+ if (resource_type(rent->res) != IORESOURCE_MEM)
+ continue;
+ if (found_base) {
+ *res = *rent->res;
+ rc = 0;
+ break;
+ }
+
+ found_base = true;
+ }
+
+ acpi_dev_free_resource_list(&res_list);
+ return rc;
+}
+#else
+static inline int acpi_stm_get_stimulus_area(struct device *dev,
+ struct resource *res)
+{
+ return -ENOENT;
+}
+#endif
+
+static int stm_get_stimulus_area(struct device *dev, struct resource *res)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode))
+ return of_stm_get_stimulus_area(dev, res);
+ else if (is_acpi_node(fwnode))
+ return acpi_stm_get_stimulus_area(dev, res);
+ return -ENOENT;
+}
static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata)
{
@@ -763,9 +829,10 @@ static void stm_init_default_data(struct stm_drvdata *drvdata)
bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
}
-static void stm_init_generic_data(struct stm_drvdata *drvdata)
+static void stm_init_generic_data(struct stm_drvdata *drvdata,
+ const char *name)
{
- drvdata->stm.name = dev_name(drvdata->dev);
+ drvdata->stm.name = name;
/*
* MasterIDs are assigned at HW design phase. As such the core is
@@ -795,19 +862,15 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
struct resource ch_res;
size_t bitmap_size;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
+ desc.name = coresight_alloc_device_name(&stm_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &adev->dev;
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -821,7 +884,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
- ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res);
+ ret = stm_get_stimulus_area(dev, &ch_res);
if (ret)
return ret;
drvdata->chs.phys = ch_res.start;
@@ -848,14 +911,22 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
stm_init_default_data(drvdata);
- stm_init_generic_data(drvdata);
+ stm_init_generic_data(drvdata, desc.name);
if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
dev_info(dev,
- "stm_register_device failed, probing deferred\n");
+ "%s : stm_register_device failed, probing deferred\n",
+ desc.name);
return -EPROBE_DEFER;
}
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto stm_unregister;
+ }
+ adev->dev.platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
desc.ops = &stm_cs_ops;
@@ -870,7 +941,8 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id));
+ dev_info(&drvdata->csdev->dev, "%s initialized\n",
+ (char *)coresight_get_uci_data(id));
return 0;
stm_unregister:
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 2527b5d3b65e..23b7ff00af5c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -280,7 +280,6 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
u32 mode, void *data)
{
int ret;
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
switch (mode) {
case CS_MODE_SYSFS:
@@ -298,7 +297,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
if (ret)
return ret;
- dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
return 0;
}
@@ -328,7 +327,7 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
return 0;
}
@@ -351,7 +350,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
- dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
@@ -371,19 +370,17 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- int node, cpu = event->cpu;
+ int node;
struct cs_buffers *buf;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
+ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
/* Allocate memory structure for interaction with Perf */
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
@@ -477,9 +474,11 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
/*
* The TMC RAM buffer may be bigger than the space available in the
* perf ring buffer (handle->size). If so advance the RRP so that we
- * get the latest trace data.
+ * get the latest trace data. In snapshot mode none of that matters
+ * since we are expected to clobber stale data in favour of the latest
+ * traces.
*/
- if (to_read > handle->size) {
+ if (!buf->snapshot && to_read > handle->size) {
u32 mask = 0;
/*
@@ -516,7 +515,13 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
lost = true;
}
- if (lost)
+ /*
+ * Don't set the TRUNCATED flag in snapshot mode because 1) the
+ * captured buffer is expected to be truncated and 2) a full buffer
+ * prevents the event from being re-enabled by the perf core,
+ * resulting in stale data being sent to user space.
+ */
+ if (!buf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
cur = buf->cur;
@@ -542,11 +547,15 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
}
}
- /* In snapshot mode we have to update the head */
- if (buf->snapshot) {
- handle->head = (cur * PAGE_SIZE) + offset;
- to_read = buf->nr_pages << PAGE_SHIFT;
- }
+ /*
+ * In snapshot mode we simply increment the head by the number of bytes
+ * that were written. The user space function cs_etm_find_snapshot() will
+ * figure out how many bytes to get from the AUX buffer based on the
+ * position of the head.
+ */
+ if (buf->snapshot)
+ handle->head += to_read;
+
CS_LOCK(drvdata->base);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
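
Both perf buffer allocators in this file (and the ETR equivalents below) now pass NUMA_NO_NODE when the event is not pinned to a CPU, instead of falling back to the local CPU. A minimal stand-alone sketch of that allocation pattern, with an invented helper name:

#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/topology.h>

/*
 * Pick an allocation node for a perf AUX scratch structure: follow the
 * event's CPU when it is CPU-bound, otherwise let the allocator decide.
 */
static void *alloc_aux_scratch(struct perf_event *event, size_t size)
{
        int node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

        return kzalloc_node(size, GFP_KERNEL, node);
}
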
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index df6e4b0b84e9..17006705287a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -162,10 +162,11 @@ static void tmc_pages_free(struct tmc_pages *tmc_pages,
struct device *dev, enum dma_data_direction dir)
{
int i;
+ struct device *real_dev = dev->parent;
for (i = 0; i < tmc_pages->nr_pages; i++) {
if (tmc_pages->daddrs && tmc_pages->daddrs[i])
- dma_unmap_page(dev, tmc_pages->daddrs[i],
+ dma_unmap_page(real_dev, tmc_pages->daddrs[i],
PAGE_SIZE, dir);
if (tmc_pages->pages && tmc_pages->pages[i])
__free_page(tmc_pages->pages[i]);
@@ -193,6 +194,7 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
int i, nr_pages;
dma_addr_t paddr;
struct page *page;
+ struct device *real_dev = dev->parent;
nr_pages = tmc_pages->nr_pages;
tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
@@ -216,8 +218,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
page = alloc_pages_node(node,
GFP_KERNEL | __GFP_ZERO, 0);
}
- paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, paddr))
+ paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(real_dev, paddr))
goto err;
tmc_pages->daddrs[i] = paddr;
tmc_pages->pages[i] = page;
@@ -304,7 +306,7 @@ static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
* and data buffers. TMC writes to the data buffers and reads from the SG
* Table pages.
*
- * @dev - Device to which page should be DMA mapped.
+ * @dev - Coresight device to which page should be DMA mapped.
* @node - Numa node for mem allocations
* @nr_tpages - Number of pages for the table entries.
* @nr_dpages - Number of pages for Data buffer.
@@ -348,13 +350,13 @@ void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
{
int i, index, start;
int npages = DIV_ROUND_UP(size, PAGE_SIZE);
- struct device *dev = table->dev;
+ struct device *real_dev = table->dev->parent;
struct tmc_pages *data = &table->data_pages;
start = offset >> PAGE_SHIFT;
for (i = start; i < (start + npages); i++) {
index = i % data->nr_pages;
- dma_sync_single_for_cpu(dev, data->daddrs[index],
+ dma_sync_single_for_cpu(real_dev, data->daddrs[index],
PAGE_SIZE, DMA_FROM_DEVICE);
}
}
@@ -363,11 +365,11 @@ void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
int i;
- struct device *dev = sg_table->dev;
+ struct device *real_dev = sg_table->dev->parent;
struct tmc_pages *table_pages = &sg_table->table_pages;
for (i = 0; i < table_pages->nr_pages; i++)
- dma_sync_single_for_device(dev, table_pages->daddrs[i],
+ dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
PAGE_SIZE, DMA_TO_DEVICE);
}
@@ -590,6 +592,7 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
void **pages)
{
struct etr_flat_buf *flat_buf;
+ struct device *real_dev = drvdata->csdev->dev.parent;
/* We cannot reuse existing pages for flat buf */
if (pages)
@@ -599,7 +602,7 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
if (!flat_buf)
return -ENOMEM;
- flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
+ flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
&flat_buf->daddr, GFP_KERNEL);
if (!flat_buf->vaddr) {
kfree(flat_buf);
@@ -607,7 +610,7 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
}
flat_buf->size = etr_buf->size;
- flat_buf->dev = drvdata->dev;
+ flat_buf->dev = &drvdata->csdev->dev;
etr_buf->hwaddr = flat_buf->daddr;
etr_buf->mode = ETR_MODE_FLAT;
etr_buf->private = flat_buf;
@@ -618,9 +621,12 @@ static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
struct etr_flat_buf *flat_buf = etr_buf->private;
- if (flat_buf && flat_buf->daddr)
- dma_free_coherent(flat_buf->dev, flat_buf->size,
+ if (flat_buf && flat_buf->daddr) {
+ struct device *real_dev = flat_buf->dev->parent;
+
+ dma_free_coherent(real_dev, flat_buf->size,
flat_buf->vaddr, flat_buf->daddr);
+ }
kfree(flat_buf);
}
@@ -666,8 +672,9 @@ static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
void **pages)
{
struct etr_sg_table *etr_table;
+ struct device *dev = &drvdata->csdev->dev;
- etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+ etr_table = tmc_init_etr_sg_table(dev, node,
etr_buf->size, pages);
if (IS_ERR(etr_table))
return -ENOMEM;
@@ -751,8 +758,8 @@ tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
return NULL;
- for (i = 0; i < etr->nr_outport; i++) {
- tmp = etr->conns[i].child_dev;
+ for (i = 0; i < etr->pdata->nr_outport; i++) {
+ tmp = etr->pdata->conns[i].child_dev;
if (tmp && coresight_is_catu_device(tmp))
return tmp;
}
@@ -823,9 +830,10 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
bool has_etr_sg, has_iommu;
bool has_sg, has_catu;
struct etr_buf *etr_buf;
+ struct device *dev = &drvdata->csdev->dev;
has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
- has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+ has_iommu = iommu_get_domain_for_dev(dev->parent);
has_catu = !!tmc_etr_get_catu_device(drvdata);
has_sg = has_catu || has_etr_sg;
@@ -863,7 +871,7 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
return ERR_PTR(rc);
}
- dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
+ dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
(unsigned long)size >> 10, etr_buf->mode);
return etr_buf;
}
@@ -1162,7 +1170,7 @@ out:
tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
- dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
return ret;
}
@@ -1178,14 +1186,11 @@ static struct etr_buf *
alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
int nr_pages, void **pages, bool snapshot)
{
- int node, cpu = event->cpu;
+ int node;
struct etr_buf *etr_buf;
unsigned long size;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
-
+ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
/*
* Try to match the perf ring buffer size if it is larger
* than the size requested via sysfs.
@@ -1317,13 +1322,11 @@ static struct etr_perf_buffer *
tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
int nr_pages, void **pages, bool snapshot)
{
- int node, cpu = event->cpu;
+ int node;
struct etr_buf *etr_buf;
struct etr_perf_buffer *etr_perf;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
+ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
if (!etr_perf)
@@ -1358,7 +1361,7 @@ static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
nr_pages, pages, snapshot);
if (IS_ERR(etr_perf)) {
- dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
+ dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n");
return NULL;
}
@@ -1501,18 +1504,23 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
tmc_etr_sync_perf_buffer(etr_perf);
/*
- * Update handle->head in snapshot mode. Also update the size to the
- * hardware buffer size if there was an overflow.
+ * In snapshot mode we simply increment the head by the number of bytes
+ * that were written. The user space function cs_etm_find_snapshot() will
+ * figure out how many bytes to get from the AUX buffer based on the
+ * position of the head.
*/
- if (etr_perf->snapshot) {
+ if (etr_perf->snapshot)
handle->head += size;
- if (etr_buf->full)
- size = etr_buf->size;
- }
lost |= etr_buf->full;
out:
- if (lost)
+ /*
+ * Don't set the TRUNCATED flag in snapshot mode because 1) the
+ * captured buffer is expected to be truncated and 2) a full buffer
+ * prevents the event from being re-enabled by the perf core,
+ * resulting in stale data being sent to user space.
+ */
+ if (!etr_perf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
return size;
}
@@ -1612,7 +1620,7 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
+ dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
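
Throughout the ETR changes above, DMA API calls are made on the coresight device's parent ("real_dev") rather than on the coresight device itself, since only the parent AMBA device carries a dma_mask and IOMMU attachment. A hedged sketch of that idea, with an invented helper:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map a trace page against the parent device, which owns the DMA setup. */
static dma_addr_t map_trace_page(struct device *csdev_dev, struct page *page)
{
        struct device *real_dev = csdev_dev->parent;
        dma_addr_t daddr;

        daddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(real_dev, daddr))
                return DMA_MAPPING_ERROR;
        return daddr;
}
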
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 3f718729d741..be37aff573b4 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -27,12 +27,16 @@
#include "coresight-priv.h"
#include "coresight-tmc.h"
+DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
+DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
+DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
+
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
/* Ensure formatter, unformatter and hardware fifo are empty */
if (coresight_timeout(drvdata->base,
TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
- dev_err(drvdata->dev,
+ dev_err(&drvdata->csdev->dev,
"timeout while waiting for TMC to be Ready\n");
}
}
@@ -49,7 +53,7 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
/* Ensure flush completes */
if (coresight_timeout(drvdata->base,
TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
- dev_err(drvdata->dev,
+ dev_err(&drvdata->csdev->dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -83,7 +87,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
}
if (!ret)
- dev_dbg(drvdata->dev, "TMC read start\n");
+ dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
return ret;
}
@@ -105,7 +109,7 @@ static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
}
if (!ret)
- dev_dbg(drvdata->dev, "TMC read end\n");
+ dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
return ret;
}
@@ -122,7 +126,7 @@ static int tmc_open(struct inode *inode, struct file *file)
nonseekable_open(inode, file);
- dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+ dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
return 0;
}
@@ -152,12 +156,13 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
return 0;
if (copy_to_user(data, bufp, actual)) {
- dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ dev_dbg(&drvdata->csdev->dev,
+ "%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
*ppos += actual;
- dev_dbg(drvdata->dev, "%zu bytes copied\n", actual);
+ dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
return actual;
}
@@ -172,7 +177,7 @@ static int tmc_release(struct inode *inode, struct file *file)
if (ret)
return ret;
- dev_dbg(drvdata->dev, "%s: released\n", __func__);
+ dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
return 0;
}
@@ -332,24 +337,22 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
-static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
+static inline bool tmc_etr_can_use_sg(struct device *dev)
{
- return fwnode_property_present(drvdata->dev->fwnode,
- "arm,scatter-gather");
+ return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}
/* Detect and initialise the capabilities of a TMC ETR */
-static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
- u32 devid, void *dev_caps)
+static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
int rc;
-
u32 dma_mask = 0;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
- if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(drvdata))
+ if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
tmc_etr_set_cap(drvdata, TMC_ETR_SG);
/* Check if the AXI address width is available */
@@ -367,18 +370,27 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
case 44:
case 48:
case 52:
- dev_info(drvdata->dev, "Detected dma mask %dbits\n", dma_mask);
+ dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
break;
default:
dma_mask = 40;
}
- rc = dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+ rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
if (rc)
- dev_err(drvdata->dev, "Failed to setup DMA mask: %d\n", rc);
+ dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
return rc;
}
+static u32 tmc_etr_get_default_buffer_size(struct device *dev)
+{
+ u32 size;
+
+ if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
+ size = SZ_1M;
+ return size;
+}
+
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
@@ -389,23 +401,13 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
struct tmc_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
-
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out;
- }
- adev->dev.platform_data = pdata;
- }
+ struct coresight_dev_list *dev_list = NULL;
ret = -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
goto out;
- drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -425,18 +427,11 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
/* This device is not associated with a session */
drvdata->pid = -1;
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (np)
- ret = of_property_read_u32(np,
- "arm,buffer-size",
- &drvdata->size);
- if (ret)
- drvdata->size = SZ_1M;
- } else {
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ drvdata->size = tmc_etr_get_default_buffer_size(dev);
+ else
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
- }
- desc.pdata = pdata;
desc.dev = dev;
desc.groups = coresight_tmc_groups;
@@ -445,36 +440,53 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etb_cs_ops;
+ dev_list = &etb_devs;
break;
case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etr_cs_ops;
- ret = tmc_etr_setup_caps(drvdata, devid,
+ ret = tmc_etr_setup_caps(dev, devid,
coresight_get_uci_data(id));
if (ret)
goto out;
idr_init(&drvdata->idr);
mutex_init(&drvdata->idr_mutex);
+ dev_list = &etr_devs;
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
+ dev_list = &etf_devs;
break;
default:
- pr_err("%s: Unsupported TMC config\n", pdata->name);
+ pr_err("%s: Unsupported TMC config\n", desc.name);
ret = -EINVAL;
goto out;
}
+ desc.name = coresight_alloc_device_name(dev_list, dev);
+ if (!desc.name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
+ adev->dev.platform_data = pdata;
+ desc.pdata = pdata;
+
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto out;
}
- drvdata->miscdev.name = pdata->name;
+ drvdata->miscdev.name = desc.name;
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
drvdata->miscdev.fops = &tmc_fops;
ret = misc_register(&drvdata->miscdev);
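
tmc_etr_get_default_buffer_size() reads the ETR buffer size through the fwnode property API, so the same code path serves DT and ACPI firmware. The read-with-default pattern in isolation, using the equivalent device-level wrapper and an invented function name:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/sizes.h>

/* Read "arm,buffer-size" from DT or ACPI; default to 1 MiB if absent. */
static u32 example_get_buffer_size(struct device *dev)
{
        u32 size;

        if (device_property_read_u32(dev, "arm,buffer-size", &size))
                size = SZ_1M;
        return size;
}
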
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 503f1b3a3741..1ed50411cc3c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -161,7 +161,6 @@ struct etr_buf {
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
@@ -184,7 +183,6 @@ struct etr_buf {
*/
struct tmc_drvdata {
void __iomem *base;
- struct device *dev;
struct coresight_device *csdev;
struct miscdevice miscdev;
spinlock_t spinlock;
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 63d9af31f57f..f8583e4032a6 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -47,15 +47,15 @@
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
+DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu");
+
/**
* @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
* @atclk: optional clock for the core parts of the TPIU.
* @csdev: component vitals needed by the framework.
*/
struct tpiu_drvdata {
void __iomem *base;
- struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
};
@@ -75,7 +75,7 @@ static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
tpiu_enable_hw(drvdata);
atomic_inc(csdev->refcnt);
- dev_dbg(drvdata->dev, "TPIU enabled\n");
+ dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
@@ -104,7 +104,7 @@ static int tpiu_disable(struct coresight_device *csdev)
tpiu_disable_hw(drvdata);
- dev_dbg(drvdata->dev, "TPIU disabled\n");
+ dev_dbg(&csdev->dev, "TPIU disabled\n");
return 0;
}
@@ -126,20 +126,15 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
struct tpiu_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
+ desc.name = coresight_alloc_device_name(&tpiu_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &adev->dev;
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -158,6 +153,11 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ dev->platform_data = pdata;
+
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
desc.ops = &tpiu_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 4b130281236a..86d1fc2c1bd4 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -100,8 +100,8 @@ static int coresight_find_link_inport(struct coresight_device *csdev,
int i;
struct coresight_connection *conn;
- for (i = 0; i < parent->nr_outport; i++) {
- conn = &parent->conns[i];
+ for (i = 0; i < parent->pdata->nr_outport; i++) {
+ conn = &parent->pdata->conns[i];
if (conn->child_dev == csdev)
return conn->child_port;
}
@@ -118,8 +118,8 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
int i;
struct coresight_connection *conn;
- for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ conn = &csdev->pdata->conns[i];
if (conn->child_dev == child)
return conn->outport;
}
@@ -306,10 +306,10 @@ static void coresight_disable_link(struct coresight_device *csdev,
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
refport = inport;
- nr_conns = csdev->nr_inport;
+ nr_conns = csdev->pdata->nr_inport;
} else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
refport = outport;
- nr_conns = csdev->nr_outport;
+ nr_conns = csdev->pdata->nr_outport;
} else {
refport = 0;
nr_conns = 1;
@@ -595,9 +595,10 @@ static void coresight_grab_device(struct coresight_device *csdev)
{
int i;
- for (i = 0; i < csdev->nr_outport; i++) {
- struct coresight_device *child = csdev->conns[i].child_dev;
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_device *child;
+ child = csdev->pdata->conns[i].child_dev;
if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
pm_runtime_get_sync(child->dev.parent);
}
@@ -613,9 +614,10 @@ static void coresight_drop_device(struct coresight_device *csdev)
int i;
pm_runtime_put(csdev->dev.parent);
- for (i = 0; i < csdev->nr_outport; i++) {
- struct coresight_device *child = csdev->conns[i].child_dev;
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_device *child;
+ child = csdev->pdata->conns[i].child_dev;
if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
pm_runtime_put(child->dev.parent);
}
@@ -645,9 +647,10 @@ static int _coresight_build_path(struct coresight_device *csdev,
goto out;
/* Not a sink - recursively explore each port found on this element */
- for (i = 0; i < csdev->nr_outport; i++) {
- struct coresight_device *child_dev = csdev->conns[i].child_dev;
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_device *child_dev;
+ child_dev = csdev->pdata->conns[i].child_dev;
if (child_dev &&
_coresight_build_path(child_dev, sink, path) == 0) {
found = true;
@@ -975,6 +978,7 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
+ fwnode_handle_put(csdev->dev.fwnode);
kfree(csdev->refcnt);
kfree(csdev);
}
@@ -1000,19 +1004,17 @@ static int coresight_orphan_match(struct device *dev, void *data)
* Circle through all the connections of that component. If we find
* an orphan connection whose name matches @csdev, link it.
*/
- for (i = 0; i < i_csdev->nr_outport; i++) {
- conn = &i_csdev->conns[i];
+ for (i = 0; i < i_csdev->pdata->nr_outport; i++) {
+ conn = &i_csdev->pdata->conns[i];
/* We have found at least one orphan connection */
if (conn->child_dev == NULL) {
/* Does it match this newly added device? */
- if (conn->child_name &&
- !strcmp(dev_name(&csdev->dev), conn->child_name)) {
+ if (conn->child_fwnode == csdev->dev.fwnode)
conn->child_dev = csdev;
- } else {
+ else
/* This component still has an orphan */
still_orphan = true;
- }
}
}
@@ -1040,13 +1042,13 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
{
int i;
- for (i = 0; i < csdev->nr_outport; i++) {
- struct coresight_connection *conn = &csdev->conns[i];
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_connection *conn = &csdev->pdata->conns[i];
struct device *dev = NULL;
- if (conn->child_name)
- dev = bus_find_device_by_name(&coresight_bustype, NULL,
- conn->child_name);
+ dev = bus_find_device(&coresight_bustype, NULL,
+ (void *)conn->child_fwnode,
+ coresight_device_fwnode_match);
if (dev) {
conn->child_dev = to_coresight_device(dev);
/* and put reference from 'bus_find_device()' */
@@ -1075,15 +1077,21 @@ static int coresight_remove_match(struct device *dev, void *data)
* Circle through all the connections of that component. If we find
* a connection whose name matches @csdev, remove it.
*/
- for (i = 0; i < iterator->nr_outport; i++) {
- conn = &iterator->conns[i];
+ for (i = 0; i < iterator->pdata->nr_outport; i++) {
+ conn = &iterator->pdata->conns[i];
if (conn->child_dev == NULL)
continue;
- if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+ if (csdev->dev.fwnode == conn->child_fwnode) {
iterator->orphan = true;
conn->child_dev = NULL;
+ /*
+ * Drop the reference to the handle for the remote
+ * device acquired in parsing the connections from
+ * platform data.
+ */
+ fwnode_handle_put(conn->child_fwnode);
/* No need to continue */
break;
}
@@ -1096,10 +1104,21 @@ static int coresight_remove_match(struct device *dev, void *data)
return 0;
}
+/*
+ * coresight_remove_conns - Remove references to the given device
+ * from the connections of other devices.
+ */
static void coresight_remove_conns(struct coresight_device *csdev)
{
- bus_for_each_dev(&coresight_bustype, NULL,
- csdev, coresight_remove_match);
+ /*
+ * Another device will point to this device only if there is
+ * an output port connected to this one. That is, if the device
+ * doesn't have at least one input port, there is no point
+ * in searching all the devices.
+ */
+ if (csdev->pdata->nr_inport)
+ bus_for_each_dev(&coresight_bustype, NULL,
+ csdev, coresight_remove_match);
}
/**
@@ -1152,6 +1171,22 @@ static int __init coresight_init(void)
}
postcore_initcall(coresight_init);
+/*
+ * coresight_release_platform_data: Release references to the devices connected
+ * to the output ports of this device.
+ */
+void coresight_release_platform_data(struct coresight_platform_data *pdata)
+{
+ int i;
+
+ for (i = 0; i < pdata->nr_outport; i++) {
+ if (pdata->conns[i].child_fwnode) {
+ fwnode_handle_put(pdata->conns[i].child_fwnode);
+ pdata->conns[i].child_fwnode = NULL;
+ }
+ }
+}
+
struct coresight_device *coresight_register(struct coresight_desc *desc)
{
int ret;
@@ -1184,10 +1219,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->refcnt = refcnts;
- csdev->nr_inport = desc->pdata->nr_inport;
- csdev->nr_outport = desc->pdata->nr_outport;
-
- csdev->conns = desc->pdata->conns;
+ csdev->pdata = desc->pdata;
csdev->type = desc->type;
csdev->subtype = desc->subtype;
@@ -1199,7 +1231,12 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.parent = desc->dev;
csdev->dev.release = coresight_device_release;
csdev->dev.bus = &coresight_bustype;
- dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+ /*
+ * Hold a reference to the fwnode of our parent device. This
+ * will be dropped only in coresight_device_release().
+ */
+ csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
+ dev_set_name(&csdev->dev, "%s", desc->name);
ret = device_register(&csdev->dev);
if (ret) {
@@ -1239,6 +1276,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
err_free_csdev:
kfree(csdev);
err_out:
+ /* Cleanup the connection information */
+ coresight_release_platform_data(desc->pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
@@ -1248,6 +1287,65 @@ void coresight_unregister(struct coresight_device *csdev)
etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
+ coresight_release_platform_data(csdev->pdata);
device_unregister(&csdev->dev);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
+
+
+/*
+ * coresight_search_device_idx - Search the fwnode handle of a device
+ * in the given dev_idx list. Must be called with the coresight_mutex held.
+ *
+ * Returns the index of the entry, when found. Otherwise, -ENOENT.
+ */
+static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
+ struct fwnode_handle *fwnode)
+{
+ int i;
+
+ for (i = 0; i < dict->nr_idx; i++)
+ if (dict->fwnode_list[i] == fwnode)
+ return i;
+ return -ENOENT;
+}
+
+/*
+ * coresight_alloc_device_name - Get an index for a given device in the
+ * device index list specific to a driver. An index is allocated for a
+ * device and is tracked with the fwnode_handle to prevent allocating
+ * duplicate indices for the same device (e.g., if we defer probing of
+ * a device due to dependencies), in case the index is requested again.
+ */
+char *coresight_alloc_device_name(struct coresight_dev_list *dict,
+ struct device *dev)
+{
+ int idx;
+ char *name = NULL;
+ struct fwnode_handle **list;
+
+ mutex_lock(&coresight_mutex);
+
+ idx = coresight_search_device_idx(dict, dev_fwnode(dev));
+ if (idx < 0) {
+ /* Make space for the new entry */
+ idx = dict->nr_idx;
+ list = krealloc(dict->fwnode_list,
+ (idx + 1) * sizeof(*dict->fwnode_list),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(list)) {
+ idx = -ENOMEM;
+ goto done;
+ }
+
+ list[idx] = dev_fwnode(dev);
+ dict->fwnode_list = list;
+ dict->nr_idx = idx + 1;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", dict->pfx, idx);
+done:
+ mutex_unlock(&coresight_mutex);
+ return name;
+}
+EXPORT_SYMBOL_GPL(coresight_alloc_device_name);
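
coresight_alloc_device_name() ties each index to the device's fwnode, so a component that defers probing gets the same index, and therefore the same name, on the next probe attempt. A hypothetical driver-side usage sketch (the "example" identifiers are not part of this patch):

#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/errno.h>

/* One shared name space per component type: "example0", "example1", ... */
DEFINE_CORESIGHT_DEVLIST(example_devs, "example");

static int example_setup_name(struct device *dev, struct coresight_desc *desc)
{
        /*
         * The same "exampleN" string comes back if this device is probed
         * again after -EPROBE_DEFER, since the index is keyed on its fwnode.
         */
        desc->name = coresight_alloc_device_name(&example_devs, dev);
        if (!desc->name)
                return -ENOMEM;
        return 0;
}
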
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
deleted file mode 100644
index 7045930fc958..000000000000
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-#include <linux/coresight.h>
-#include <linux/cpumask.h>
-#include <asm/smp_plat.h>
-
-
-static int of_dev_node_match(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static struct device *
-of_coresight_get_endpoint_device(struct device_node *endpoint)
-{
- struct device *dev = NULL;
-
- /*
- * If we have a non-configurable replicator, it will be found on the
- * platform bus.
- */
- dev = bus_find_device(&platform_bus_type, NULL,
- endpoint, of_dev_node_match);
- if (dev)
- return dev;
-
- /*
- * We have a configurable component - circle through the AMBA bus
- * looking for the device that matches the endpoint node.
- */
- return bus_find_device(&amba_bustype, NULL,
- endpoint, of_dev_node_match);
-}
-
-static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
-{
- return of_property_read_bool(ep, "slave-mode");
-}
-
-static void of_coresight_get_ports_legacy(const struct device_node *node,
- int *nr_inport, int *nr_outport)
-{
- struct device_node *ep = NULL;
- int in = 0, out = 0;
-
- do {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
- if (of_coresight_legacy_ep_is_input(ep))
- in++;
- else
- out++;
-
- } while (ep);
-
- *nr_inport = in;
- *nr_outport = out;
-}
-
-static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
-{
- struct device_node *parent = of_graph_get_port_parent(ep);
-
- /*
- * Skip one-level up to the real device node, if we
- * are using the new bindings.
- */
- if (of_node_name_eq(parent, "in-ports") ||
- of_node_name_eq(parent, "out-ports"))
- parent = of_get_next_parent(parent);
-
- return parent;
-}
-
-static inline struct device_node *
-of_coresight_get_input_ports_node(const struct device_node *node)
-{
- return of_get_child_by_name(node, "in-ports");
-}
-
-static inline struct device_node *
-of_coresight_get_output_ports_node(const struct device_node *node)
-{
- return of_get_child_by_name(node, "out-ports");
-}
-
-static inline int
-of_coresight_count_ports(struct device_node *port_parent)
-{
- int i = 0;
- struct device_node *ep = NULL;
-
- while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
- i++;
- return i;
-}
-
-static void of_coresight_get_ports(const struct device_node *node,
- int *nr_inport, int *nr_outport)
-{
- struct device_node *input_ports = NULL, *output_ports = NULL;
-
- input_ports = of_coresight_get_input_ports_node(node);
- output_ports = of_coresight_get_output_ports_node(node);
-
- if (input_ports || output_ports) {
- if (input_ports) {
- *nr_inport = of_coresight_count_ports(input_ports);
- of_node_put(input_ports);
- }
- if (output_ports) {
- *nr_outport = of_coresight_count_ports(output_ports);
- of_node_put(output_ports);
- }
- } else {
- /* Fall back to legacy DT bindings parsing */
- of_coresight_get_ports_legacy(node, nr_inport, nr_outport);
- }
-}
-
-static int of_coresight_alloc_memory(struct device *dev,
- struct coresight_platform_data *pdata)
-{
- if (pdata->nr_outport) {
- pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
- sizeof(*pdata->conns),
- GFP_KERNEL);
- if (!pdata->conns)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-int of_coresight_get_cpu(const struct device_node *node)
-{
- int cpu;
- struct device_node *dn;
-
- dn = of_parse_phandle(node, "cpu", 0);
- /* Affinity defaults to CPU0 */
- if (!dn)
- return 0;
- cpu = of_cpu_node_to_id(dn);
- of_node_put(dn);
-
- /* Affinity to CPU0 if no cpu nodes are found */
- return (cpu < 0) ? 0 : cpu;
-}
-EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
-
-/*
- * of_coresight_parse_endpoint : Parse the given output endpoint @ep
- * and fill the connection information in @conn
- *
- * Parses the local port, remote device name and the remote port.
- *
- * Returns :
- * 1 - If the parsing is successful and a connection record
- * was created for an output connection.
- * 0 - If the parsing completed without any fatal errors.
- * -Errno - Fatal error, abort the scanning.
- */
-static int of_coresight_parse_endpoint(struct device *dev,
- struct device_node *ep,
- struct coresight_connection *conn)
-{
- int ret = 0;
- struct of_endpoint endpoint, rendpoint;
- struct device_node *rparent = NULL;
- struct device_node *rep = NULL;
- struct device *rdev = NULL;
-
- do {
- /* Parse the local port details */
- if (of_graph_parse_endpoint(ep, &endpoint))
- break;
- /*
- * Get a handle on the remote endpoint and the device it is
- * attached to.
- */
- rep = of_graph_get_remote_endpoint(ep);
- if (!rep)
- break;
- rparent = of_coresight_get_port_parent(rep);
- if (!rparent)
- break;
- if (of_graph_parse_endpoint(rep, &rendpoint))
- break;
-
- /* If the remote device is not available, defer probing */
- rdev = of_coresight_get_endpoint_device(rparent);
- if (!rdev) {
- ret = -EPROBE_DEFER;
- break;
- }
-
- conn->outport = endpoint.port;
- conn->child_name = devm_kstrdup(dev,
- dev_name(rdev),
- GFP_KERNEL);
- conn->child_port = rendpoint.port;
- /* Connection record updated */
- ret = 1;
- } while (0);
-
- of_node_put(rparent);
- of_node_put(rep);
- put_device(rdev);
-
- return ret;
-}
-
-struct coresight_platform_data *
-of_get_coresight_platform_data(struct device *dev,
- const struct device_node *node)
-{
- int ret = 0;
- struct coresight_platform_data *pdata;
- struct coresight_connection *conn;
- struct device_node *ep = NULL;
- const struct device_node *parent = NULL;
- bool legacy_binding = false;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- /* Use device name as sysfs handle */
- pdata->name = dev_name(dev);
- pdata->cpu = of_coresight_get_cpu(node);
-
- /* Get the number of input and output port for this component */
- of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
-
- /* If there are no output connections, we are done */
- if (!pdata->nr_outport)
- return pdata;
-
- ret = of_coresight_alloc_memory(dev, pdata);
- if (ret)
- return ERR_PTR(ret);
-
- parent = of_coresight_get_output_ports_node(node);
- /*
- * If the DT uses obsoleted bindings, the ports are listed
- * under the device and we need to filter out the input
- * ports.
- */
- if (!parent) {
- legacy_binding = true;
- parent = node;
- dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
- }
-
- conn = pdata->conns;
-
- /* Iterate through each output port to discover topology */
- while ((ep = of_graph_get_next_endpoint(parent, ep))) {
- /*
- * Legacy binding mixes input/output ports under the
- * same parent. So, skip the input ports if we are dealing
- * with legacy binding, as they processed with their
- * connected output ports.
- */
- if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
- continue;
-
- ret = of_coresight_parse_endpoint(dev, ep, conn);
- switch (ret) {
- case 1:
- conn++; /* Fall through */
- case 0:
- break;
- default:
- return ERR_PTR(ret);
- }
- }
-
- return pdata;
-}
-EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 81bb54fa3ce8..8ab28e5fb366 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -33,14 +33,18 @@
* @entry: window list linkage (msc::win_list)
* @pgoff: page offset into the buffer that this window starts at
* @nr_blocks: number of blocks (pages) in this window
+ * @nr_segs: number of segments in this window (<= @nr_blocks)
+ * @_sgt: array of block descriptors
* @sgt: array of block descriptors
*/
struct msc_window {
struct list_head entry;
unsigned long pgoff;
unsigned int nr_blocks;
+ unsigned int nr_segs;
struct msc *msc;
- struct sg_table sgt;
+ struct sg_table _sgt;
+ struct sg_table *sgt;
};
/**
@@ -138,13 +142,19 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
static inline struct msc_block_desc *
msc_win_block(struct msc_window *win, unsigned int block)
{
- return sg_virt(&win->sgt.sgl[block]);
+ return sg_virt(&win->sgt->sgl[block]);
+}
+
+static inline size_t
+msc_win_actual_bsz(struct msc_window *win, unsigned int block)
+{
+ return win->sgt->sgl[block].length;
}
static inline dma_addr_t
msc_win_baddr(struct msc_window *win, unsigned int block)
{
- return sg_dma_address(&win->sgt.sgl[block]);
+ return sg_dma_address(&win->sgt->sgl[block]);
}
static inline unsigned long
@@ -179,17 +189,18 @@ static struct msc_window *msc_next_window(struct msc_window *win)
}
/**
- * msc_oldest_window() - locate the window with oldest data
+ * msc_find_window() - find a window matching a given sg_table
* @msc: MSC device
+ * @sgt: SG table of the window
+ * @nonempty: skip over empty windows
*
- * This should only be used in multiblock mode. Caller should hold the
- * msc::user_count reference.
- *
- * Return: the oldest window with valid data
+ * Return: MSC window structure pointer or NULL if the window
+ * could not be found.
*/
-static struct msc_window *msc_oldest_window(struct msc *msc)
+static struct msc_window *
+msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
- struct msc_window *win, *next = msc_next_window(msc->cur_win);
+ struct msc_window *win;
unsigned int found = 0;
if (list_empty(&msc->win_list))
@@ -201,17 +212,40 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
* something like 2, in which case we're good
*/
list_for_each_entry(win, &msc->win_list, entry) {
- if (win == next)
+ if (win->sgt == sgt)
found++;
/* skip the empty ones */
- if (msc_block_is_empty(msc_win_block(win, 0)))
+ if (nonempty && msc_block_is_empty(msc_win_block(win, 0)))
continue;
if (found)
return win;
}
+ return NULL;
+}
+
+/**
+ * msc_oldest_window() - locate the window with oldest data
+ * @msc: MSC device
+ *
+ * This should only be used in multiblock mode. Caller should hold the
+ * msc::user_count reference.
+ *
+ * Return: the oldest window with valid data
+ */
+static struct msc_window *msc_oldest_window(struct msc *msc)
+{
+ struct msc_window *win;
+
+ if (list_empty(&msc->win_list))
+ return NULL;
+
+ win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
+ if (win)
+ return win;
+
return list_first_entry(&msc->win_list, struct msc_window, entry);
}
@@ -234,7 +268,7 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
* with wrapping, last written block contains both the newest and the
* oldest data for this window.
*/
- for (blk = 0; blk < win->nr_blocks; blk++) {
+ for (blk = 0; blk < win->nr_segs; blk++) {
bdesc = msc_win_block(win, blk);
if (msc_block_last_written(bdesc))
@@ -366,7 +400,7 @@ static int msc_iter_block_advance(struct msc_iter *iter)
return msc_iter_win_advance(iter);
/* block advance */
- if (++iter->block == iter->win->nr_blocks)
+ if (++iter->block == iter->win->nr_segs)
iter->block = 0;
/* no wrapping, sanity check in case there is no last written block */
@@ -478,7 +512,7 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
size_t hw_sz = sizeof(struct msc_block_desc) -
offsetof(struct msc_block_desc, hw_tag);
- for (blk = 0; blk < win->nr_blocks; blk++) {
+ for (blk = 0; blk < win->nr_segs; blk++) {
struct msc_block_desc *bdesc = msc_win_block(win, blk);
memset(&bdesc->hw_tag, 0, hw_sz);
@@ -667,7 +701,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
goto err_out;
ret = -ENOMEM;
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
if (!page)
goto err_free_sgt;
@@ -734,17 +768,17 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
}
static int __msc_buffer_win_alloc(struct msc_window *win,
- unsigned int nr_blocks)
+ unsigned int nr_segs)
{
struct scatterlist *sg_ptr;
void *block;
int i, ret;
- ret = sg_alloc_table(&win->sgt, nr_blocks, GFP_KERNEL);
+ ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
if (ret)
return -ENOMEM;
- for_each_sg(win->sgt.sgl, sg_ptr, nr_blocks, i) {
+ for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
PAGE_SIZE, &sg_dma_address(sg_ptr),
GFP_KERNEL);
@@ -754,7 +788,7 @@ static int __msc_buffer_win_alloc(struct msc_window *win,
sg_set_buf(sg_ptr, block, PAGE_SIZE);
}
- return nr_blocks;
+ return nr_segs;
err_nomem:
for (i--; i >= 0; i--)
@@ -762,11 +796,35 @@ err_nomem:
msc_win_block(win, i),
msc_win_baddr(win, i));
- sg_free_table(&win->sgt);
+ sg_free_table(win->sgt);
return -ENOMEM;
}
+#ifdef CONFIG_X86
+static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
+{
+ int i;
+
+ for (i = 0; i < nr_segs; i++)
+ /* Set the page as uncached */
+ set_memory_uc((unsigned long)msc_win_block(win, i), 1);
+}
+
+static void msc_buffer_set_wb(struct msc_window *win)
+{
+ int i;
+
+ for (i = 0; i < win->nr_segs; i++)
+ /* Reset the page to write-back */
+ set_memory_wb((unsigned long)msc_win_block(win, i), 1);
+}
+#else /* !X86 */
+static inline void
+msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
+static inline void msc_buffer_set_wb(struct msc_window *win) {}
+#endif /* CONFIG_X86 */
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -780,7 +838,7 @@ err_nomem:
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
struct msc_window *win;
- int ret = -ENOMEM, i;
+ int ret = -ENOMEM;
if (!nr_blocks)
return 0;
@@ -797,13 +855,13 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
return -ENOMEM;
win->msc = msc;
+ win->sgt = &win->_sgt;
if (!list_empty(&msc->win_list)) {
struct msc_window *prev = list_last_entry(&msc->win_list,
struct msc_window,
entry);
- /* This works as long as blocks are page-sized */
win->pgoff = prev->pgoff + prev->nr_blocks;
}
@@ -811,13 +869,10 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
if (ret < 0)
goto err_nomem;
-#ifdef CONFIG_X86
- for (i = 0; i < ret; i++)
- /* Set the page as uncached */
- set_memory_uc((unsigned long)msc_win_block(win, i), 1);
-#endif
+ msc_buffer_set_uc(win, ret);
- win->nr_blocks = ret;
+ win->nr_segs = ret;
+ win->nr_blocks = nr_blocks;
if (list_empty(&msc->win_list)) {
msc->base = msc_win_block(win, 0);
@@ -840,14 +895,14 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
int i;
- for (i = 0; i < win->nr_blocks; i++) {
- struct page *page = sg_page(&win->sgt.sgl[i]);
+ for (i = 0; i < win->nr_segs; i++) {
+ struct page *page = sg_page(&win->sgt->sgl[i]);
page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
msc_win_block(win, i), msc_win_baddr(win, i));
}
- sg_free_table(&win->sgt);
+ sg_free_table(win->sgt);
}
/**
@@ -860,8 +915,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
*/
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
- int i;
-
msc->nr_pages -= win->nr_blocks;
list_del(&win->entry);
@@ -870,11 +923,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
msc->base_addr = 0;
}
-#ifdef CONFIG_X86
- for (i = 0; i < win->nr_blocks; i++)
- /* Reset the page to write-back */
- set_memory_wb((unsigned long)msc_win_block(win, i), 1);
-#endif
+ msc_buffer_set_wb(win);
__msc_buffer_win_free(msc, win);
@@ -909,7 +958,7 @@ static void msc_buffer_relink(struct msc *msc)
next_win = list_next_entry(win, entry);
}
- for (blk = 0; blk < win->nr_blocks; blk++) {
+ for (blk = 0; blk < win->nr_segs; blk++) {
struct msc_block_desc *bdesc = msc_win_block(win, blk);
memset(bdesc, 0, sizeof(*bdesc));
@@ -920,7 +969,7 @@ static void msc_buffer_relink(struct msc *msc)
* Similarly to last window, last block should point
* to the first one.
*/
- if (blk == win->nr_blocks - 1) {
+ if (blk == win->nr_segs - 1) {
sw_tag |= MSC_SW_TAG_LASTBLK;
bdesc->next_blk = msc_win_bpfn(win, 0);
} else {
@@ -928,7 +977,7 @@ static void msc_buffer_relink(struct msc *msc)
}
bdesc->sw_tag = sw_tag;
- bdesc->block_sz = PAGE_SIZE / 64;
+ bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64;
}
}
@@ -1087,6 +1136,7 @@ static int msc_buffer_free_unless_used(struct msc *msc)
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
struct msc_window *win;
+ unsigned int blk;
if (msc->mode == MSC_MODE_SINGLE)
return msc_buffer_contig_get_page(msc, pgoff);
@@ -1099,7 +1149,18 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
found:
pgoff -= win->pgoff;
- return sg_page(&win->sgt.sgl[pgoff]);
+
+ for (blk = 0; blk < win->nr_segs; blk++) {
+ struct page *page = sg_page(&win->sgt->sgl[blk]);
+ size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk));
+
+ if (pgoff < pgsz)
+ return page + pgoff;
+
+ pgoff -= pgsz;
+ }
+
+ return NULL;
}
/**
@@ -1386,10 +1447,9 @@ static int intel_th_msc_init(struct msc *msc)
static void msc_win_switch(struct msc *msc)
{
- struct msc_window *last, *first;
+ struct msc_window *first;
first = list_first_entry(&msc->win_list, struct msc_window, entry);
- last = list_last_entry(&msc->win_list, struct msc_window, entry);
if (msc_is_last_win(msc->cur_win))
msc->cur_win = first;
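
With per-window scatterlists whose segments may differ in size, a window page offset can no longer index the scatterlist directly; msc_buffer_get_page() now walks the segments, subtracting each segment's page count. The same walk in isolation, with simplified types and invented names:

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>

/*
 * Resolve a page offset within a window backed by variable-sized sg
 * segments: skip whole segments, then index into the matching one.
 * Indexing sgl[] directly assumes an unchained table, as the driver does.
 */
static struct page *segs_get_page(struct sg_table *sgt, unsigned int nr_segs,
                                  unsigned long pgoff)
{
        unsigned int i;

        for (i = 0; i < nr_segs; i++) {
                struct scatterlist *sg = &sgt->sgl[i];
                size_t seg_pages = PFN_DOWN(sg->length);

                if (pgoff < seg_pages)
                        return sg_page(sg) + pgoff;
                pgoff -= seg_pages;
        }
        return NULL;
}
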
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index f1228708f2a2..c0378c3de9a4 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -194,6 +194,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Ice Lake NNPI */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 392ad4f5c570..dbdee02bb592 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -123,7 +123,7 @@ config FSL_IFC
config JZ4780_NEMC
bool "Ingenic JZ4780 SoC NEMC driver"
default y
- depends on MACH_JZ4780 || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
depends on HAS_IOMEM && OF
help
This driver is for the NAND/External Memory Controller (NEMC) in
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index 698da973de35..2a3f7ef1c8c4 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -41,9 +41,14 @@
#define NEMC_NFCSR_NFCEn(n) BIT((((n) - 1) << 1) + 1)
#define NEMC_NFCSR_TNFEn(n) BIT(16 + (n) - 1)
+struct jz_soc_info {
+ u8 tas_tah_cycles_max;
+};
+
struct jz4780_nemc {
spinlock_t lock;
struct device *dev;
+ const struct jz_soc_info *soc_info;
void __iomem *base;
struct clk *clk;
uint32_t clk_period;
@@ -158,7 +163,7 @@ static bool jz4780_nemc_configure_bank(struct jz4780_nemc *nemc,
* Conversion of tBP and tAW cycle counts to values supported by the
* hardware (round up to the next supported value).
*/
- static const uint32_t convert_tBP_tAW[] = {
+ static const u8 convert_tBP_tAW[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
/* 11 - 12 -> 12 cycles */
@@ -199,7 +204,7 @@ static bool jz4780_nemc_configure_bank(struct jz4780_nemc *nemc,
if (of_property_read_u32(node, "ingenic,nemc-tAS", &val) == 0) {
smcr &= ~NEMC_SMCR_TAS_MASK;
cycles = jz4780_nemc_ns_to_cycles(nemc, val);
- if (cycles > 15) {
+ if (cycles > nemc->soc_info->tas_tah_cycles_max) {
dev_err(nemc->dev, "tAS %u is too high (%u cycles)\n",
val, cycles);
return false;
@@ -211,7 +216,7 @@ static bool jz4780_nemc_configure_bank(struct jz4780_nemc *nemc,
if (of_property_read_u32(node, "ingenic,nemc-tAH", &val) == 0) {
smcr &= ~NEMC_SMCR_TAH_MASK;
cycles = jz4780_nemc_ns_to_cycles(nemc, val);
- if (cycles > 15) {
+ if (cycles > nemc->soc_info->tas_tah_cycles_max) {
dev_err(nemc->dev, "tAH %u is too high (%u cycles)\n",
val, cycles);
return false;
@@ -275,6 +280,10 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
if (!nemc)
return -ENOMEM;
+ nemc->soc_info = device_get_match_data(dev);
+ if (!nemc->soc_info)
+ return -EINVAL;
+
spin_lock_init(&nemc->lock);
nemc->dev = dev;
@@ -367,8 +376,17 @@ static int jz4780_nemc_remove(struct platform_device *pdev)
return 0;
}
+static const struct jz_soc_info jz4740_soc_info = {
+ .tas_tah_cycles_max = 7,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .tas_tah_cycles_max = 15,
+};
+
static const struct of_device_id jz4780_nemc_dt_match[] = {
- { .compatible = "ingenic,jz4780-nemc" },
+ { .compatible = "ingenic,jz4740-nemc", .data = &jz4740_soc_info, },
+ { .compatible = "ingenic,jz4780-nemc", .data = &jz4780_soc_info, },
{},
};
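
The NEMC probe now pulls its per-SoC limit from the OF match table via device_get_match_data(), letting the JZ4740 (tAS/tAH up to 7 cycles) and the JZ4780 (up to 15 cycles) share one driver. A reduced sketch of the pattern, with the probe body trimmed and "example" names invented:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_soc_info {
        u8 tas_tah_cycles_max;  /* hardware limit for tAS/tAH in cycles */
};

static const struct example_soc_info example_jz4740 = { .tas_tah_cycles_max = 7 };
static const struct example_soc_info example_jz4780 = { .tas_tah_cycles_max = 15 };

static const struct of_device_id example_nemc_match[] = {
        { .compatible = "ingenic,jz4740-nemc", .data = &example_jz4740 },
        { .compatible = "ingenic,jz4780-nemc", .data = &example_jz4780 },
        { /* sentinel */ },
};

static int example_nemc_probe(struct platform_device *pdev)
{
        const struct example_soc_info *soc = device_get_match_data(&pdev->dev);

        if (!soc)       /* unknown compatible: no match data */
                return -EINVAL;
        /* ... clamp DT-provided tAS/tAH against soc->tas_tah_cycles_max ... */
        return 0;
}
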
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 85fc77148d19..6abfc8e92fcc 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -9,7 +9,6 @@ config SENSORS_LIS3LV02D
tristate
depends on INPUT
select INPUT_POLLDEV
- default n
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
@@ -62,7 +61,6 @@ config ATMEL_TCLIB
config DUMMY_IRQ
tristate "Dummy IRQ handler"
- default n
---help---
This module accepts a single 'irq' parameter, which it should register for.
The sole purpose of this module is to help with debugging of systems on
@@ -118,7 +116,6 @@ config PHANTOM
config INTEL_MID_PTI
tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
depends on PCI && TTY && (X86_INTEL_MID || COMPILE_TEST)
- default n
help
The PTI (Parallel Trace Interface) driver directs
trace data routed from various parts in the system out
@@ -194,7 +191,6 @@ config ATMEL_SSC
config ENCLOSURE_SERVICES
tristate "Enclosure Services"
- default n
help
Provides support for intelligent enclosures (bays which
contain storage devices). You also need either a host
@@ -218,7 +214,6 @@ config SGI_XP
config CS5535_MFGPT
tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
depends on MFD_CS5535
- default n
help
This driver provides access to MFGPT functionality for other
drivers that need timers. MFGPTs are available in the CS5535 and
@@ -251,7 +246,6 @@ config CS5535_CLOCK_EVENT_SRC
config HP_ILO
tristate "Channel interface driver for the HP iLO processor"
depends on PCI
- default n
help
The channel interface driver allows applications to communicate
with iLO management processors present on HP ProLiant servers.
@@ -286,7 +280,6 @@ config QCOM_FASTRPC
config SGI_GRU
tristate "SGI GRU driver"
depends on X86_UV && SMP
- default n
select MMU_NOTIFIER
---help---
The GRU is a hardware resource located in the system chipset. The GRU
@@ -301,7 +294,6 @@ config SGI_GRU
config SGI_GRU_DEBUG
bool "SGI GRU driver debug"
depends on SGI_GRU
- default n
---help---
This option enables additional debugging code for the SGI GRU driver.
If you are unsure, say N.
@@ -359,7 +351,6 @@ config SENSORS_BH1770
config SENSORS_APDS990X
tristate "APDS990X combined als and proximity sensors"
depends on I2C
- default n
---help---
Say Y here if you want to build a driver for Avago APDS990x
combined ambient light and proximity sensor chip.
@@ -387,7 +378,6 @@ config DS1682
config SPEAR13XX_PCIE_GADGET
bool "PCIe gadget support for SPEAr13XX platform"
depends on ARCH_SPEAR13XX && BROKEN
- default n
help
This option enables gadget support for PCIe controller. If
board file defines any controller as PCIe endpoint then a sysfs
@@ -397,6 +387,7 @@ config SPEAR13XX_PCIE_GADGET
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
+ select MEMORY_BALLOON
help
This is VMware physical memory management driver which acts
like a "balloon" that can be inflated to reclaim physical pages
@@ -431,15 +422,6 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
-config USB_SWITCH_FSA9480
- tristate "FSA9480 USB Switch"
- depends on I2C
- help
- The FSA9480 is a USB port accessory detector and switch.
- The FSA9480 is fully controlled using I2C and enables USB data,
- stereo and mono audio, video, microphone and UART data to use
- a common connector port.
-
config LATTICE_ECP3_CONFIG
tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
depends on SPI && SYSFS
@@ -481,6 +463,18 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config XILINX_SDFEC
+ tristate "Xilinx SDFEC 16"
+ help
+ This option enables support for the Xilinx SDFEC (Soft Decision
+ Forward Error Correction) driver. This enables a char driver
+ for the SDFEC.
+
+ You may select this driver if your design instantiates the
+ SDFEC (16nm) hardened block. To compile this as a module, choose M.
+
+ If unsure, say N.
+
config MISC_RTSX
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b9affcdaa3d6..abd8ae249746 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-y += lis3lv02d/
-obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
@@ -59,3 +58,4 @@ obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
+obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig
index b34863544365..6c4c6575ec31 100644
--- a/drivers/misc/altera-stapl/Kconfig
+++ b/drivers/misc/altera-stapl/Kconfig
@@ -5,6 +5,5 @@ comment "Altera FPGA firmware download module (requires I2C)"
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
depends on I2C
- default n
help
An Altera FPGA module. Say Y when you want to support this tool.
diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig
index 192e25094bd4..e20516ffd91e 100644
--- a/drivers/misc/c2port/Kconfig
+++ b/drivers/misc/c2port/Kconfig
@@ -5,7 +5,6 @@
menuconfig C2PORT
tristate "Silicon Labs C2 port support"
- default n
help
This option enables support for Silicon Labs C2 port used to
program Silicon micro controller chips (and other 8051 compatible).
@@ -24,7 +23,6 @@ if C2PORT
config C2PORT_DURAMAR_2150
tristate "C2 port support for Eurotech's Duramar 2150"
depends on X86
- default n
help
This option enables C2 support for the Eurotech's Duramar 2150
on board micro controller.
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 3c7356d55423..a696d7509024 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -15,7 +15,6 @@ config CB710_CORE
config CB710_DEBUG
bool "Enable driver debugging"
depends on CB710_CORE != n
- default n
help
This is an option for use by developers; most people should
say N here. This adds a lot of debugging output to dmesg.
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index f1d9a843e361..39eec9031487 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -5,16 +5,13 @@
config CXL_BASE
bool
- default n
select PPC_COPRO_BASE
config CXL_AFU_DRIVER_OPS
bool
- default n
config CXL_LIB
bool
- default n
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig
index 39656413e70d..be70b263e271 100644
--- a/drivers/misc/echo/Kconfig
+++ b/drivers/misc/echo/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config ECHO
tristate "Line Echo Canceller support"
- default n
---help---
This driver provides line echo cancelling support for mISDN and
Zaptel drivers.
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index be3263df278a..6f00c33cfe22 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -2,7 +2,7 @@
/*
* ee1004 - driver for DDR4 SPD EEPROMs
*
- * Copyright (C) 2017 Jean Delvare
+ * Copyright (C) 2017-2019 Jean Delvare
*
* Based on the at24 driver:
* Copyright (C) 2005-2007 David Brownell
@@ -53,6 +53,24 @@ MODULE_DEVICE_TABLE(i2c, ee1004_ids);
/*-------------------------------------------------------------------------*/
+static int ee1004_get_current_page(void)
+{
+ int err;
+
+ err = i2c_smbus_read_byte(ee1004_set_page[0]);
+ if (err == -ENXIO) {
+ /* Nack means page 1 is selected */
+ return 1;
+ }
+ if (err < 0) {
+ /* Anything else is a real error, bail out */
+ return err;
+ }
+
+ /* Ack means page 0 is selected, returned value meaningless */
+ return 0;
+}
+
static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
unsigned int offset, size_t count)
{
@@ -102,6 +120,16 @@ static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
/* Data is ignored */
status = i2c_smbus_write_byte(ee1004_set_page[page],
0x00);
+ if (status == -ENXIO) {
+ /*
+ * Don't give up just yet. Some memory
+ * modules will select the page but not
+ * ack the command. Check which page is
+ * selected now.
+ */
+ if (ee1004_get_current_page() == page)
+ status = 0;
+ }
if (status < 0) {
dev_err(dev, "Failed to select page %d (%d)\n",
page, status);
@@ -186,17 +214,10 @@ static int ee1004_probe(struct i2c_client *client,
}
/* Remember current page to avoid unneeded page select */
- err = i2c_smbus_read_byte(ee1004_set_page[0]);
- if (err == -ENXIO) {
- /* Nack means page 1 is selected */
- ee1004_current_page = 1;
- } else if (err < 0) {
- /* Anything else is a real error, bail out */
+ err = ee1004_get_current_page();
+ if (err < 0)
goto err_clients;
- } else {
- /* Ack means page 0 is selected, returned value meaningless */
- ee1004_current_page = 0;
- }
+ ee1004_current_page = err;
dev_dbg(&client->dev, "Currently selected page: %d\n",
ee1004_current_page);
mutex_unlock(&ee1004_bus_lock);
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 8a4659518c33..81c70e5bc168 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -115,7 +115,6 @@ static struct dentry *csr_dbgdir;
* @client: i2c client used to perform IO operations
*
* @ee_file: EEPROM read/write sysfs-file
- * @csr_file: CSR read/write debugfs-node
*/
struct idt_smb_seq;
struct idt_89hpesx_dev {
@@ -137,7 +136,6 @@ struct idt_89hpesx_dev {
struct bin_attribute *ee_file;
struct dentry *csr_dir;
- struct dentry *csr_file;
};
/*
@@ -1378,8 +1376,8 @@ static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev)
pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir);
/* Create Debugfs file for CSR read/write operations */
- pdev->csr_file = debugfs_create_file(cli->name, 0600,
- pdev->csr_dir, pdev, &csr_dbgfs_ops);
+ debugfs_create_file(cli->name, 0600, pdev->csr_dir, pdev,
+ &csr_dbgfs_ops);
}
/*
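The hunk above stops storing the dentry returned by debugfs_create_file() because debugfs creation results are not meant to be checked by callers; only the directory handle needed for teardown is kept. A minimal sketch of that idiom, with hypothetical names not taken from this driver:

#include <linux/debugfs.h>

static struct dentry *example_dbg_dir;

/* Hypothetical illustration: keep only the directory handle needed for
 * teardown; individual file handles can be ignored because debugfs errors
 * are not fatal and debugfs_remove_recursive() cleans everything up.
 */
static void example_debugfs_init(void *priv, const struct file_operations *fops)
{
	example_dbg_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("csr", 0600, example_dbg_dir, priv, fops);
}

static void example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dbg_dir);
}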
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
deleted file mode 100644
index fab02f2da077..000000000000
--- a/drivers/misc/fsa9480.c
+++ /dev/null
@@ -1,547 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * fsa9480.c - FSA9480 micro USB switch device driver
- *
- * Copyright (C) 2010 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- * Wonguk Jeong <wonguk.jeong@samsung.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/fsa9480.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-
-/* FSA9480 I2C registers */
-#define FSA9480_REG_DEVID 0x01
-#define FSA9480_REG_CTRL 0x02
-#define FSA9480_REG_INT1 0x03
-#define FSA9480_REG_INT2 0x04
-#define FSA9480_REG_INT1_MASK 0x05
-#define FSA9480_REG_INT2_MASK 0x06
-#define FSA9480_REG_ADC 0x07
-#define FSA9480_REG_TIMING1 0x08
-#define FSA9480_REG_TIMING2 0x09
-#define FSA9480_REG_DEV_T1 0x0a
-#define FSA9480_REG_DEV_T2 0x0b
-#define FSA9480_REG_BTN1 0x0c
-#define FSA9480_REG_BTN2 0x0d
-#define FSA9480_REG_CK 0x0e
-#define FSA9480_REG_CK_INT1 0x0f
-#define FSA9480_REG_CK_INT2 0x10
-#define FSA9480_REG_CK_INTMASK1 0x11
-#define FSA9480_REG_CK_INTMASK2 0x12
-#define FSA9480_REG_MANSW1 0x13
-#define FSA9480_REG_MANSW2 0x14
-
-/* Control */
-#define CON_SWITCH_OPEN (1 << 4)
-#define CON_RAW_DATA (1 << 3)
-#define CON_MANUAL_SW (1 << 2)
-#define CON_WAIT (1 << 1)
-#define CON_INT_MASK (1 << 0)
-#define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \
- CON_MANUAL_SW | CON_WAIT)
-
-/* Device Type 1 */
-#define DEV_USB_OTG (1 << 7)
-#define DEV_DEDICATED_CHG (1 << 6)
-#define DEV_USB_CHG (1 << 5)
-#define DEV_CAR_KIT (1 << 4)
-#define DEV_UART (1 << 3)
-#define DEV_USB (1 << 2)
-#define DEV_AUDIO_2 (1 << 1)
-#define DEV_AUDIO_1 (1 << 0)
-
-#define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB)
-#define DEV_T1_UART_MASK (DEV_UART)
-#define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG)
-
-/* Device Type 2 */
-#define DEV_AV (1 << 6)
-#define DEV_TTY (1 << 5)
-#define DEV_PPD (1 << 4)
-#define DEV_JIG_UART_OFF (1 << 3)
-#define DEV_JIG_UART_ON (1 << 2)
-#define DEV_JIG_USB_OFF (1 << 1)
-#define DEV_JIG_USB_ON (1 << 0)
-
-#define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON)
-#define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
-#define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \
- DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
-
-/*
- * Manual Switch
- * D- [7:5] / D+ [4:2]
- * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO
- */
-#define SW_VAUDIO ((4 << 5) | (4 << 2))
-#define SW_UART ((3 << 5) | (3 << 2))
-#define SW_AUDIO ((2 << 5) | (2 << 2))
-#define SW_DHOST ((1 << 5) | (1 << 2))
-#define SW_AUTO ((0 << 5) | (0 << 2))
-
-/* Interrupt 1 */
-#define INT_DETACH (1 << 1)
-#define INT_ATTACH (1 << 0)
-
-struct fsa9480_usbsw {
- struct i2c_client *client;
- struct fsa9480_platform_data *pdata;
- int dev1;
- int dev2;
- int mansw;
-};
-
-static struct fsa9480_usbsw *chip;
-
-static int fsa9480_write_reg(struct i2c_client *client,
- int reg, int value)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(client, reg, value);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static int fsa9480_read_reg(struct i2c_client *client, int reg)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static int fsa9480_read_irq(struct i2c_client *client, int *value)
-{
- int ret;
-
- ret = i2c_smbus_read_i2c_block_data(client,
- FSA9480_REG_INT1, 2, (u8 *)value);
- *value &= 0xffff;
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
-}
-
-static void fsa9480_set_switch(const char *buf)
-{
- struct fsa9480_usbsw *usbsw = chip;
- struct i2c_client *client = usbsw->client;
- unsigned int value;
- unsigned int path = 0;
-
- value = fsa9480_read_reg(client, FSA9480_REG_CTRL);
-
- if (!strncmp(buf, "VAUDIO", 6)) {
- path = SW_VAUDIO;
- value &= ~CON_MANUAL_SW;
- } else if (!strncmp(buf, "UART", 4)) {
- path = SW_UART;
- value &= ~CON_MANUAL_SW;
- } else if (!strncmp(buf, "AUDIO", 5)) {
- path = SW_AUDIO;
- value &= ~CON_MANUAL_SW;
- } else if (!strncmp(buf, "DHOST", 5)) {
- path = SW_DHOST;
- value &= ~CON_MANUAL_SW;
- } else if (!strncmp(buf, "AUTO", 4)) {
- path = SW_AUTO;
- value |= CON_MANUAL_SW;
- } else {
- printk(KERN_ERR "Wrong command\n");
- return;
- }
-
- usbsw->mansw = path;
- fsa9480_write_reg(client, FSA9480_REG_MANSW1, path);
- fsa9480_write_reg(client, FSA9480_REG_CTRL, value);
-}
-
-static ssize_t fsa9480_get_switch(char *buf)
-{
- struct fsa9480_usbsw *usbsw = chip;
- struct i2c_client *client = usbsw->client;
- unsigned int value;
-
- value = fsa9480_read_reg(client, FSA9480_REG_MANSW1);
-
- if (value == SW_VAUDIO)
- return sprintf(buf, "VAUDIO\n");
- else if (value == SW_UART)
- return sprintf(buf, "UART\n");
- else if (value == SW_AUDIO)
- return sprintf(buf, "AUDIO\n");
- else if (value == SW_DHOST)
- return sprintf(buf, "DHOST\n");
- else if (value == SW_AUTO)
- return sprintf(buf, "AUTO\n");
- else
- return sprintf(buf, "%x", value);
-}
-
-static ssize_t fsa9480_show_device(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev);
- struct i2c_client *client = usbsw->client;
- int dev1, dev2;
-
- dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
- dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
-
- if (!dev1 && !dev2)
- return sprintf(buf, "NONE\n");
-
- /* USB */
- if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK)
- return sprintf(buf, "USB\n");
-
- /* UART */
- if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK)
- return sprintf(buf, "UART\n");
-
- /* CHARGER */
- if (dev1 & DEV_T1_CHARGER_MASK)
- return sprintf(buf, "CHARGER\n");
-
- /* JIG */
- if (dev2 & DEV_T2_JIG_MASK)
- return sprintf(buf, "JIG\n");
-
- return sprintf(buf, "UNKNOWN\n");
-}
-
-static ssize_t fsa9480_show_manualsw(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return fsa9480_get_switch(buf);
-
-}
-
-static ssize_t fsa9480_set_manualsw(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- fsa9480_set_switch(buf);
-
- return count;
-}
-
-static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL);
-static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR,
- fsa9480_show_manualsw, fsa9480_set_manualsw);
-
-static struct attribute *fsa9480_attributes[] = {
- &dev_attr_device.attr,
- &dev_attr_switch.attr,
- NULL
-};
-
-static const struct attribute_group fsa9480_group = {
- .attrs = fsa9480_attributes,
-};
-
-static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr)
-{
- int val1, val2, ctrl;
- struct fsa9480_platform_data *pdata = usbsw->pdata;
- struct i2c_client *client = usbsw->client;
-
- val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
- val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
- ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL);
-
- dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n",
- intr, val1, val2);
-
- if (!intr)
- goto out;
-
- if (intr & INT_ATTACH) { /* Attached */
- /* USB */
- if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) {
- if (pdata->usb_cb)
- pdata->usb_cb(FSA9480_ATTACHED);
-
- if (usbsw->mansw) {
- fsa9480_write_reg(client,
- FSA9480_REG_MANSW1, usbsw->mansw);
- }
- }
-
- /* UART */
- if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) {
- if (pdata->uart_cb)
- pdata->uart_cb(FSA9480_ATTACHED);
-
- if (!(ctrl & CON_MANUAL_SW)) {
- fsa9480_write_reg(client,
- FSA9480_REG_MANSW1, SW_UART);
- }
- }
-
- /* CHARGER */
- if (val1 & DEV_T1_CHARGER_MASK) {
- if (pdata->charger_cb)
- pdata->charger_cb(FSA9480_ATTACHED);
- }
-
- /* JIG */
- if (val2 & DEV_T2_JIG_MASK) {
- if (pdata->jig_cb)
- pdata->jig_cb(FSA9480_ATTACHED);
- }
- } else if (intr & INT_DETACH) { /* Detached */
- /* USB */
- if (usbsw->dev1 & DEV_T1_USB_MASK ||
- usbsw->dev2 & DEV_T2_USB_MASK) {
- if (pdata->usb_cb)
- pdata->usb_cb(FSA9480_DETACHED);
- }
-
- /* UART */
- if (usbsw->dev1 & DEV_T1_UART_MASK ||
- usbsw->dev2 & DEV_T2_UART_MASK) {
- if (pdata->uart_cb)
- pdata->uart_cb(FSA9480_DETACHED);
- }
-
- /* CHARGER */
- if (usbsw->dev1 & DEV_T1_CHARGER_MASK) {
- if (pdata->charger_cb)
- pdata->charger_cb(FSA9480_DETACHED);
- }
-
- /* JIG */
- if (usbsw->dev2 & DEV_T2_JIG_MASK) {
- if (pdata->jig_cb)
- pdata->jig_cb(FSA9480_DETACHED);
- }
- }
-
- usbsw->dev1 = val1;
- usbsw->dev2 = val2;
-
-out:
- ctrl &= ~CON_INT_MASK;
- fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl);
-}
-
-static irqreturn_t fsa9480_irq_handler(int irq, void *data)
-{
- struct fsa9480_usbsw *usbsw = data;
- struct i2c_client *client = usbsw->client;
- int intr;
-
- /* clear interrupt */
- fsa9480_read_irq(client, &intr);
-
- /* device detection */
- fsa9480_detect_dev(usbsw, intr);
-
- return IRQ_HANDLED;
-}
-
-static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)
-{
- struct fsa9480_platform_data *pdata = usbsw->pdata;
- struct i2c_client *client = usbsw->client;
- int ret;
- int intr;
- unsigned int ctrl = CON_MASK;
-
- /* clear interrupt */
- fsa9480_read_irq(client, &intr);
-
- /* unmask interrupt (attach/detach only) */
- fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc);
- fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f);
-
- usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1);
-
- if (usbsw->mansw)
- ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */
-
- fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl);
-
- if (pdata && pdata->cfg_gpio)
- pdata->cfg_gpio();
-
- if (client->irq) {
- ret = request_threaded_irq(client->irq, NULL,
- fsa9480_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "fsa9480 micro USB", usbsw);
- if (ret) {
- dev_err(&client->dev, "failed to request IRQ\n");
- return ret;
- }
-
- if (pdata)
- device_init_wakeup(&client->dev, pdata->wakeup);
- }
-
- return 0;
-}
-
-static int fsa9480_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct fsa9480_usbsw *usbsw;
- int ret = 0;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
-
- usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL);
- if (!usbsw) {
- dev_err(&client->dev, "failed to allocate driver data\n");
- return -ENOMEM;
- }
-
- usbsw->client = client;
- usbsw->pdata = client->dev.platform_data;
-
- chip = usbsw;
-
- i2c_set_clientdata(client, usbsw);
-
- ret = fsa9480_irq_init(usbsw);
- if (ret)
- goto fail1;
-
- ret = sysfs_create_group(&client->dev.kobj, &fsa9480_group);
- if (ret) {
- dev_err(&client->dev,
- "failed to create fsa9480 attribute group\n");
- goto fail2;
- }
-
- /* ADC Detect Time: 500ms */
- fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6);
-
- if (chip->pdata->reset_cb)
- chip->pdata->reset_cb();
-
- /* device detection */
- fsa9480_detect_dev(usbsw, INT_ATTACH);
-
- pm_runtime_set_active(&client->dev);
-
- return 0;
-
-fail2:
- if (client->irq)
- free_irq(client->irq, usbsw);
-fail1:
- kfree(usbsw);
- return ret;
-}
-
-static int fsa9480_remove(struct i2c_client *client)
-{
- struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
-
- if (client->irq)
- free_irq(client->irq, usbsw);
-
- sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
- device_init_wakeup(&client->dev, 0);
- kfree(usbsw);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-static int fsa9480_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
- struct fsa9480_platform_data *pdata = usbsw->pdata;
-
- if (device_may_wakeup(&client->dev) && client->irq)
- enable_irq_wake(client->irq);
-
- if (pdata->usb_power)
- pdata->usb_power(0);
-
- return 0;
-}
-
-static int fsa9480_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
- int dev1, dev2;
-
- if (device_may_wakeup(&client->dev) && client->irq)
- disable_irq_wake(client->irq);
-
- /*
- * Clear Pending interrupt. Note that detect_dev does what
- * the interrupt handler does. So, we don't miss pending and
- * we reenable interrupt if there is one.
- */
- fsa9480_read_reg(client, FSA9480_REG_INT1);
- fsa9480_read_reg(client, FSA9480_REG_INT2);
-
- dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
- dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
-
- /* device detection */
- fsa9480_detect_dev(usbsw, (dev1 || dev2) ? INT_ATTACH : INT_DETACH);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(fsa9480_pm_ops, fsa9480_suspend, fsa9480_resume);
-#define FSA9480_PM_OPS (&fsa9480_pm_ops)
-
-#else
-
-#define FSA9480_PM_OPS NULL
-
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct i2c_device_id fsa9480_id[] = {
- {"fsa9480", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, fsa9480_id);
-
-static struct i2c_driver fsa9480_i2c_driver = {
- .driver = {
- .name = "fsa9480",
- .pm = FSA9480_PM_OPS,
- },
- .probe = fsa9480_probe,
- .remove = fsa9480_remove,
- .id_table = fsa9480_id,
-};
-
-module_i2c_driver(fsa9480_i2c_driver);
-
-MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
-MODULE_DESCRIPTION("FSA9480 USB Switch driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig
index a8a608713d26..97f64bcf9fe0 100644
--- a/drivers/misc/genwqe/Kconfig
+++ b/drivers/misc/genwqe/Kconfig
@@ -7,7 +7,6 @@ menuconfig GENWQE
tristate "GenWQE PCIe Accelerator"
depends on PCI && 64BIT
select CRC_ITU_T
- default n
help
Enables PCIe card driver for IBM GenWQE accelerators.
The user-space interface is described in
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
index f54e7971a762..2c01461701a3 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/asid.c
@@ -18,7 +18,7 @@ int hl_asid_init(struct hl_device *hdev)
mutex_init(&hdev->asid_mutex);
- /* ASID 0 is reserved for KMD */
+ /* ASID 0 is reserved for KMD and device CPU */
set_bit(0, hdev->asid_bitmap);
return 0;
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 6fe785e26859..6ad83d5ef4b0 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -682,14 +682,12 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
u32 tmp;
rc = hl_poll_timeout_memory(hdev,
- (u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
- jiffies_to_usecs(hdev->timeout_jiffies),
- &tmp);
+ &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
+ 100, jiffies_to_usecs(hdev->timeout_jiffies));
- if (rc || !tmp) {
+ if (rc == -ETIMEDOUT) {
dev_err(hdev->dev,
- "context switch phase didn't finish in time\n");
- rc = -ETIMEDOUT;
+ "context switch phase timeout (%d)\n", tmp);
goto out;
}
}
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index f4c92f110a72..8682590e3f6e 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -31,9 +31,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
*/
- hdev->asic_funcs->halt_coresight(hdev);
+ if (hdev->in_debug)
+ hl_device_set_debug_mode(hdev, false);
+
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
+ } else {
+ hl_mmu_ctx_fini(ctx);
}
}
@@ -117,6 +121,11 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
+ rc = hl_mmu_ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init mmu ctx module\n");
+ goto mem_ctx_err;
+ }
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index ba418aaa404c..18e499c900c7 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -355,7 +355,7 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- struct hl_ctx *ctx = hdev->user_ctx;
+ struct hl_ctx *ctx;
u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
@@ -367,6 +367,11 @@ static int mmu_show(struct seq_file *s, void *data)
if (!hdev->mmu_enable)
return 0;
+ if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
+ ctx = hdev->kernel_ctx;
+ else
+ ctx = hdev->user_ctx;
+
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return 0;
@@ -495,6 +500,36 @@ err:
return -EINVAL;
}
+static int engines_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ hdev->asic_funcs->is_device_idle(hdev, NULL, s);
+
+ return 0;
+}
+
+static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (!hdev->mmu_enable)
+ goto out;
+
+ if (hdev->dram_supports_virtual_memory &&
+ addr >= prop->va_space_dram_start_address &&
+ addr < prop->va_space_dram_end_address)
+ return true;
+
+ if (addr >= prop->va_space_host_start_address &&
+ addr < prop->va_space_host_end_address)
+ return true;
+out:
+ return false;
+}
+
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
@@ -568,7 +603,6 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
char tmp_buf[32];
u64 addr = entry->addr;
u32 val;
@@ -577,11 +611,8 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
if (*ppos)
return 0;
- if (addr >= prop->va_space_dram_start_address &&
- addr < prop->va_space_dram_end_address &&
- hdev->mmu_enable &&
- hdev->dram_supports_virtual_memory) {
- rc = device_va_to_pa(hdev, entry->addr, &addr);
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
if (rc)
return rc;
}
@@ -602,7 +633,6 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 addr = entry->addr;
u32 value;
ssize_t rc;
@@ -611,11 +641,8 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
if (rc)
return rc;
- if (addr >= prop->va_space_dram_start_address &&
- addr < prop->va_space_dram_end_address &&
- hdev->mmu_enable &&
- hdev->dram_supports_virtual_memory) {
- rc = device_va_to_pa(hdev, entry->addr, &addr);
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
if (rc)
return rc;
}
@@ -877,6 +904,7 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
{"mmu", mmu_show, mmu_write},
+ {"engines", engines_show, NULL}
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
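The new "engines" entry added above is registered through the driver's own hl_debugfs_list table. For orientation, the same show-only pattern is commonly expressed in generic kernel code with DEFINE_SHOW_ATTRIBUTE(); a hedged, self-contained sketch with hypothetical names (not this driver's registration path):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical show-only debugfs entry: DEFINE_SHOW_ATTRIBUTE() generates
 * engines_demo_fops and wires seq_file's private pointer to the 'priv'
 * argument passed to debugfs_create_file().
 */
static int engines_demo_show(struct seq_file *s, void *data)
{
	seq_printf(s, "engine state for %p would be dumped here\n", s->private);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(engines_demo);

static void engines_demo_register(struct dentry *parent, void *priv)
{
	debugfs_create_file("engines", 0444, parent, priv, &engines_demo_fops);
}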
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 0b19d3eefb98..0c4894dd9c02 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -231,6 +231,7 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->fd_open_cnt_lock);
mutex_init(&hdev->send_cpu_message_lock);
+ mutex_init(&hdev->debug_lock);
mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
spin_lock_init(&hdev->hw_queues_mirror_lock);
@@ -262,6 +263,7 @@ early_fini:
static void device_early_fini(struct hl_device *hdev)
{
mutex_destroy(&hdev->mmu_cache_lock);
+ mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
@@ -324,7 +326,15 @@ static int device_late_init(struct hl_device *hdev)
{
int rc;
- INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
+ if (hdev->asic_funcs->late_init) {
+ rc = hdev->asic_funcs->late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed late initialization for the H/W\n");
+ return rc;
+ }
+ }
+
hdev->high_pll = hdev->asic_prop.high_pll;
/* force setting to low frequency */
@@ -335,17 +345,9 @@ static int device_late_init(struct hl_device *hdev)
else
hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
- if (hdev->asic_funcs->late_init) {
- rc = hdev->asic_funcs->late_init(hdev);
- if (rc) {
- dev_err(hdev->dev,
- "failed late initialization for the H/W\n");
- return rc;
- }
- }
-
+ INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
schedule_delayed_work(&hdev->work_freq,
- usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
if (hdev->heartbeat) {
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
@@ -420,6 +422,52 @@ int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
return 1;
}
+int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
+{
+ int rc = 0;
+
+ mutex_lock(&hdev->debug_lock);
+
+ if (!enable) {
+ if (!hdev->in_debug) {
+ dev_err(hdev->dev,
+ "Failed to disable debug mode because device was not in debug mode\n");
+ rc = -EFAULT;
+ goto out;
+ }
+
+ hdev->asic_funcs->halt_coresight(hdev);
+ hdev->in_debug = 0;
+
+ goto out;
+ }
+
+ if (hdev->in_debug) {
+ dev_err(hdev->dev,
+ "Failed to enable debug mode because device is already in debug mode\n");
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ if (atomic_read(&hdev->fd_open_cnt) > 1) {
+ dev_err(hdev->dev,
+ "Failed to enable debug mode. More then a single user is using the device\n");
+ rc = -EPERM;
+ goto unlock_fd_open_lock;
+ }
+
+ hdev->in_debug = 1;
+
+unlock_fd_open_lock:
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+out:
+ mutex_unlock(&hdev->debug_lock);
+
+ return rc;
+}
+
/*
* hl_device_suspend - initiate device suspend
*
@@ -647,13 +695,6 @@ again:
hdev->hard_reset_pending = true;
- if (!hdev->pdev) {
- dev_err(hdev->dev,
- "Reset action is NOT supported in simulator\n");
- rc = -EINVAL;
- goto out_err;
- }
-
device_reset_work = kzalloc(sizeof(*device_reset_work),
GFP_ATOMIC);
if (!device_reset_work) {
@@ -704,6 +745,7 @@ again:
if (hard_reset) {
hl_vm_fini(hdev);
+ hl_mmu_fini(hdev);
hl_eq_reset(hdev, &hdev->event_queue);
}
@@ -731,6 +773,13 @@ again:
goto out_err;
}
+ rc = hl_mmu_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to initialize MMU S/W after hard reset\n");
+ goto out_err;
+ }
+
/* Allocate the kernel context */
hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
GFP_KERNEL);
@@ -902,11 +951,18 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto cq_fini;
}
+ /* MMU S/W must be initialized before kernel context is created */
+ rc = hl_mmu_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
+ goto eq_fini;
+ }
+
/* Allocate the kernel context */
hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
- goto eq_fini;
+ goto mmu_fini;
}
hdev->user_ctx = NULL;
@@ -954,8 +1010,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto out_disabled;
}
- /* After test_queues, KMD can start sending messages to device CPU */
-
rc = device_late_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed late initialization\n");
@@ -1001,6 +1055,8 @@ release_ctx:
"kernel ctx is still alive on initialization failure\n");
free_ctx:
kfree(hdev->kernel_ctx);
+mmu_fini:
+ hl_mmu_fini(hdev);
eq_fini:
hl_eq_fini(hdev, &hdev->event_queue);
cq_fini:
@@ -1105,6 +1161,8 @@ void hl_device_fini(struct hl_device *hdev)
hl_vm_fini(hdev);
+ hl_mmu_fini(hdev);
+
hl_eq_fini(hdev, &hdev->event_queue);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
@@ -1126,95 +1184,6 @@ void hl_device_fini(struct hl_device *hdev)
}
/*
- * hl_poll_timeout_memory - Periodically poll a host memory address
- * until it is not zero or a timeout occurs
- * @hdev: pointer to habanalabs device structure
- * @addr: Address to poll
- * @timeout_us: timeout in us
- * @val: Variable to read the value into
- *
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
- *
- * The function sleeps for 100us with timeout value of
- * timeout_us
- */
-int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
- u32 timeout_us, u32 *val)
-{
- /*
- * address in this function points always to a memory location in the
- * host's (server's) memory. That location is updated asynchronously
- * either by the direct access of the device or by another core
- */
- u32 *paddr = (u32 *) (uintptr_t) addr;
- ktime_t timeout;
-
- /* timeout should be longer when working with simulator */
- if (!hdev->pdev)
- timeout_us *= 10;
-
- timeout = ktime_add_us(ktime_get(), timeout_us);
-
- might_sleep();
-
- for (;;) {
- /*
- * Flush CPU read/write buffers to make sure we read updates
- * done by other cores or by the device
- */
- mb();
- *val = *paddr;
- if (*val)
- break;
- if (ktime_compare(ktime_get(), timeout) > 0) {
- *val = *paddr;
- break;
- }
- usleep_range((100 >> 2) + 1, 100);
- }
-
- return *val ? 0 : -ETIMEDOUT;
-}
-
-/*
- * hl_poll_timeout_devicememory - Periodically poll a device memory address
- * until it is not zero or a timeout occurs
- * @hdev: pointer to habanalabs device structure
- * @addr: Device address to poll
- * @timeout_us: timeout in us
- * @val: Variable to read the value into
- *
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
- *
- * The function sleeps for 100us with timeout value of
- * timeout_us
- */
-int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
- u32 timeout_us, u32 *val)
-{
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
-
- might_sleep();
-
- for (;;) {
- *val = readl(addr);
- if (*val)
- break;
- if (ktime_compare(ktime_get(), timeout) > 0) {
- *val = readl(addr);
- break;
- }
- usleep_range((100 >> 2) + 1, 100);
- }
-
- return *val ? 0 : -ETIMEDOUT;
-}
-
-/*
* MMIO register access helper functions.
*/
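The two open-coded pollers removed above are superseded by a condition-based hl_poll_timeout_memory() macro defined in the driver's headers (outside this hunk); the updated call sites in command_submission.c, firmware_if.c and goya.c pass (addr, val, cond, sleep_us, timeout_us) in the readx_poll_timeout() style. A rough, hypothetical sketch of that shape, for orientation only and not the driver's actual definition:

/* Assumes <linux/ktime.h> and <linux/delay.h>; the last read value stays in
 * 'val', and the expression evaluates to 0 on success or -ETIMEDOUT.
 */
#define demo_poll_timeout_memory(addr, val, cond, sleep_us, timeout_us)	\
({									\
	ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us);	\
	might_sleep();							\
	for (;;) {							\
		(val) = READ_ONCE(*(addr));				\
		if (cond)						\
			break;						\
		if (ktime_compare(ktime_get(), __timeout) > 0) {	\
			(val) = READ_ONCE(*(addr));			\
			break;						\
		}							\
		usleep_range((sleep_us) >> 2, sleep_us);		\
	}								\
	(cond) ? 0 : -ETIMEDOUT;					\
})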
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index eda5d7fcb79f..cc8168bacb24 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -29,13 +29,13 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
rc = request_firmware(&fw, fw_name, hdev->dev);
if (rc) {
- dev_err(hdev->dev, "Failed to request %s\n", fw_name);
+ dev_err(hdev->dev, "Firmware file %s is not found!\n", fw_name);
goto out;
}
fw_size = fw->size;
if ((fw_size % 4) != 0) {
- dev_err(hdev->dev, "illegal %s firmware size %zu\n",
+ dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
fw_name, fw_size);
rc = -EINVAL;
goto out;
@@ -85,12 +85,6 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u32 tmp;
int rc = 0;
- if (len > HL_CPU_CB_SIZE) {
- dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
- len);
- return -ENOMEM;
- }
-
pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
&pkt_dma_addr);
if (!pkt) {
@@ -117,33 +111,28 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto out;
}
- rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
- timeout, &tmp);
+ rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
+ (tmp == ARMCP_PACKET_FENCE_VAL), 1000, timeout);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
+ dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
hdev->device_cpu_disabled = true;
goto out;
}
- if (tmp == ARMCP_PACKET_FENCE_VAL) {
- u32 ctl = le32_to_cpu(pkt->ctl);
+ tmp = le32_to_cpu(pkt->ctl);
- rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
- if (rc) {
- dev_err(hdev->dev,
- "F/W ERROR %d for CPU packet %d\n",
- rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
+ rc = (tmp & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
+ if (rc) {
+ dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
+ rc,
+ (tmp & ARMCP_PKT_CTL_OPCODE_MASK)
>> ARMCP_PKT_CTL_OPCODE_SHIFT);
- rc = -EINVAL;
- } else if (result) {
- *result = (long) le64_to_cpu(pkt->result);
- }
- } else {
- dev_err(hdev->dev, "CPU packet wrong fence value\n");
- rc = -EINVAL;
+ rc = -EIO;
+ } else if (result) {
+ *result = (long) le64_to_cpu(pkt->result);
}
out:
@@ -186,9 +175,6 @@ void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
{
u64 kernel_addr;
- /* roundup to HL_CPU_PKT_SIZE */
- size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
-
kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
*dma_handle = hdev->cpu_accessible_dma_address +
@@ -200,9 +186,6 @@ void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr)
{
- /* roundup to HL_CPU_PKT_SIZE */
- size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
-
gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
size);
}
@@ -256,7 +239,7 @@ int hl_fw_armcp_info_get(struct hl_device *hdev)
HL_ARMCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
- "Failed to send armcp info pkt, error %d\n", rc);
+ "Failed to send ArmCP info pkt, error %d\n", rc);
goto out;
}
@@ -291,7 +274,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
max_size, &eeprom_info_dma_addr);
if (!eeprom_info_cpu_addr) {
dev_err(hdev->dev,
- "Failed to allocate DMA memory for EEPROM info packet\n");
+ "Failed to allocate DMA memory for ArmCP EEPROM packet\n");
return -ENOMEM;
}
@@ -307,7 +290,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
if (rc) {
dev_err(hdev->dev,
- "Failed to send armcp EEPROM pkt, error %d\n", rc);
+ "Failed to send ArmCP EEPROM packet, error %d\n", rc);
goto out;
}
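The caller-side round-up removed above relies on the backing pool enforcing its own granularity: goya.c (below) now creates the CPU-accessible pool with gen_pool_create(ilog2(32), -1), and genalloc already rounds every request up to the pool's minimum allocation order. A small hypothetical sketch of that behaviour, with made-up sizes:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/log2.h>

/* Hypothetical demonstration: with ilog2(32) as the minimum allocation order,
 * genalloc hands out space in 32-byte granules, so an 8-byte request still
 * consumes one full granule and callers no longer need to round sizes up.
 */
static int demo_pool_granularity(unsigned long backing_addr, size_t backing_len)
{
	struct gen_pool *pool = gen_pool_create(ilog2(32), -1);
	unsigned long addr;

	if (!pool)
		return -ENOMEM;

	if (gen_pool_add(pool, backing_addr, backing_len, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, 8);	/* occupies one 32-byte granule */
	if (addr)
		gen_pool_free(pool, addr, 8);

	gen_pool_destroy(pool);
	return 0;
}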
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 02d116b01a1a..75294ec65257 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -14,6 +14,8 @@
#include <linux/genalloc.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
/*
* GOYA security scheme:
@@ -89,6 +91,30 @@
#define GOYA_CB_POOL_CB_CNT 512
#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
+#define IS_QM_IDLE(engine, qm_glbl_sts0) \
+ (((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
+#define IS_DMA_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(DMA, qm_glbl_sts0)
+#define IS_TPC_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(TPC, qm_glbl_sts0)
+#define IS_MME_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(MME, qm_glbl_sts0)
+
+#define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
+ (((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
+ engine##_CMDQ_IDLE_MASK)
+#define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
+ IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
+#define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
+ IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)
+
+#define IS_DMA_IDLE(dma_core_sts0) \
+ !((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)
+
+#define IS_TPC_IDLE(tpc_cfg_sts) \
+ (((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)
+
+#define IS_MME_IDLE(mme_arch_sts) \
+ (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
+
+
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
"goya cq 4", "goya cpu eq"
@@ -297,6 +323,11 @@ static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};
+static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
+static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
+static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
+static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
+
void goya_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -467,7 +498,7 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
- rc = hl_pci_init(hdev, 39);
+ rc = hl_pci_init(hdev, 48);
if (rc)
return rc;
@@ -539,9 +570,36 @@ int goya_late_init(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
+ goya_fetch_psoc_frequency(hdev);
+
+ rc = goya_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to clear MMU page tables range %d\n", rc);
+ return rc;
+ }
+
+ rc = goya_mmu_set_dram_default_page(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
+ return rc;
+ }
+
+ rc = goya_mmu_add_mappings_for_device_cpu(hdev);
+ if (rc)
+ return rc;
+
+ rc = goya_init_cpu_queues(hdev);
+ if (rc)
+ return rc;
+
+ rc = goya_test_cpu_queue(hdev);
+ if (rc)
+ return rc;
+
rc = goya_armcp_info_get(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to get armcp info\n");
+ dev_err(hdev->dev, "Failed to get armcp info %d\n", rc);
return rc;
}
@@ -553,33 +611,15 @@ int goya_late_init(struct hl_device *hdev)
rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
if (rc) {
- dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ dev_err(hdev->dev,
+ "Failed to enable PCI access from CPU %d\n", rc);
return rc;
}
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
- goya_fetch_psoc_frequency(hdev);
-
- rc = goya_mmu_clear_pgt_range(hdev);
- if (rc) {
- dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
- goto disable_pci_access;
- }
-
- rc = goya_mmu_set_dram_default_page(hdev);
- if (rc) {
- dev_err(hdev->dev, "Failed to set DRAM default page\n");
- goto disable_pci_access;
- }
-
return 0;
-
-disable_pci_access:
- hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
-
- return rc;
}
/*
@@ -655,7 +695,10 @@ static int goya_sw_init(struct hl_device *hdev)
goto free_dma_pool;
}
- hdev->cpu_accessible_dma_pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);
+ dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n",
+ hdev->cpu_accessible_dma_address);
+
+ hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
if (!hdev->cpu_accessible_dma_pool) {
dev_err(hdev->dev,
"Failed to create CPU accessible DMA pool\n");
@@ -786,7 +829,6 @@ static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
else
sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
- WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}
@@ -973,9 +1015,9 @@ int goya_init_cpu_queues(struct hl_device *hdev)
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
- lower_32_bits(hdev->cpu_accessible_dma_address));
+ lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
- upper_32_bits(hdev->cpu_accessible_dma_address));
+ upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
@@ -1001,7 +1043,7 @@ int goya_init_cpu_queues(struct hl_device *hdev)
if (err) {
dev_err(hdev->dev,
- "Failed to communicate with ARM CPU (ArmCP timeout)\n");
+ "Failed to setup communication with device CPU\n");
return -EIO;
}
@@ -2061,10 +2103,12 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
goya_disable_external_queues(hdev);
goya_disable_internal_queues(hdev);
- if (hard_reset)
+ if (hard_reset) {
goya_disable_msix(hdev);
- else
+ goya_mmu_remove_device_cpu_mappings(hdev);
+ } else {
goya_sync_irqs(hdev);
+ }
}
/*
@@ -2277,14 +2321,14 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
- if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
- goto out;
-
if (!hdev->fw_loading) {
dev_info(hdev->dev, "Skip loading FW\n");
goto out;
}
+ if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
+ goto out;
+
rc = goya_push_linux_to_device(hdev);
if (rc)
return rc;
@@ -2466,34 +2510,11 @@ static int goya_hw_init(struct hl_device *hdev)
if (rc)
goto disable_queues;
- rc = goya_init_cpu_queues(hdev);
- if (rc) {
- dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
- rc);
- goto disable_msix;
- }
-
- /*
- * Check if we managed to set the DMA mask to more then 32 bits. If so,
- * let's try to increase it again because in Goya we set the initial
- * dma mask to less then 39 bits so that the allocation of the memory
- * area for the device's cpu will be under 39 bits
- */
- if (hdev->dma_mask > 32) {
- rc = hl_pci_set_dma_mask(hdev, 48);
- if (rc)
- goto disable_pci_access;
- }
-
/* Perform read from the device to flush all MSI-X configuration */
val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return 0;
-disable_pci_access:
- hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
-disable_msix:
- goya_disable_msix(hdev);
disable_queues:
goya_disable_internal_queues(hdev);
goya_disable_external_queues(hdev);
@@ -2629,7 +2650,6 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
u32 db_reg_offset, db_value;
- bool invalid_queue = false;
switch (hw_queue_id) {
case GOYA_QUEUE_ID_DMA_0:
@@ -2653,10 +2673,7 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
break;
case GOYA_QUEUE_ID_CPU_PQ:
- if (hdev->cpu_queues_enable)
- db_reg_offset = mmCPU_IF_PF_PQ_PI;
- else
- invalid_queue = true;
+ db_reg_offset = mmCPU_IF_PF_PQ_PI;
break;
case GOYA_QUEUE_ID_MME:
@@ -2696,12 +2713,8 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
break;
default:
- invalid_queue = true;
- }
-
- if (invalid_queue) {
/* Should never get here */
- dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
+ dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
hw_queue_id);
return;
}
@@ -2808,7 +2821,6 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
dma_addr_t fence_dma_addr;
struct hl_cb *cb;
u32 tmp, timeout;
- char buf[16] = {};
int rc;
if (hdev->pldm)
@@ -2816,10 +2828,9 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, buf, sizeof(buf))) {
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
dev_err_ratelimited(hdev->dev,
- "Can't send KMD job on QMAN0 because %s is busy\n",
- buf);
+ "Can't send KMD job on QMAN0 because the device is not idle\n");
return -EBUSY;
}
@@ -2831,16 +2842,8 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
return -ENOMEM;
}
- *fence_ptr = 0;
-
goya_qman0_set_security(hdev, true);
- /*
- * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For
- * synchronized kernel jobs we only need space for 1 packet_msg_prot
- */
- job->job_cb_size -= sizeof(struct packet_msg_prot);
-
cb = job->patched_cb;
fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
@@ -2860,14 +2863,14 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
goto free_fence_ptr;
}
- rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
- &tmp);
+ rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
+ (tmp == GOYA_QMAN0_FENCE_VAL), 1000, timeout);
hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
- if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
- dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
- rc = -ETIMEDOUT;
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
+ goto free_fence_ptr;
}
free_fence_ptr:
@@ -2941,20 +2944,19 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
goto free_pkt;
}
- rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
- GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
+ rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
+ 1000, GOYA_TEST_QUEUE_WAIT_USEC);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
- if ((!rc) && (tmp == fence_val)) {
- dev_info(hdev->dev,
- "queue test on H/W queue %d succeeded\n",
- hw_queue_id);
- } else {
+ if (rc == -ETIMEDOUT) {
dev_err(hdev->dev,
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
- rc = -EINVAL;
+ rc = -EIO;
+ } else {
+ dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
+ hw_queue_id);
}
free_pkt:
@@ -2990,12 +2992,6 @@ int goya_test_queues(struct hl_device *hdev)
ret_val = -EINVAL;
}
- if (hdev->cpu_queues_enable) {
- rc = goya_test_cpu_queue(hdev);
- if (rc)
- ret_val = -EINVAL;
- }
-
return ret_val;
}
@@ -3028,7 +3024,13 @@ static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle)
{
- return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+ void *vaddr;
+
+ vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+ *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
+ VA_CPU_ACCESSIBLE_MEM_ADDR;
+
+ return vaddr;
}
void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
@@ -3907,8 +3909,8 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
return goya_parse_cb_no_mmu(hdev, parser);
}
-void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
- u32 cq_val, u32 msix_vec)
+void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
+ u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec)
{
struct packet_msg_prot *cq_pkt;
u32 tmp;
@@ -3939,6 +3941,11 @@ void goya_update_eq_ci(struct hl_device *hdev, u32 val)
void goya_restore_phase_topology(struct hl_device *hdev)
{
+
+}
+
+static void goya_clear_sm_regs(struct hl_device *hdev)
+{
int i, num_of_sob_in_longs, num_of_mon_in_longs;
num_of_sob_in_longs =
@@ -3958,10 +3965,11 @@ void goya_restore_phase_topology(struct hl_device *hdev)
}
/*
- * goya_debugfs_read32 - read a 32bit value from a given device address
+ * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
+ * address.
*
* @hdev: pointer to hl_device structure
- * @addr: address in device
+ * @addr: device or host mapped address
* @val: returned value
*
* In case of DDR address that is not mapped into the default aperture that
@@ -4002,6 +4010,10 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
}
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
+
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
} else {
rc = -EFAULT;
}
@@ -4010,10 +4022,11 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
}
/*
- * goya_debugfs_write32 - write a 32bit value to a given device address
+ * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
+ * address.
*
* @hdev: pointer to hl_device structure
- * @addr: address in device
+ * @addr: device or host mapped address
* @val: returned value
*
* In case of DDR address that is not mapped into the default aperture that
@@ -4054,6 +4067,10 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
}
if (ddr_bar_addr == U64_MAX)
rc = -EIO;
+
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
} else {
rc = -EFAULT;
}
@@ -4086,6 +4103,47 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
static const char *_goya_get_event_desc(u16 event_type)
{
switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_PCIE_IF:
+ return "PCIe_if";
+ case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
+ return "TPC%d_ecc";
+ case GOYA_ASYNC_EVENT_ID_MME_ECC:
+ return "MME_ecc";
+ case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
+ return "MME_ecc_ext";
+ case GOYA_ASYNC_EVENT_ID_MMU_ECC:
+ return "MMU_ecc";
+ case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
+ return "DMA_macro";
+ case GOYA_ASYNC_EVENT_ID_DMA_ECC:
+ return "DMA_ecc";
+ case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
+ return "CPU_if_ecc";
+ case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
+ return "PSOC_mem";
+ case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
+ return "PSOC_coresight";
+ case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
+ return "SRAM%d";
+ case GOYA_ASYNC_EVENT_ID_GIC500:
+ return "GIC500";
+ case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
+ return "PLL%d";
+ case GOYA_ASYNC_EVENT_ID_AXI_ECC:
+ return "AXI_ecc";
+ case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
+ return "L2_ram_ecc";
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
+ return "PSOC_gpio_05_sw_reset";
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
+ return "PSOC_gpio_10_vrhot_icrit";
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
return "PCIe_dec";
case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
@@ -4128,6 +4186,17 @@ static const char *_goya_get_event_desc(u16 event_type)
return "DMA%d_qm";
case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
return "DMA%d_ch";
+ case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
+ return "TPC%d_bmon_spmu";
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
+ return "DMA_bm_ch%d";
default:
return "N/A";
}
@@ -4138,6 +4207,25 @@ static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
u8 index;
switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
+ index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
+ index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
+ index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
@@ -4176,6 +4264,21 @@ static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
+ index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
+ index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
default:
snprintf(desc, size, _goya_get_event_desc(event_type));
break;
@@ -4226,7 +4329,8 @@ static void goya_print_mmu_error_info(struct hl_device *hdev)
}
}
-static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
+static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
+ bool razwi)
{
char desc[20] = "";
@@ -4234,8 +4338,10 @@ static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
- goya_print_razwi_info(hdev);
- goya_print_mmu_error_info(hdev);
+ if (razwi) {
+ goya_print_razwi_info(hdev);
+ goya_print_mmu_error_info(hdev);
+ }
}
static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
@@ -4339,19 +4445,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
case GOYA_ASYNC_EVENT_ID_GIC500:
- case GOYA_ASYNC_EVENT_ID_PLL0:
- case GOYA_ASYNC_EVENT_ID_PLL1:
- case GOYA_ASYNC_EVENT_ID_PLL3:
- case GOYA_ASYNC_EVENT_ID_PLL4:
- case GOYA_ASYNC_EVENT_ID_PLL5:
- case GOYA_ASYNC_EVENT_ID_PLL6:
+ case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
- dev_err(hdev->dev,
- "Received H/W interrupt %d, reset the chip\n",
- event_type);
+ goya_print_irq_info(hdev, event_type, false);
hl_device_reset(hdev, true, false);
break;
@@ -4382,7 +4481,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
- goya_print_irq_info(hdev, event_type);
+ goya_print_irq_info(hdev, event_type, true);
goya_unmask_irq(hdev, event_type);
break;
@@ -4394,12 +4493,9 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
- case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
- case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
- case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
- case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
- case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
- dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
+ goya_print_irq_info(hdev, event_type, false);
+ goya_unmask_irq(hdev, event_type);
break;
default:
@@ -4418,36 +4514,47 @@ void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
return goya->events_stat;
}
-static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
+static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
u64 val, bool is_dram)
{
struct packet_lin_dma *lin_dma_pkt;
struct hl_cs_job *job;
u32 cb_size, ctl;
struct hl_cb *cb;
- int rc;
+ int rc, lin_dma_pkts_cnt;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
+ cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
+ sizeof(struct packet_msg_prot);
+ cb = hl_cb_kernel_create(hdev, cb_size);
if (!cb)
- return -EFAULT;
+ return -ENOMEM;
lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
- memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
- cb_size = sizeof(*lin_dma_pkt);
-
- ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
- (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
- (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
- (1 << GOYA_PKT_CTL_RB_SHIFT) |
- (1 << GOYA_PKT_CTL_MB_SHIFT));
- ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
- GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
- lin_dma_pkt->ctl = cpu_to_le32(ctl);
+ do {
+ memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
+
+ ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
+ (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
+ (1 << GOYA_PKT_CTL_RB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT));
+ ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+ lin_dma_pkt->ctl = cpu_to_le32(ctl);
+
+ lin_dma_pkt->src_addr = cpu_to_le64(val);
+ lin_dma_pkt->dst_addr = cpu_to_le64(addr);
+ if (lin_dma_pkts_cnt > 1)
+ lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
+ else
+ lin_dma_pkt->tsize = cpu_to_le32(size);
- lin_dma_pkt->src_addr = cpu_to_le64(val);
- lin_dma_pkt->dst_addr = cpu_to_le64(addr);
- lin_dma_pkt->tsize = cpu_to_le32(size);
+ size -= SZ_2G;
+ addr += SZ_2G;
+ lin_dma_pkt++;
+ } while (--lin_dma_pkts_cnt);
job = hl_cs_allocate_job(hdev, true);
if (!job) {
@@ -4462,8 +4569,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
job->user_cb_size = cb_size;
job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
job->patched_cb = job->user_cb;
- job->job_cb_size = job->user_cb_size +
- sizeof(struct packet_msg_prot) * 2;
+ job->job_cb_size = job->user_cb_size;
hl_debugfs_add_job(hdev, job);
@@ -4485,10 +4591,12 @@ release_cb:
int goya_context_switch(struct hl_device *hdev, u32 asid)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 addr = prop->sram_base_address;
+ u64 addr = prop->sram_base_address, sob_addr;
u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
u64 val = 0x7777777777777777ull;
- int rc;
+ int rc, dma_id;
+ u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
+ mmDMA_CH_0_WR_COMP_ADDR_LO;
rc = goya_memset_device_memory(hdev, addr, size, val, false);
if (rc) {
@@ -4496,13 +4604,27 @@ int goya_context_switch(struct hl_device *hdev, u32 asid)
return rc;
}
+ /* we need to reset registers that the user is allowed to change */
+ sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
+ WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));
+
+ for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
+ sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
+ (dma_id - 1) * 4;
+ WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
+ lower_32_bits(sob_addr));
+ }
+
WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
+
goya_mmu_prepare(hdev, asid);
+ goya_clear_sm_regs(hdev);
+
return 0;
}
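The per-channel writes above depend only on the DMA channel register blocks being laid out at a constant stride: channel N's register address is channel 0's address plus N times the distance between channels 0 and 1. A tiny standalone sketch of that addressing (the offsets below are made up, not Goya's real register map):

    #include <stdint.h>
    #include <stdio.h>

    #define CH_0_WR_COMP_ADDR_LO 0x401000u  /* placeholder offsets */
    #define CH_1_WR_COMP_ADDR_LO 0x409000u

    int main(void)
    {
            uint32_t stride = CH_1_WR_COMP_ADDR_LO - CH_0_WR_COMP_ADDR_LO;
            int ch;

            for (ch = 0; ch < 5; ch++)
                    printf("channel %d WR_COMP_ADDR_LO at 0x%x\n",
                           ch, CH_0_WR_COMP_ADDR_LO + stride * ch);
            return 0;
    }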
-int goya_mmu_clear_pgt_range(struct hl_device *hdev)
+static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
@@ -4516,7 +4638,7 @@ int goya_mmu_clear_pgt_range(struct hl_device *hdev)
return goya_memset_device_memory(hdev, addr, size, 0, true);
}
-int goya_mmu_set_dram_default_page(struct hl_device *hdev)
+static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
@@ -4529,7 +4651,123 @@ int goya_mmu_set_dram_default_page(struct hl_device *hdev)
return goya_memset_device_memory(hdev, addr, size, val, true);
}
-void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
+static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ s64 off, cpu_off;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
+ rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
+ prop->dram_base_address + off, PAGE_SIZE_2MB);
+ if (rc) {
+ dev_err(hdev->dev, "Map failed for address 0x%llx\n",
+ prop->dram_base_address + off);
+ goto unmap;
+ }
+ }
+
+ if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
+ rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
+ hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Map failed for CPU accessible memory\n");
+ off -= PAGE_SIZE_2MB;
+ goto unmap;
+ }
+ } else {
+ for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
+ rc = hl_mmu_map(hdev->kernel_ctx,
+ VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
+ hdev->cpu_accessible_dma_address + cpu_off,
+ PAGE_SIZE_4KB);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Map failed for CPU accessible memory\n");
+ cpu_off -= PAGE_SIZE_4KB;
+ goto unmap_cpu;
+ }
+ }
+ }
+
+ goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
+ goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
+ WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
+ WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
+
+ /* Make sure configuration is flushed to device */
+ RREG32(mmCPU_IF_AWUSER_OVR_EN);
+
+ goya->device_cpu_mmu_mappings_done = true;
+
+ return 0;
+
+unmap_cpu:
+ for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
+ if (hl_mmu_unmap(hdev->kernel_ctx,
+ VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
+ PAGE_SIZE_4KB))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap address 0x%llx\n",
+ VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
+unmap:
+ for (; off >= 0 ; off -= PAGE_SIZE_2MB)
+ if (hl_mmu_unmap(hdev->kernel_ctx,
+ prop->dram_base_address + off, PAGE_SIZE_2MB))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap address 0x%llx\n",
+ prop->dram_base_address + off);
+
+ return rc;
+}
+
+void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ u32 off, cpu_off;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ if (!goya->device_cpu_mmu_mappings_done)
+ return;
+
+ WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
+ WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
+
+ if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
+ if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
+ PAGE_SIZE_2MB))
+ dev_warn(hdev->dev,
+ "Failed to unmap CPU accessible memory\n");
+ } else {
+ for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
+ if (hl_mmu_unmap(hdev->kernel_ctx,
+ VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
+ PAGE_SIZE_4KB))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap address 0x%llx\n",
+ VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
+ }
+
+ for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
+ if (hl_mmu_unmap(hdev->kernel_ctx,
+ prop->dram_base_address + off, PAGE_SIZE_2MB))
+ dev_warn_ratelimited(hdev->dev,
+ "Failed to unmap address 0x%llx\n",
+ prop->dram_base_address + off);
+
+ goya->device_cpu_mmu_mappings_done = false;
+}
+
+static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
{
struct goya_device *goya = hdev->asic_specific;
int i;
@@ -4676,57 +4914,82 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
-static bool goya_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
+static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
+ struct seq_file *s)
{
- u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
+ const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
+ const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
+ u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
+ mme_arch_sts;
+ bool is_idle = true, is_eng_idle;
+ u64 offset;
int i;
+ if (s)
+ seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
+ "--- ------- ------------ -------------\n");
+
offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
for (i = 0 ; i < DMA_MAX_NUM ; i++) {
- dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
+ qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
+ dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
+ is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
+ IS_DMA_IDLE(dma_core_sts0);
+ is_idle &= is_eng_idle;
- if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
- DMA_QM_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "DMA%d_QM", i);
+ if (mask)
+ *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
+ if (s)
+ seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, dma_core_sts0);
}
+ if (s)
+ seq_puts(s,
+ "\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
+ "--- ------- ------------ -------------- ----------\n");
+
offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
for (i = 0 ; i < TPC_MAX_NUM ; i++) {
- tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
- tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
- tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
-
- if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
- TPC_QM_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "TPC%d_QM", i);
-
- if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
- TPC_CMDQ_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "TPC%d_CMDQ", i);
-
- if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
- TPC_CFG_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "TPC%d_CFG", i);
- }
-
- if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
- MME_QM_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "MME_QM");
-
- if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
- MME_CMDQ_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "MME_CMDQ");
-
- if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
- MME_ARCH_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "MME_ARCH");
-
- if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
- return HL_ENG_BUSY(buf, size, "MME");
-
- return true;
+ qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
+ cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
+ tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
+ is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
+ IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
+ IS_TPC_IDLE(tpc_cfg_sts);
+ is_idle &= is_eng_idle;
+
+ if (mask)
+ *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
+ if (s)
+ seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
+ "--- ------- ------------ -------------- -----------\n");
+
+ qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
+ cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
+ mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
+ is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
+ IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
+ IS_MME_IDLE(mme_arch_sts);
+ is_idle &= is_eng_idle;
+
+ if (mask)
+ *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
+ if (s) {
+ seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ cmdq_glbl_sts0, mme_arch_sts);
+ seq_puts(s, "\n");
+ }
+
+ return is_idle;
}
static void goya_hw_queues_lock(struct hl_device *hdev)
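The reworked idle check above accumulates a per-engine busy bitmask while also computing an overall idle flag. A minimal userspace sketch of that accumulation pattern (engine bit positions and names here are placeholders, not Goya's engine IDs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { ENG_DMA_0 = 0, ENG_TPC_0 = 5, ENG_MME_0 = 13 };     /* placeholders */

    /* Returns true only if every engine is idle; sets one mask bit per busy engine. */
    static bool report_idle(const bool *eng_idle, int num, int first_bit,
                            uint32_t *mask)
    {
            bool all_idle = true;
            int i;

            for (i = 0; i < num; i++) {
                    all_idle &= eng_idle[i];
                    if (mask)
                            *mask |= (uint32_t)!eng_idle[i] << (first_bit + i);
            }
            return all_idle;
    }

    int main(void)
    {
            bool dma_idle[5] = { true, false, true, true, true };
            uint32_t mask = 0;
            bool idle = report_idle(dma_idle, 5, ENG_DMA_0, &mask);

            printf("idle=%d busy_mask=0x%x\n", idle, mask);     /* busy_mask=0x2 */
            return 0;
    }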
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index c83cab0d641e..f8c611883dc1 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -126,6 +126,12 @@
#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \
VA_DDR_SPACE_START) /* 128GB */
+#if (HL_CPU_ACCESSIBLE_MEM_SIZE != SZ_2M)
+#error "HL_CPU_ACCESSIBLE_MEM_SIZE must be exactly 2MB to enable MMU mapping"
+#endif
+
+#define VA_CPU_ACCESSIBLE_MEM_ADDR 0x8000000000ull
+
#define DMA_MAX_TRANSFER_SIZE U32_MAX
#define HW_CAP_PLL 0x00000001
@@ -157,6 +163,7 @@ struct goya_device {
u64 ddr_bar_cur_addr;
u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
u32 hw_cap_initialized;
+ u8 device_cpu_mmu_mappings_done;
};
void goya_get_fixed_properties(struct hl_device *hdev);
@@ -204,18 +211,14 @@ int goya_armcp_info_get(struct hl_device *hdev);
int goya_debug_coresight(struct hl_device *hdev, void *data);
void goya_halt_coresight(struct hl_device *hdev);
-void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-int goya_mmu_clear_pgt_range(struct hl_device *hdev);
-int goya_mmu_set_dram_default_page(struct hl_device *hdev);
-
int goya_suspend(struct hl_device *hdev);
int goya_resume(struct hl_device *hdev);
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
-void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
- u32 cq_val, u32 msix_vec);
+void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
+ u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec);
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
@@ -225,5 +228,6 @@ void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle);
void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
+void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
#endif /* GOYAP_H_ */
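The #error guard added above pins HL_CPU_ACCESSIBLE_MEM_SIZE at preprocessor time. Where the value is a plain integer constant expression, the same protection can be expressed with a C11 static assertion (in kernel code BUILD_BUG_ON() is the usual in-function equivalent). A minimal standalone illustration, with MEM_SIZE as a placeholder rather than the driver's macro:

    #include <assert.h>

    #define MEM_SIZE (2u << 20)     /* 2 MiB */

    /* Fails the build if the constant ever drifts from 2 MiB. */
    static_assert(MEM_SIZE == (2u << 20),
                  "CPU-accessible memory must be exactly 2 MiB");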
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index d95d1b2f860d..d6ec12b3e692 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -677,6 +677,17 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC0_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC0_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_LFSR_POLYNOM & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -684,6 +695,11 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index adef7d9d7488..10da9940ee0d 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -34,6 +34,8 @@
#define HL_ARMCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_ARMCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
+
#define HL_MAX_QUEUES 128
#define HL_MAX_JOBS_PER_CS 64
@@ -123,7 +125,7 @@ enum hl_device_hw_state {
/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
- * @armcp_info: received various information from ArmCP regarding the H/W. e.g.
+ * @armcp_info: received various information from ArmCP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
* @preboot_ver: F/W Preboot version.
@@ -318,18 +320,8 @@ struct hl_cs_job;
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
-#define HL_CPU_PKT_SHIFT 5
-#define HL_CPU_PKT_SIZE (1 << HL_CPU_PKT_SHIFT)
-#define HL_CPU_PKT_MASK (~((1 << HL_CPU_PKT_SHIFT) - 1))
-#define HL_CPU_MAX_PKTS_IN_CB 32
-#define HL_CPU_CB_SIZE (HL_CPU_PKT_SIZE * \
- HL_CPU_MAX_PKTS_IN_CB)
-#define HL_CPU_CB_QUEUE_SIZE (HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)
-
-/* KMD <-> ArmCP shared memory size (EQ + PQ + CPU CB queue) */
-#define HL_CPU_ACCESSIBLE_MEM_SIZE (HL_EQ_SIZE_IN_BYTES + \
- HL_QUEUE_SIZE_IN_BYTES + \
- HL_CPU_CB_QUEUE_SIZE)
+/* KMD <-> ArmCP shared memory size */
+#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
/**
* struct hl_hw_queue - describes a H/W transport queue.
@@ -543,8 +535,9 @@ struct hl_asic_funcs {
enum dma_data_direction dir);
u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
struct sg_table *sgt);
- void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr,
- u32 cq_val, u32 msix_num);
+ void (*add_end_of_cb_packets)(struct hl_device *hdev,
+ u64 kernel_address, u32 len,
+ u64 cq_addr, u32 cq_val, u32 msix_num);
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
int (*context_switch)(struct hl_device *hdev, u32 asid);
void (*restore_phase_topology)(struct hl_device *hdev);
@@ -564,7 +557,8 @@ struct hl_asic_funcs {
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
- bool (*is_device_idle)(struct hl_device *hdev, char *buf, size_t size);
+ bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
+ struct seq_file *s);
int (*soft_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -1065,12 +1059,59 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(cond) ? 0 : -ETIMEDOUT; \
})
+/*
+ * The address in this macro always points to a memory location in the
+ * host's (server's) memory. That location is updated asynchronously,
+ * either directly by the device or by another core.
+ */
+#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout; \
+ /* timeout should be longer when working with simulator */ \
+ if (hdev->pdev) \
+ __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ else \
+ __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ /* Verify we read updates done by other cores or by device */ \
+ mb(); \
+ (val) = *((u32 *) (uintptr_t) (addr)); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = *((u32 *) (uintptr_t) (addr)); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
-#define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
- if (buf) \
- snprintf(buf, size, fmt, ##__VA_ARGS__); \
- false; \
- })
+#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
+ timeout_us) \
+({ \
+ ktime_t __timeout; \
+ /* timeout should be longer when working with simulator */ \
+ if (hdev->pdev) \
+ __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ else \
+ __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ (val) = readl(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = readl(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
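Both macros above follow the same shape: re-read the location in a loop, and re-read it one final time after the deadline so a value that arrives just as the timeout expires is not reported as a failure. A self-contained userspace analogue of that pattern (all names here are illustrative, not the driver's):

    #include <errno.h>
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    /* Poll *addr until it equals 'expected' or timeout_us elapses; do one
     * last re-read after the deadline before declaring a timeout.
     */
    static int poll_u32(volatile uint32_t *addr, uint32_t expected,
                        unsigned int sleep_us, unsigned int timeout_us)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    if (*addr == expected)
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if ((now.tv_sec - start.tv_sec) * 1000000L +
                        (now.tv_nsec - start.tv_nsec) / 1000L > (long)timeout_us)
                            return (*addr == expected) ? 0 : -ETIMEDOUT;
                    if (sleep_us)
                            usleep(sleep_us);
            }
    }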
struct hwmon_chip_info;
@@ -1117,6 +1158,7 @@ struct hl_device_reset_work {
* lock here so we can flush user processes which are opening
* the device while we are trying to hard reset it
* @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
+ * @debug_lock: protects the critical section of setting the device's debug mode
* @asic_prop: ASIC specific immutable properties.
* @asic_funcs: ASIC specific functions.
* @asic_specific: ASIC specific information to use only from ASIC files.
@@ -1159,6 +1201,8 @@ struct hl_device_reset_work {
* @mmu_enable: is MMU enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
+ * @in_debug: whether the device is under debug. Together with fd_open_cnt,
+ * this enforces that only a single user configures the debug infrastructure.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1188,6 +1232,7 @@ struct hl_device {
/* TODO: remove fd_open_cnt_lock for multiple process support */
struct mutex fd_open_cnt_lock;
struct mutex send_cpu_message_lock;
+ struct mutex debug_lock;
struct asic_fixed_properties asic_prop;
const struct hl_asic_funcs *asic_funcs;
void *asic_specific;
@@ -1230,6 +1275,7 @@ struct hl_device {
u8 init_done;
u8 device_cpu_disabled;
u8 dma_mask;
+ u8 in_debug;
/* Parameters for bring-up */
u8 mmu_enable;
@@ -1325,13 +1371,10 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
int hl_device_open(struct inode *inode, struct file *filp);
bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
enum hl_device_status hl_device_status(struct hl_device *hdev);
+int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
enum hl_asic_type asic_type, int minor);
void destroy_hdev(struct hl_device *hdev);
-int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us,
- u32 *val);
-int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
- u32 timeout_us, u32 *val);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 5f4d155be767..6f6dbe93f1df 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -105,9 +105,17 @@ int hl_device_open(struct inode *inode, struct file *filp)
return -EPERM;
}
+ if (hdev->in_debug) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't open %s because it is being debugged by another user\n",
+ dev_name(hdev->dev));
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+ return -EPERM;
+ }
+
if (atomic_read(&hdev->fd_open_cnt)) {
dev_info_ratelimited(hdev->dev,
- "Device %s is already attached to application\n",
+ "Can't open %s because another user is working on it\n",
dev_name(hdev->dev));
mutex_unlock(&hdev->fd_open_cnt_lock);
return -EBUSY;
@@ -164,6 +172,17 @@ close_device:
return rc;
}
+static void set_driver_behavior_per_device(struct hl_device *hdev)
+{
+ hdev->mmu_enable = 1;
+ hdev->cpu_enable = 1;
+ hdev->fw_loading = 1;
+ hdev->cpu_queues_enable = 1;
+ hdev->heartbeat = 1;
+
+ hdev->reset_pcilink = 0;
+}
+
/*
* create_hdev - create habanalabs device instance
*
@@ -188,29 +207,25 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
if (!hdev)
return -ENOMEM;
+ /* First, we must find out which ASIC we are handling. This is needed
+ * to configure the behavior of the driver (kernel parameters).
+ */
+ if (pdev) {
+ hdev->asic_type = get_asic_type(pdev->device);
+ if (hdev->asic_type == ASIC_INVALID) {
+ dev_err(&pdev->dev, "Unsupported ASIC\n");
+ rc = -ENODEV;
+ goto free_hdev;
+ }
+ } else {
+ hdev->asic_type = asic_type;
+ }
+
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
-
- /* Parameters for bring-up - set them to defaults */
- hdev->mmu_enable = 1;
- hdev->cpu_enable = 1;
- hdev->reset_pcilink = 0;
- hdev->cpu_queues_enable = 1;
- hdev->fw_loading = 1;
hdev->pldm = 0;
- hdev->heartbeat = 1;
-
- /* If CPU is disabled, no point in loading FW */
- if (!hdev->cpu_enable)
- hdev->fw_loading = 0;
- /* If we don't load FW, no need to initialize CPU queues */
- if (!hdev->fw_loading)
- hdev->cpu_queues_enable = 0;
-
- /* If CPU queues not enabled, no way to do heartbeat */
- if (!hdev->cpu_queues_enable)
- hdev->heartbeat = 0;
+ set_driver_behavior_per_device(hdev);
if (timeout_locked)
hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
@@ -220,17 +235,6 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->disabled = true;
hdev->pdev = pdev; /* can be NULL in case of simulator device */
- if (pdev) {
- hdev->asic_type = get_asic_type(pdev->device);
- if (hdev->asic_type == ASIC_INVALID) {
- dev_err(&pdev->dev, "Unsupported ASIC\n");
- rc = -ENODEV;
- goto free_hdev;
- }
- } else {
- hdev->asic_type = asic_type;
- }
-
/* Set default DMA mask to 32 bits */
hdev->dma_mask = 32;
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index b7a0eecf6b6c..07127576b3e8 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -119,7 +119,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, NULL, 0);
+ hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
+ &hw_idle.busy_engines_mask, NULL);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
@@ -254,10 +255,18 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
case HL_DEBUG_OP_BMON:
case HL_DEBUG_OP_SPMU:
case HL_DEBUG_OP_TIMESTAMP:
+ if (!hdev->in_debug) {
+ dev_err_ratelimited(hdev->dev,
+ "Rejecting debug configuration request because device not in debug mode\n");
+ return -EFAULT;
+ }
args->input_size =
min(args->input_size, hl_debug_struct_size[args->op]);
rc = debug_coresight(hdev, args);
break;
+ case HL_DEBUG_OP_SET_MODE:
+ rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
+ break;
default:
dev_err(hdev->dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
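The HL_DEBUG_OP_SET_MODE handler above defers to hl_device_set_debug_mode(), whose body is not part of this hunk. As a rough sketch only, a mutex-guarded single-owner toggle of this kind typically looks like the following (all names assumed for illustration, not the driver's implementation):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_dev {
            struct mutex debug_lock;
            bool in_debug;
    };

    static int demo_set_debug_mode(struct demo_dev *d, bool enable)
    {
            int rc = 0;

            mutex_lock(&d->debug_lock);
            if (enable && d->in_debug)
                    rc = -EBUSY;            /* another user already owns debug */
            else
                    d->in_debug = enable;
            mutex_unlock(&d->debug_lock);

            return rc;
    }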
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 2894d8975933..e3b5517897ea 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -265,7 +265,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
cq = &hdev->completion_queue[q->hw_queue_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
- hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
+ hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
cq_addr,
__le32_to_cpu(cq_pkt.data),
q->hw_queue_id);
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
new file mode 100644
index 000000000000..028143408401
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_0_MASKS_H_
+#define ASIC_REG_DMA_CH_0_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_CH_0 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+/* DMA_CH_0_CFG0 */
+#define DMA_CH_0_CFG0_RD_MAX_OUTSTAND_SHIFT 0
+#define DMA_CH_0_CFG0_RD_MAX_OUTSTAND_MASK 0x3FF
+#define DMA_CH_0_CFG0_WR_MAX_OUTSTAND_SHIFT 16
+#define DMA_CH_0_CFG0_WR_MAX_OUTSTAND_MASK 0xFFF0000
+
+/* DMA_CH_0_CFG1 */
+#define DMA_CH_0_CFG1_RD_BUF_MAX_SIZE_SHIFT 0
+#define DMA_CH_0_CFG1_RD_BUF_MAX_SIZE_MASK 0x3FF
+
+/* DMA_CH_0_ERRMSG_ADDR_LO */
+#define DMA_CH_0_ERRMSG_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_ERRMSG_ADDR_HI */
+#define DMA_CH_0_ERRMSG_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_ERRMSG_WDATA */
+#define DMA_CH_0_ERRMSG_WDATA_VAL_SHIFT 0
+#define DMA_CH_0_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_RD_COMP_ADDR_LO */
+#define DMA_CH_0_RD_COMP_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_RD_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_RD_COMP_ADDR_HI */
+#define DMA_CH_0_RD_COMP_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_RD_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_RD_COMP_WDATA */
+#define DMA_CH_0_RD_COMP_WDATA_VAL_SHIFT 0
+#define DMA_CH_0_RD_COMP_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_WR_COMP_ADDR_LO */
+#define DMA_CH_0_WR_COMP_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_WR_COMP_ADDR_HI */
+#define DMA_CH_0_WR_COMP_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_WR_COMP_WDATA */
+#define DMA_CH_0_WR_COMP_WDATA_VAL_SHIFT 0
+#define DMA_CH_0_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_LDMA_SRC_ADDR_LO */
+#define DMA_CH_0_LDMA_SRC_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_LDMA_SRC_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_LDMA_SRC_ADDR_HI */
+#define DMA_CH_0_LDMA_SRC_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_LDMA_SRC_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_LDMA_DST_ADDR_LO */
+#define DMA_CH_0_LDMA_DST_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_LDMA_DST_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_LDMA_DST_ADDR_HI */
+#define DMA_CH_0_LDMA_DST_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_LDMA_DST_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_LDMA_TSIZE */
+#define DMA_CH_0_LDMA_TSIZE_VAL_SHIFT 0
+#define DMA_CH_0_LDMA_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_COMIT_TRANSFER */
+#define DMA_CH_0_COMIT_TRANSFER_PCI_UPS_WKORDR_SHIFT 0
+#define DMA_CH_0_COMIT_TRANSFER_PCI_UPS_WKORDR_MASK 0x1
+#define DMA_CH_0_COMIT_TRANSFER_RD_COMP_EN_SHIFT 1
+#define DMA_CH_0_COMIT_TRANSFER_RD_COMP_EN_MASK 0x2
+#define DMA_CH_0_COMIT_TRANSFER_WR_COMP_EN_SHIFT 2
+#define DMA_CH_0_COMIT_TRANSFER_WR_COMP_EN_MASK 0x4
+#define DMA_CH_0_COMIT_TRANSFER_NOSNOOP_SHIFT 3
+#define DMA_CH_0_COMIT_TRANSFER_NOSNOOP_MASK 0x8
+#define DMA_CH_0_COMIT_TRANSFER_SRC_ADDR_INC_DIS_SHIFT 4
+#define DMA_CH_0_COMIT_TRANSFER_SRC_ADDR_INC_DIS_MASK 0x10
+#define DMA_CH_0_COMIT_TRANSFER_DST_ADDR_INC_DIS_SHIFT 5
+#define DMA_CH_0_COMIT_TRANSFER_DST_ADDR_INC_DIS_MASK 0x20
+#define DMA_CH_0_COMIT_TRANSFER_MEM_SET_SHIFT 6
+#define DMA_CH_0_COMIT_TRANSFER_MEM_SET_MASK 0x40
+#define DMA_CH_0_COMIT_TRANSFER_MOD_TENSOR_SHIFT 15
+#define DMA_CH_0_COMIT_TRANSFER_MOD_TENSOR_MASK 0x8000
+#define DMA_CH_0_COMIT_TRANSFER_CTL_SHIFT 16
+#define DMA_CH_0_COMIT_TRANSFER_CTL_MASK 0xFFFF0000
+
+/* DMA_CH_0_STS0 */
+#define DMA_CH_0_STS0_DMA_BUSY_SHIFT 0
+#define DMA_CH_0_STS0_DMA_BUSY_MASK 0x1
+#define DMA_CH_0_STS0_RD_STS_CTX_FULL_SHIFT 1
+#define DMA_CH_0_STS0_RD_STS_CTX_FULL_MASK 0x2
+#define DMA_CH_0_STS0_WR_STS_CTX_FULL_SHIFT 2
+#define DMA_CH_0_STS0_WR_STS_CTX_FULL_MASK 0x4
+
+/* DMA_CH_0_STS1 */
+#define DMA_CH_0_STS1_RD_STS_CTX_CNT_SHIFT 0
+#define DMA_CH_0_STS1_RD_STS_CTX_CNT_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_STS2 */
+#define DMA_CH_0_STS2_WR_STS_CTX_CNT_SHIFT 0
+#define DMA_CH_0_STS2_WR_STS_CTX_CNT_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_STS3 */
+#define DMA_CH_0_STS3_RD_STS_TRN_CNT_SHIFT 0
+#define DMA_CH_0_STS3_RD_STS_TRN_CNT_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_STS4 */
+#define DMA_CH_0_STS4_WR_STS_TRN_CNT_SHIFT 0
+#define DMA_CH_0_STS4_WR_STS_TRN_CNT_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_SRC_ADDR_LO_STS */
+#define DMA_CH_0_SRC_ADDR_LO_STS_VAL_SHIFT 0
+#define DMA_CH_0_SRC_ADDR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_SRC_ADDR_HI_STS */
+#define DMA_CH_0_SRC_ADDR_HI_STS_VAL_SHIFT 0
+#define DMA_CH_0_SRC_ADDR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_SRC_TSIZE_STS */
+#define DMA_CH_0_SRC_TSIZE_STS_VAL_SHIFT 0
+#define DMA_CH_0_SRC_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_DST_ADDR_LO_STS */
+#define DMA_CH_0_DST_ADDR_LO_STS_VAL_SHIFT 0
+#define DMA_CH_0_DST_ADDR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_DST_ADDR_HI_STS */
+#define DMA_CH_0_DST_ADDR_HI_STS_VAL_SHIFT 0
+#define DMA_CH_0_DST_ADDR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_DST_TSIZE_STS */
+#define DMA_CH_0_DST_TSIZE_STS_VAL_SHIFT 0
+#define DMA_CH_0_DST_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_RD_RATE_LIM_EN */
+#define DMA_CH_0_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define DMA_CH_0_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* DMA_CH_0_RD_RATE_LIM_RST_TOKEN */
+#define DMA_CH_0_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define DMA_CH_0_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_CH_0_RD_RATE_LIM_SAT */
+#define DMA_CH_0_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define DMA_CH_0_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* DMA_CH_0_RD_RATE_LIM_TOUT */
+#define DMA_CH_0_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define DMA_CH_0_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* DMA_CH_0_WR_RATE_LIM_EN */
+#define DMA_CH_0_WR_RATE_LIM_EN_VAL_SHIFT 0
+#define DMA_CH_0_WR_RATE_LIM_EN_VAL_MASK 0x1
+
+/* DMA_CH_0_WR_RATE_LIM_RST_TOKEN */
+#define DMA_CH_0_WR_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define DMA_CH_0_WR_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_CH_0_WR_RATE_LIM_SAT */
+#define DMA_CH_0_WR_RATE_LIM_SAT_VAL_SHIFT 0
+#define DMA_CH_0_WR_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* DMA_CH_0_WR_RATE_LIM_TOUT */
+#define DMA_CH_0_WR_RATE_LIM_TOUT_VAL_SHIFT 0
+#define DMA_CH_0_WR_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* DMA_CH_0_CFG2 */
+#define DMA_CH_0_CFG2_FORCE_WORD_SHIFT 0
+#define DMA_CH_0_CFG2_FORCE_WORD_MASK 0x1
+
+/* DMA_CH_0_TDMA_CTL */
+#define DMA_CH_0_TDMA_CTL_DTYPE_SHIFT 0
+#define DMA_CH_0_TDMA_CTL_DTYPE_MASK 0x7
+
+/* DMA_CH_0_TDMA_SRC_BASE_ADDR_LO */
+#define DMA_CH_0_TDMA_SRC_BASE_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_BASE_ADDR_HI */
+#define DMA_CH_0_TDMA_SRC_BASE_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_BASE_0 */
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_SIZE_0 */
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 */
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_START_OFFSET_0 */
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_STRIDE_0 */
+#define DMA_CH_0_TDMA_SRC_STRIDE_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_STRIDE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_BASE_1 */
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_SIZE_1 */
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 */
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_START_OFFSET_1 */
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_STRIDE_1 */
+#define DMA_CH_0_TDMA_SRC_STRIDE_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_BASE_2 */
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_SIZE_2 */
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 */
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_START_OFFSET_2 */
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_STRIDE_2 */
+#define DMA_CH_0_TDMA_SRC_STRIDE_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_BASE_3 */
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_SIZE_3 */
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 */
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_START_OFFSET_3 */
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_STRIDE_3 */
+#define DMA_CH_0_TDMA_SRC_STRIDE_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_BASE_4 */
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_BASE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_ROI_SIZE_4 */
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_ROI_SIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 */
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_START_OFFSET_4 */
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_START_OFFSET_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_SRC_STRIDE_4 */
+#define DMA_CH_0_TDMA_SRC_STRIDE_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_BASE_ADDR_LO */
+#define DMA_CH_0_TDMA_DST_BASE_ADDR_LO_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_BASE_ADDR_HI */
+#define DMA_CH_0_TDMA_DST_BASE_ADDR_HI_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_BASE_0 */
+#define DMA_CH_0_TDMA_DST_ROI_BASE_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_BASE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_SIZE_0 */
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 */
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_START_OFFSET_0 */
+#define DMA_CH_0_TDMA_DST_START_OFFSET_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_START_OFFSET_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_STRIDE_0 */
+#define DMA_CH_0_TDMA_DST_STRIDE_0_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_STRIDE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_BASE_1 */
+#define DMA_CH_0_TDMA_DST_ROI_BASE_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_BASE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_SIZE_1 */
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 */
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_START_OFFSET_1 */
+#define DMA_CH_0_TDMA_DST_START_OFFSET_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_START_OFFSET_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_STRIDE_1 */
+#define DMA_CH_0_TDMA_DST_STRIDE_1_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_BASE_2 */
+#define DMA_CH_0_TDMA_DST_ROI_BASE_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_BASE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_SIZE_2 */
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 */
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_START_OFFSET_2 */
+#define DMA_CH_0_TDMA_DST_START_OFFSET_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_START_OFFSET_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_STRIDE_2 */
+#define DMA_CH_0_TDMA_DST_STRIDE_2_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_BASE_3 */
+#define DMA_CH_0_TDMA_DST_ROI_BASE_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_BASE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_SIZE_3 */
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 */
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_START_OFFSET_3 */
+#define DMA_CH_0_TDMA_DST_START_OFFSET_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_START_OFFSET_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_STRIDE_3 */
+#define DMA_CH_0_TDMA_DST_STRIDE_3_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_BASE_4 */
+#define DMA_CH_0_TDMA_DST_ROI_BASE_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_BASE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_ROI_SIZE_4 */
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_ROI_SIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 */
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_START_OFFSET_4 */
+#define DMA_CH_0_TDMA_DST_START_OFFSET_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_START_OFFSET_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_TDMA_DST_STRIDE_4 */
+#define DMA_CH_0_TDMA_DST_STRIDE_4_VAL_SHIFT 0
+#define DMA_CH_0_TDMA_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA_CH_0_MEM_INIT_BUSY */
+#define DMA_CH_0_MEM_INIT_BUSY_SBC_DATA_SHIFT 0
+#define DMA_CH_0_MEM_INIT_BUSY_SBC_DATA_MASK 0xFF
+#define DMA_CH_0_MEM_INIT_BUSY_SBC_MD_SHIFT 8
+#define DMA_CH_0_MEM_INIT_BUSY_SBC_MD_MASK 0x100
+
+#endif /* ASIC_REG_DMA_CH_0_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 506e71e201e1..19b0f0ef1d0b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -88,6 +88,7 @@
#include "psoc_global_conf_masks.h"
#include "dma_macro_masks.h"
#include "dma_qm_0_masks.h"
+#include "dma_ch_0_masks.h"
#include "tpc0_qm_masks.h"
#include "tpc0_cmdq_masks.h"
#include "mme_qm_masks.h"
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 693877e37fd8..42d237cae1dc 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -1657,17 +1657,10 @@ int hl_vm_init(struct hl_device *hdev)
struct hl_vm *vm = &hdev->vm;
int rc;
- rc = hl_mmu_init(hdev);
- if (rc) {
- dev_err(hdev->dev, "Failed to init MMU\n");
- return rc;
- }
-
vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
if (!vm->dram_pg_pool) {
dev_err(hdev->dev, "Failed to create dram page pool\n");
- rc = -ENOMEM;
- goto pool_create_err;
+ return -ENOMEM;
}
kref_init(&vm->dram_pg_pool_refcount);
@@ -1693,8 +1686,6 @@ int hl_vm_init(struct hl_device *hdev)
pool_add_err:
gen_pool_destroy(vm->dram_pg_pool);
-pool_create_err:
- hl_mmu_fini(hdev);
return rc;
}
@@ -1724,7 +1715,5 @@ void hl_vm_fini(struct hl_device *hdev)
dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
__func__);
- hl_mmu_fini(hdev);
-
vm->init_done = false;
}
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 10aee3141444..176c315836f1 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -241,8 +241,9 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
hop2_pte_addr, hop3_pte_addr, pte_val;
int rc, i, j, hop3_allocated = 0;
- if (!hdev->dram_supports_virtual_memory ||
- !hdev->dram_default_page_mapping)
+ if ((!hdev->dram_supports_virtual_memory) ||
+ (!hdev->dram_default_page_mapping) ||
+ (ctx->asid == HL_KERNEL_ASID_ID))
return 0;
num_of_hop3 = prop->dram_size_for_default_page_mapping;
@@ -340,8 +341,9 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
hop2_pte_addr, hop3_pte_addr;
int i, j;
- if (!hdev->dram_supports_virtual_memory ||
- !hdev->dram_default_page_mapping)
+ if ((!hdev->dram_supports_virtual_memory) ||
+ (!hdev->dram_default_page_mapping) ||
+ (ctx->asid == HL_KERNEL_ASID_ID))
return;
num_of_hop3 = prop->dram_size_for_default_page_mapping;
@@ -385,12 +387,8 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
* @hdev: habanalabs device structure.
*
* This function does the following:
- * - Allocate max_asid zeroed hop0 pgts so no mapping is available.
- * - Enable MMU in H/W.
- * - Invalidate the MMU cache.
* - Create a pool of pages for pgt_infos.
- *
- * This function depends on DMA QMAN to be working!
+ * - Create a shadow table for pgt
*
* Return: 0 for success, non-zero for failure.
*/
@@ -915,6 +913,10 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
return -EFAULT;
}
+ WARN_ONCE((phys_addr & (real_page_size - 1)),
+ "Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
+ phys_addr, real_page_size);
+
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
real_phys_addr = phys_addr;
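The WARN_ONCE added above catches a physical address that is not a multiple of the (power-of-two) real page size. The test itself is just a mask check; a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if addr is a multiple of page_size; page_size must be a power of two. */
    static inline bool addr_is_page_aligned(uint64_t addr, uint32_t page_size)
    {
            return (addr & (page_size - 1)) == 0;
    }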
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
index 0e78a04d63f4..c98d88c7a5c6 100644
--- a/drivers/misc/habanalabs/pci.c
+++ b/drivers/misc/habanalabs/pci.c
@@ -10,6 +10,8 @@
#include <linux/pci.h>
+#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10)
+
/**
* hl_pci_bars_map() - Map PCI BARs.
* @hdev: Pointer to hl_device structure.
@@ -88,8 +90,14 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
struct pci_dev *pdev = hdev->pdev;
ktime_t timeout;
+ u64 msec;
u32 val;
+ if (hdev->pldm)
+ msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
+ else
+ msec = HL_PCI_ELBI_TIMEOUT_MSEC;
+
/* Clear previous status */
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
@@ -98,7 +106,7 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
PCI_CONFIG_ELBI_CTRL_WRITE);
- timeout = ktime_add_ms(ktime_get(), 10);
+ timeout = ktime_add_ms(ktime_get(), msec);
for (;;) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
if (val & PCI_CONFIG_ELBI_STS_MASK)
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index c900ab15cceb..25eb46d29d88 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -328,10 +328,6 @@ static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- /* Use dummy, fixed address for simulator */
- if (!hdev->pdev)
- return sprintf(buf, "0000:%02d:00.0\n", hdev->id);
-
return sprintf(buf, "%04x:%02x:%02x.%x\n",
pci_domain_nr(hdev->pdev->bus),
hdev->pdev->bus->number,
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 3431a825f24e..c12406f610d5 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -3,7 +3,7 @@
* isl29003.c - Linux kernel module for
* Intersil ISL29003 ambient light sensor
*
- * See file:Documentation/misc-devices/isl29003
+ * See file:Documentation/misc-devices/isl29003.rst
*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*
@@ -377,7 +377,7 @@ static int isl29003_init_client(struct i2c_client *client)
static int isl29003_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct isl29003_data *data;
int err = 0;
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
index 4cfad45229c8..bb2fec4b5880 100644
--- a/drivers/misc/lis3lv02d/Kconfig
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -7,7 +7,6 @@ config SENSORS_LIS3_SPI
tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
depends on !ACPI && SPI_MASTER && INPUT
select SENSORS_LIS3LV02D
- default n
help
This driver provides support for the LIS3LV02Dx accelerometer connected
via SPI. The accelerometer data is readable via
@@ -24,7 +23,6 @@ config SENSORS_LIS3_I2C
tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
depends on I2C && INPUT
select SENSORS_LIS3LV02D
- default n
help
This driver provides support for the LIS3LV02Dx accelerometer connected
via I2C. The accelerometer data is readable via
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 951c984de61a..fb10eafe9bde 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -15,8 +15,7 @@ KCOV_INSTRUMENT_rodata.o := n
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
- --set-section-flags .text=alloc,readonly \
- --rename-section .text=.rodata
+ --rename-section .text=.rodata,alloc,readonly,load
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index d9fcfd3b5af0..1606658b9b7e 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -266,3 +266,69 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
pr_err("FAIL: accessed page after stack!\n");
}
+
+void lkdtm_UNSET_SMEP(void)
+{
+#ifdef CONFIG_X86_64
+#define MOV_CR4_DEPTH 64
+ void (*direct_write_cr4)(unsigned long val);
+ unsigned char *insn;
+ unsigned long cr4;
+ int i;
+
+ cr4 = native_read_cr4();
+
+ if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
+ pr_err("FAIL: SMEP not in use\n");
+ return;
+ }
+ cr4 &= ~(X86_CR4_SMEP);
+
+ pr_info("trying to clear SMEP normally\n");
+ native_write_cr4(cr4);
+ if (cr4 == native_read_cr4()) {
+ pr_err("FAIL: pinning SMEP failed!\n");
+ cr4 |= X86_CR4_SMEP;
+ pr_info("restoring SMEP\n");
+ native_write_cr4(cr4);
+ return;
+ }
+ pr_info("ok: SMEP did not get cleared\n");
+
+ /*
+ * To test the post-write pinning verification we need to call
+ * directly into the middle of native_write_cr4() where the
+ * cr4 write happens, skipping any pinning. This searches for
+ * the cr4 writing instruction.
+ */
+ insn = (unsigned char *)native_write_cr4;
+ for (i = 0; i < MOV_CR4_DEPTH; i++) {
+ /* mov %rdi, %cr4 */
+ if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
+ break;
+ /* mov %rdi,%rax; mov %rax, %cr4 */
+ if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
+ insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
+ insn[i+4] == 0x22 && insn[i+5] == 0xe0)
+ break;
+ }
+ if (i >= MOV_CR4_DEPTH) {
+ pr_info("ok: cannot locate cr4 writing call gadget\n");
+ return;
+ }
+ direct_write_cr4 = (void *)(insn + i);
+
+ pr_info("trying to clear SMEP with call gadget\n");
+ direct_write_cr4(cr4);
+ if (native_read_cr4() & X86_CR4_SMEP) {
+ pr_info("ok: SMEP removal was reverted\n");
+ } else {
+ pr_err("FAIL: cleared SMEP not detected!\n");
+ cr4 |= X86_CR4_SMEP;
+ pr_info("restoring SMEP\n");
+ native_write_cr4(cr4);
+ }
+#else
+ pr_err("FAIL: this test is x86_64-only\n");
+#endif
+}
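The instruction search above is a plain byte-pattern scan over the compiled function body; the three-byte encoding comes from the comments in the test itself. A self-contained userspace sketch of the same scan (the code buffer below is a made-up example):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Return the offset of 'pat' within 'buf', or -1 if not found. */
    static ptrdiff_t find_pattern(const unsigned char *buf, size_t len,
                                  const unsigned char *pat, size_t patlen)
    {
            size_t i;

            for (i = 0; i + patlen <= len; i++)
                    if (!memcmp(buf + i, pat, patlen))
                            return (ptrdiff_t)i;
            return -1;
    }

    int main(void)
    {
            /* 0f 22 e7 encodes "mov %rdi, %cr4" on x86-64 */
            const unsigned char code[] = { 0x55, 0x48, 0x89, 0xe5,
                                           0x0f, 0x22, 0xe7, 0xc3 };
            const unsigned char pat[] = { 0x0f, 0x22, 0xe7 };

            printf("pattern at offset %td\n",
                   find_pattern(code, sizeof(code), pat, sizeof(pat)));
            return 0;
    }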
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index bba49abb6750..756794ac8fd8 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -114,6 +114,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(CORRUPT_USER_DS),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNSET_SMEP),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
CRASHTYPE(WRITE_AFTER_FREE),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 23dc565b4307..bbcd370786d4 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -26,6 +26,7 @@ void lkdtm_CORRUPT_LIST_DEL(void);
void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
+void lkdtm_UNSET_SMEP(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 0970142bcace..47cfd5005e1b 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/mei.h>
@@ -15,104 +16,56 @@
#include "client.h"
#include "hw.h"
-static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused)
{
- struct mei_device *dev = fp->private_data;
+ struct mei_device *dev = m->private;
struct mei_me_client *me_cl;
- size_t bufsz = 1;
- char *buf;
int i = 0;
- int pos = 0;
- int ret;
-#define HDR \
-" |id|fix| UUID |con|msg len|sb|refc|\n"
+ if (!dev)
+ return -ENODEV;
down_read(&dev->me_clients_rwsem);
- list_for_each_entry(me_cl, &dev->me_clients, list)
- bufsz++;
- bufsz *= sizeof(HDR) + 1;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- up_read(&dev->me_clients_rwsem);
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, HDR);
-#undef HDR
+ seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|\n");
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
list_for_each_entry(me_cl, &dev->me_clients, list) {
-
- if (mei_me_cl_get(me_cl)) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
- i++, me_cl->client_id,
- me_cl->props.fixed_address,
- &me_cl->props.protocol_name,
- me_cl->props.max_number_of_connections,
- me_cl->props.max_msg_length,
- me_cl->props.single_recv_buf,
- kref_read(&me_cl->refcnt));
-
- mei_me_cl_put(me_cl);
- }
+ if (!mei_me_cl_get(me_cl))
+ continue;
+
+ seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
+ i++, me_cl->client_id,
+ me_cl->props.fixed_address,
+ &me_cl->props.protocol_name,
+ me_cl->props.max_number_of_connections,
+ me_cl->props.max_msg_length,
+ me_cl->props.single_recv_buf,
+ kref_read(&me_cl->refcnt));
+ mei_me_cl_put(me_cl);
}
out:
up_read(&dev->me_clients_rwsem);
- ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return 0;
}
+DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_meclients);
-static const struct file_operations mei_dbgfs_fops_meclients = {
- .open = simple_open,
- .read = mei_dbgfs_read_meclients,
- .llseek = generic_file_llseek,
-};
-
-static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int mei_dbgfs_active_show(struct seq_file *m, void *unused)
{
- struct mei_device *dev = fp->private_data;
+ struct mei_device *dev = m->private;
struct mei_cl *cl;
- size_t bufsz = 1;
- char *buf;
int i = 0;
- int pos = 0;
- int ret;
-
-#define HDR " |me|host|state|rd|wr|wrq\n"
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
- /*
- * if the driver is not enabled the list won't be consistent,
- * we output empty table
- */
- if (dev->dev_state == MEI_DEV_ENABLED)
- list_for_each_entry(cl, &dev->file_list, link)
- bufsz++;
-
- bufsz *= sizeof(HDR) + 1;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- mutex_unlock(&dev->device_lock);
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, HDR);
-#undef HDR
+ seq_puts(m, " |me|host|state|rd|wr|wrq\n");
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
@@ -120,76 +73,44 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
list_for_each_entry(cl, &dev->file_list, link) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "%3d|%2d|%4d|%5d|%2d|%2d|%3u\n",
- i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
- !list_empty(&cl->rd_completed), cl->writing_state,
- cl->tx_cb_queued);
+ seq_printf(m, "%3d|%2d|%4d|%5d|%2d|%2d|%3u\n",
+ i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
+ !list_empty(&cl->rd_completed), cl->writing_state,
+ cl->tx_cb_queued);
i++;
}
out:
mutex_unlock(&dev->device_lock);
- ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return 0;
}
+DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active);
-static const struct file_operations mei_dbgfs_fops_active = {
- .open = simple_open,
- .read = mei_dbgfs_read_active,
- .llseek = generic_file_llseek,
-};
-
-static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
{
- struct mei_device *dev = fp->private_data;
- const size_t bufsz = 1024;
- char *buf = kzalloc(bufsz, GFP_KERNEL);
- int pos = 0;
- int ret;
-
- if (!buf)
- return -ENOMEM;
+ struct mei_device *dev = m->private;
- pos += scnprintf(buf + pos, bufsz - pos, "dev: %s\n",
- mei_dev_state_str(dev->dev_state));
- pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
- mei_hbm_state_str(dev->hbm_state));
+ seq_printf(m, "dev: %s\n", mei_dev_state_str(dev->dev_state));
+ seq_printf(m, "hbm: %s\n", mei_hbm_state_str(dev->hbm_state));
if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
dev->hbm_state <= MEI_HBM_STARTED) {
- pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
- dev->hbm_f_pg_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
- dev->hbm_f_dc_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n",
- dev->hbm_f_ie_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
- dev->hbm_f_dot_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
- dev->hbm_f_ev_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
- dev->hbm_f_fa_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
- dev->hbm_f_os_supported);
- pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n",
- dev->hbm_f_dr_supported);
+ seq_puts(m, "hbm features:\n");
+ seq_printf(m, "\tPG: %01d\n", dev->hbm_f_pg_supported);
+ seq_printf(m, "\tDC: %01d\n", dev->hbm_f_dc_supported);
+ seq_printf(m, "\tIE: %01d\n", dev->hbm_f_ie_supported);
+ seq_printf(m, "\tDOT: %01d\n", dev->hbm_f_dot_supported);
+ seq_printf(m, "\tEV: %01d\n", dev->hbm_f_ev_supported);
+ seq_printf(m, "\tFA: %01d\n", dev->hbm_f_fa_supported);
+ seq_printf(m, "\tOS: %01d\n", dev->hbm_f_os_supported);
+ seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported);
}
- pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
- mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
- mei_pg_state_str(mei_pg_state(dev)));
- ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
- kfree(buf);
- return ret;
+ seq_printf(m, "pg: %s, %s\n",
+ mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
+ mei_pg_state_str(mei_pg_state(dev)));
+ return 0;
}
-static const struct file_operations mei_dbgfs_fops_devstate = {
- .open = simple_open,
- .read = mei_dbgfs_read_devstate,
- .llseek = generic_file_llseek,
-};
+DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate);
static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
const char __user *user_buf,
@@ -208,7 +129,7 @@ static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
return ret;
}
-static const struct file_operations mei_dbgfs_fops_allow_fa = {
+static const struct file_operations mei_dbgfs_allow_fa_fops = {
.open = simple_open,
.read = debugfs_read_file_bool,
.write = mei_dbgfs_write_allow_fa,
@@ -247,26 +168,26 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev->dbgfs_dir = dir;
f = debugfs_create_file("meclients", S_IRUSR, dir,
- dev, &mei_dbgfs_fops_meclients);
+ dev, &mei_dbgfs_meclients_fops);
if (!f) {
dev_err(dev->dev, "meclients: registration failed\n");
goto err;
}
f = debugfs_create_file("active", S_IRUSR, dir,
- dev, &mei_dbgfs_fops_active);
+ dev, &mei_dbgfs_active_fops);
if (!f) {
dev_err(dev->dev, "active: registration failed\n");
goto err;
}
f = debugfs_create_file("devstate", S_IRUSR, dir,
- dev, &mei_dbgfs_fops_devstate);
+ dev, &mei_dbgfs_devstate_fops);
if (!f) {
dev_err(dev->dev, "devstate: registration failed\n");
goto err;
}
f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
&dev->allow_fixed_address,
- &mei_dbgfs_fops_allow_fa);
+ &mei_dbgfs_allow_fa_fops);
if (!f) {
dev_err(dev->dev, "allow_fixed_address: registration failed\n");
goto err;
@@ -276,4 +197,3 @@ err:
mei_dbgfs_deregister(dev);
return -ENODEV;
}
-
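The conversion above leans on the DEFINE_SHOW_ATTRIBUTE() helper from <linux/seq_file.h>, which generates the open/read/llseek/release boilerplate from a single seq_file show routine. A minimal sketch of the pattern outside this driver (foo_dev, its state field and the debugfs names are hypothetical):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_dev {
	int state;
};

/* The data passed to debugfs_create_file() ends up in m->private. */
static int foo_state_show(struct seq_file *m, void *unused)
{
	struct foo_dev *fdev = m->private;

	seq_printf(m, "state: %d\n", fdev->state);
	return 0;
}
/* Generates foo_state_fops, wired up through single_open(). */
DEFINE_SHOW_ATTRIBUTE(foo_state);

static void foo_debugfs_register(struct foo_dev *fdev, struct dentry *dir)
{
	debugfs_create_file("state", S_IRUSR, dir, fdev, &foo_state_fops);
}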
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index b07000202d4a..ed816939fb32 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -2,7 +2,7 @@
/*
* Copyright © 2019 Intel Corporation
*
- * Mei_hdcp.c: HDCP client driver for mei bus
+ * mei_hdcp.c: HDCP client driver for mei bus
*
* Author:
* Ramalingam C <ramalingam.c@intel.com>
@@ -11,12 +11,9 @@
/**
* DOC: MEI_HDCP Client Driver
*
- * This is a client driver to the mei_bus to make the HDCP2.2 services of
- * ME FW available for the interested consumers like I915.
- *
- * This module will act as a translation layer between HDCP protocol
- * implementor(I915) and ME FW by translating HDCP2.2 authentication
- * messages to ME FW command payloads and vice versa.
+ * The mei_hdcp driver acts as a translation layer between the HDCP 2.2
+ * protocol implementer (I915) and the ME FW, translating HDCP 2.2
+ * negotiation messages into ME FW command payloads and vice versa.
*/
#include <linux/module.h>
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
index 490e3bdc1941..e2278bf9f11d 100644
--- a/drivers/misc/mic/scif/scif_main.c
+++ b/drivers/misc/mic/scif/scif_main.c
@@ -133,6 +133,7 @@ static int scif_setup_scifdev(void)
static void scif_destroy_scifdev(void)
{
kfree(scif_dev);
+ scif_dev = NULL;
}
static int scif_probe(struct scif_hw_dev *sdev)
diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
index 7fb6d39d4c5a..1916fa65f2f2 100644
--- a/drivers/misc/ocxl/Kconfig
+++ b/drivers/misc/ocxl/Kconfig
@@ -5,7 +5,6 @@
config OCXL_BASE
bool
- default n
select PPC_COPRO_BASE
config OCXL
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index bab9c9364184..994563a078eb 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -69,6 +69,7 @@ static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
int rc;
+ unsigned long pidr = 0;
// Locks both status & tidr
mutex_lock(&ctx->status_mutex);
@@ -77,9 +78,11 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
goto out;
}
- rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
- mm->context.id, ctx->tidr, amr, mm,
- xsl_fault_error, ctx);
+ if (mm)
+ pidr = mm->context.id;
+
+ rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
+ amr, mm, xsl_fault_error, ctx);
if (rc)
goto out;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index cce5b0d64505..58d111afd9f6 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -224,6 +224,17 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
ack_irq(spa, ADDRESS_ERROR);
return IRQ_HANDLED;
}
+
+ if (!pe_data->mm) {
+ /*
+ * translation fault from a kernel context - an OpenCAPI
+ * device tried to access a bad kernel address
+ */
+ rcu_read_unlock();
+ pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
+ ack_irq(spa, ADDRESS_ERROR);
+ return IRQ_HANDLED;
+ }
WARN_ON(pe_data->mm->context.id != pid);
if (mmget_not_zero(pe_data->mm)) {
@@ -523,7 +534,13 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
pe->amr = cpu_to_be64(amr);
pe->software_state = cpu_to_be32(SPA_PE_VALID);
- mm_context_add_copro(mm);
+ /*
+ * For user contexts, register a copro so that TLBIs are seen
+ * by the nest MMU. If we have a kernel context, TLBIs are
+ * already global.
+ */
+ if (mm)
+ mm_context_add_copro(mm);
/*
* Barrier is to make sure PE is visible in the SPA before it
* is used by the device. It also helps with the global TLBI
@@ -546,7 +563,8 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
* have a reference on mm_users. Incrementing mm_count solves
* the problem.
*/
- mmgrab(mm);
+ if (mm)
+ mmgrab(mm);
trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
mutex_unlock(&spa->spa_lock);
@@ -652,8 +670,10 @@ int ocxl_link_remove_pe(void *link_handle, int pasid)
if (!pe_data) {
WARN(1, "Couldn't find pe data when removing PE\n");
} else {
- mm_context_remove_copro(pe_data->mm);
- mmdrop(pe_data->mm);
+ if (pe_data->mm) {
+ mm_context_remove_copro(pe_data->mm);
+ mmdrop(pe_data->mm);
+ }
kfree_rcu(pe_data, rcu);
}
unlock:
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 3eba1c420cc0..782ce95d3f17 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -70,7 +70,7 @@ xpc_get_rsvd_page_pa(int nasid)
unsigned long rp_pa = nasid; /* seed with nasid */
size_t len = 0;
size_t buf_len = 0;
- void *buf = buf;
+ void *buf = NULL;
void *buf_base = NULL;
enum xp_retval (*get_partition_rsvd_page_pa)
(void *, u64 *, unsigned long *, size_t *) =
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 5b7afd6190fe..09db397df287 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -336,7 +336,7 @@ static struct i2c_driver tsl2550_driver;
static int tsl2550_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct tsl2550_data *data;
int *opmode, err = 0;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index ad807d5a3141..043eed845246 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -28,6 +28,8 @@
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/mount.h>
+#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
@@ -38,25 +40,20 @@ MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
-/*
- * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't allow wait
- * (__GFP_RECLAIM) for huge page allocations. Use __GFP_NOWARN, to suppress page
- * allocation failure warnings. Disallow access to emergency low-memory pools.
- */
-#define VMW_HUGE_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
- __GFP_NOMEMALLOC)
+static bool __read_mostly vmwballoon_shrinker_enable;
+module_param(vmwballoon_shrinker_enable, bool, 0444);
+MODULE_PARM_DESC(vmwballoon_shrinker_enable,
+ "Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
-/*
- * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We allow lightweight
- * reclamation (__GFP_NORETRY). Use __GFP_NOWARN, to suppress page allocation
- * failure warnings. Disallow access to emergency low-memory pools.
- */
-#define VMW_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
- __GFP_NOMEMALLOC|__GFP_NORETRY)
+/* Delay in seconds after shrink before inflation. */
+#define VMBALLOON_SHRINK_DELAY (5)
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED 16
+/* Magic number for the balloon mount-point */
+#define BALLOON_VMW_MAGIC 0x0ba11007
+
/*
* Hypervisor communication port definitions.
*/
@@ -229,29 +226,26 @@ enum vmballoon_stat_general {
VMW_BALLOON_STAT_TIMER,
VMW_BALLOON_STAT_DOORBELL,
VMW_BALLOON_STAT_RESET,
- VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_RESET
+ VMW_BALLOON_STAT_SHRINK,
+ VMW_BALLOON_STAT_SHRINK_FREE,
+ VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};
#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
-
static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
struct vmballoon_ctl {
struct list_head pages;
struct list_head refused_pages;
+ struct list_head prealloc_pages;
unsigned int n_refused_pages;
unsigned int n_pages;
enum vmballoon_page_size_type page_size;
enum vmballoon_op op;
};
-struct vmballoon_page_size {
- /* list of reserved physical pages */
- struct list_head pages;
-};
-
/**
* struct vmballoon_batch_entry - a batch entry for lock or unlock.
*
@@ -266,8 +260,6 @@ struct vmballoon_batch_entry {
} __packed;
struct vmballoon {
- struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];
-
/**
* @max_page_size: maximum supported page size for ballooning.
*
@@ -340,6 +332,15 @@ struct vmballoon {
*/
struct page *page;
+ /**
+ * @shrink_timeout: timeout until the next inflation.
+ *
+	 * After a shrink event, indicates the time in jiffies after which
+ * inflation is allowed again. Can be written concurrently with reads,
+ * so must use READ_ONCE/WRITE_ONCE when accessing.
+ */
+ unsigned long shrink_timeout;
+
/* statistics */
struct vmballoon_stats *stats;
@@ -348,9 +349,21 @@ struct vmballoon {
struct dentry *dbg_entry;
#endif
+ /**
+ * @b_dev_info: balloon device information descriptor.
+ */
+ struct balloon_dev_info b_dev_info;
+
struct delayed_work dwork;
/**
+	 * @huge_pages: list of the inflated 2MB pages.
+ *
+	 * Protected by @b_dev_info.pages_lock.
+ */
+ struct list_head huge_pages;
+
+ /**
* @vmci_doorbell.
*
* Protected by @conf_sem.
@@ -368,6 +381,20 @@ struct vmballoon {
* Lock ordering: @conf_sem -> @comm_lock .
*/
spinlock_t comm_lock;
+
+ /**
+ * @shrinker: shrinker interface that is used to avoid over-inflation.
+ */
+ struct shrinker shrinker;
+
+ /**
+ * @shrinker_registered: whether the shrinker was registered.
+ *
+	 * The shrinker interface does not gracefully handle the removal of a
+	 * shrinker that was never registered. This flag simplifies the
+	 * unregistration process.
+ */
+ bool shrinker_registered;
};
static struct vmballoon balloon;
@@ -642,15 +669,25 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
unsigned int i;
for (i = 0; i < req_n_pages; i++) {
- if (ctl->page_size == VMW_BALLOON_2M_PAGE)
- page = alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
- VMW_BALLOON_2M_ORDER);
- else
- page = alloc_page(VMW_PAGE_ALLOC_FLAGS);
-
- /* Update statistics */
- vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
- ctl->page_size);
+ /*
+		 * First check whether we already have pages that were allocated
+		 * earlier. This happens when a 2MB page is rejected by the
+		 * hypervisor during inflation and is then split into 4KB pages.
+ */
+ if (!list_empty(&ctl->prealloc_pages)) {
+ page = list_first_entry(&ctl->prealloc_pages,
+ struct page, lru);
+ list_del(&page->lru);
+ } else {
+ if (ctl->page_size == VMW_BALLOON_2M_PAGE)
+ page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
+ __GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
+ else
+ page = balloon_page_alloc();
+
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
+ ctl->page_size);
+ }
if (page) {
vmballoon_mark_page_offline(page, ctl->page_size);
@@ -896,7 +933,8 @@ static void vmballoon_release_page_list(struct list_head *page_list,
__free_pages(page, vmballoon_page_order(page_size));
}
- *n_pages = 0;
+ if (n_pages)
+ *n_pages = 0;
}
@@ -942,6 +980,10 @@ static int64_t vmballoon_change(struct vmballoon *b)
size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
return 0;
+	/* If an out-of-memory event occurred recently, inflation is disallowed. */
+ if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
+ return 0;
+
return target - size;
}
@@ -961,9 +1003,22 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
unsigned int *n_pages,
enum vmballoon_page_size_type page_size)
{
- struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
+ unsigned long flags;
+
+ if (page_size == VMW_BALLOON_4K_PAGE) {
+ balloon_page_list_enqueue(&b->b_dev_info, pages);
+ } else {
+ /*
+ * Keep the huge pages in a local list which is not available
+ * for the balloon compaction mechanism.
+ */
+ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+ list_splice_init(pages, &b->huge_pages);
+ __count_vm_events(BALLOON_INFLATE, *n_pages *
+ vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
+ spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+ }
- list_splice_init(pages, &page_size_info->pages);
*n_pages = 0;
}
@@ -986,19 +1041,58 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
enum vmballoon_page_size_type page_size,
unsigned int n_req_pages)
{
- struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
struct page *page, *tmp;
unsigned int i = 0;
+ unsigned long flags;
+
+ /* In the case of 4k pages, use the compaction infrastructure */
+ if (page_size == VMW_BALLOON_4K_PAGE) {
+ *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
+ n_req_pages);
+ return;
+ }
- list_for_each_entry_safe(page, tmp, &page_size_info->pages, lru) {
+ /* 2MB pages */
+ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+ list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
list_move(&page->lru, pages);
if (++i == n_req_pages)
break;
}
+
+ __count_vm_events(BALLOON_DEFLATE,
+ i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
+ spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
*n_pages = i;
}
/**
+ * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
+ *
+ * If inflation of 2MB pages was denied by the hypervisor, the denial is
+ * likely due to one or a few 4KB pages. Such 2MB pages may keep being
+ * allocated and then refused. To prevent this, this function splits the
+ * refused pages into 4KB pages and adds them to the @prealloc_pages list.
+ *
+ * @ctl: pointer to the &struct vmballoon_ctl, which defines the operation.
+ */
+static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
+{
+ struct page *page, *tmp;
+ unsigned int i, order;
+
+ order = vmballoon_page_order(ctl->page_size);
+
+ list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
+ list_del(&page->lru);
+ split_page(page, order);
+ for (i = 0; i < (1 << order); i++)
+ list_add(&page[i].lru, &ctl->prealloc_pages);
+ }
+ ctl->n_refused_pages = 0;
+}
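For context on split_page() used above: splitting a non-compound higher-order allocation turns it into 2^order independent order-0 pages, each of which must then be tracked and freed on its own. A standalone sketch of that behaviour (not part of the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

static void split_page_demo(void)
{
	struct page *block;
	int i;

	/* One order-2 block, i.e. four physically contiguous 4KB pages. */
	block = alloc_pages(GFP_KERNEL | __GFP_NOWARN, 2);
	if (!block)
		return;

	/* After split_page(), each of the four pages stands on its own. */
	split_page(block, 2);
	for (i = 0; i < (1 << 2); i++)
		__free_page(&block[i]);
}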
+
+/**
* vmballoon_inflate() - Inflate the balloon towards its target size.
*
* @b: pointer to the balloon.
@@ -1009,6 +1103,7 @@ static void vmballoon_inflate(struct vmballoon *b)
struct vmballoon_ctl ctl = {
.pages = LIST_HEAD_INIT(ctl.pages),
.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
.page_size = b->max_page_size,
.op = VMW_BALLOON_INFLATE
};
@@ -1056,10 +1151,10 @@ static void vmballoon_inflate(struct vmballoon *b)
break;
/*
- * Ignore errors from locking as we now switch to 4k
- * pages and we might get different errors.
+ * Split the refused pages to 4k. This will also empty
+ * the refused pages list.
*/
- vmballoon_release_refused_pages(b, &ctl);
+ vmballoon_split_refused_pages(&ctl);
ctl.page_size--;
}
@@ -1073,6 +1168,8 @@ static void vmballoon_inflate(struct vmballoon *b)
*/
if (ctl.n_refused_pages != 0)
vmballoon_release_refused_pages(b, &ctl);
+
+ vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}
/**
@@ -1411,6 +1508,90 @@ static void vmballoon_work(struct work_struct *work)
}
+/**
+ * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
+ * @shrinker: pointer to the balloon shrinker.
+ * @sc: page reclaim information.
+ *
+ * Returns: number of pages that were freed during deflation.
+ */
+static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct vmballoon *b = &balloon;
+ unsigned long deflated_frames;
+
+ pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
+
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
+
+ /*
+ * If the lock is also contended for read, we cannot easily reclaim and
+ * we bail out.
+ */
+ if (!down_read_trylock(&b->conf_sem))
+ return 0;
+
+ deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
+
+ vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
+ deflated_frames);
+
+ /*
+	 * Delay future inflation for some time to mitigate situations in which
+	 * the balloon continuously grows and shrinks. Use WRITE_ONCE() since
+ * the access is asynchronous.
+ */
+ WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
+
+ up_read(&b->conf_sem);
+
+ return deflated_frames;
+}
+
+/**
+ * vmballoon_shrinker_count() - return the number of ballooned pages.
+ * @shrinker: pointer to the balloon shrinker.
+ * @sc: page reclaim information.
+ *
+ * Returns: number of 4k pages that are allocated for the balloon and can
+ * therefore be reclaimed under pressure.
+ */
+static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct vmballoon *b = &balloon;
+
+ return atomic64_read(&b->size);
+}
+
+static void vmballoon_unregister_shrinker(struct vmballoon *b)
+{
+ if (b->shrinker_registered)
+ unregister_shrinker(&b->shrinker);
+ b->shrinker_registered = false;
+}
+
+static int vmballoon_register_shrinker(struct vmballoon *b)
+{
+ int r;
+
+ /* Do nothing if the shrinker is not enabled */
+ if (!vmwballoon_shrinker_enable)
+ return 0;
+
+ b->shrinker.scan_objects = vmballoon_shrinker_scan;
+ b->shrinker.count_objects = vmballoon_shrinker_count;
+ b->shrinker.seeks = DEFAULT_SEEKS;
+
+ r = register_shrinker(&b->shrinker);
+
+ if (r == 0)
+ b->shrinker_registered = true;
+
+ return r;
+}
+
/*
* DEBUGFS Interface
*/
@@ -1428,6 +1609,8 @@ static const char * const vmballoon_stat_names[] = {
[VMW_BALLOON_STAT_TIMER] = "timer",
[VMW_BALLOON_STAT_DOORBELL] = "doorbell",
[VMW_BALLOON_STAT_RESET] = "reset",
+ [VMW_BALLOON_STAT_SHRINK] = "shrink",
+ [VMW_BALLOON_STAT_SHRINK_FREE] = "shrinkFree"
};
static int vmballoon_enable_stats(struct vmballoon *b)
@@ -1552,9 +1735,204 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_BALLOON_COMPACTION
+
+static struct dentry *vmballoon_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "balloon-vmware:", NULL, &ops,
+ BALLOON_VMW_MAGIC);
+}
+
+static struct file_system_type vmballoon_fs = {
+ .name = "balloon-vmware",
+ .mount = vmballoon_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static struct vfsmount *vmballoon_mnt;
+
+/**
+ * vmballoon_migratepage() - migrates a balloon page.
+ * @b_dev_info: balloon device information descriptor.
+ * @newpage: the page to which @page should be migrated.
+ * @page: a ballooned page that should be migrated.
+ * @mode: migration mode, ignored.
+ *
+ * This function is largely open-coded, but that follows the interface that
+ * balloon_compaction provides.
+ *
+ * Return: zero on success, -EAGAIN when migration cannot be performed at the
+ *	   moment, and -EBUSY if migration failed and should be retried with
+ *	   that specific page.
+ */
+static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
+{
+ unsigned long status, flags;
+ struct vmballoon *b;
+ int ret;
+
+ b = container_of(b_dev_info, struct vmballoon, b_dev_info);
+
+ /*
+ * If the semaphore is taken, there is ongoing configuration change
+ * (i.e., balloon reset), so try again.
+ */
+ if (!down_read_trylock(&b->conf_sem))
+ return -EAGAIN;
+
+ spin_lock(&b->comm_lock);
+ /*
+ * We must start by deflating and not inflating, as otherwise the
+ * hypervisor may tell us that it has enough memory and the new page is
+ * not needed. Since the old page is isolated, we cannot use the list
+ * interface to unlock it, as the LRU field is used for isolation.
+ * Instead, we use the native interface directly.
+ */
+ vmballoon_add_page(b, 0, page);
+ status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
+ VMW_BALLOON_DEFLATE);
+
+ if (status == VMW_BALLOON_SUCCESS)
+ status = vmballoon_status_page(b, 0, &page);
+
+ /*
+ * If a failure happened, let the migration mechanism know that it
+ * should not retry.
+ */
+ if (status != VMW_BALLOON_SUCCESS) {
+ spin_unlock(&b->comm_lock);
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /*
+ * The page is isolated, so it is safe to delete it without holding
+	 * @pages_lock. We keep holding @comm_lock since we will need it again
+	 * shortly.
+ */
+ balloon_page_delete(page);
+
+ put_page(page);
+
+ /* Inflate */
+ vmballoon_add_page(b, 0, newpage);
+ status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
+ VMW_BALLOON_INFLATE);
+
+ if (status == VMW_BALLOON_SUCCESS)
+ status = vmballoon_status_page(b, 0, &newpage);
+
+ spin_unlock(&b->comm_lock);
+
+ if (status != VMW_BALLOON_SUCCESS) {
+ /*
+ * A failure happened. While we can deflate the page we just
+ * inflated, this deflation can also encounter an error. Instead
+ * we will decrease the size of the balloon to reflect the
+ * change and report failure.
+ */
+ atomic64_dec(&b->size);
+ ret = -EBUSY;
+ } else {
+ /*
+ * Success. Take a reference for the page, and we will add it to
+ * the list after acquiring the lock.
+ */
+ get_page(newpage);
+ ret = MIGRATEPAGE_SUCCESS;
+ }
+
+ /* Update the balloon list under the @pages_lock */
+ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+
+ /*
+	 * On inflation success, we already took a reference on @newpage. If we
+	 * succeeded, just insert it into the list and update the statistics
+	 * under the lock.
+ */
+ if (ret == MIGRATEPAGE_SUCCESS) {
+ balloon_page_insert(&b->b_dev_info, newpage);
+ __count_vm_event(BALLOON_MIGRATE);
+ }
+
+ /*
+	 * We deflated successfully, so regardless of the inflation success, we
+	 * need to reduce the number of isolated_pages.
+ */
+ b->b_dev_info.isolated_pages--;
+ spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+
+out_unlock:
+ up_read(&b->conf_sem);
+ return ret;
+}
+
+/**
+ * vmballoon_compaction_deinit() - removes compaction-related data.
+ *
+ * @b: pointer to the balloon.
+ */
+static void vmballoon_compaction_deinit(struct vmballoon *b)
+{
+ if (!IS_ERR(b->b_dev_info.inode))
+ iput(b->b_dev_info.inode);
+
+ b->b_dev_info.inode = NULL;
+ kern_unmount(vmballoon_mnt);
+ vmballoon_mnt = NULL;
+}
+
+/**
+ * vmballoon_compaction_init() - initializes compaction for the balloon.
+ *
+ * @b: pointer to the balloon.
+ *
+ * If a failure occurs during initialization, this function does not perform
+ * cleanup. The caller must call vmballoon_compaction_deinit() in that case.
+ *
+ * Return: zero on success or error code on failure.
+ */
+static __init int vmballoon_compaction_init(struct vmballoon *b)
+{
+ vmballoon_mnt = kern_mount(&vmballoon_fs);
+ if (IS_ERR(vmballoon_mnt))
+ return PTR_ERR(vmballoon_mnt);
+
+ b->b_dev_info.migratepage = vmballoon_migratepage;
+ b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
+
+ if (IS_ERR(b->b_dev_info.inode))
+ return PTR_ERR(b->b_dev_info.inode);
+
+ b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
+ return 0;
+}
+
+#else /* CONFIG_BALLOON_COMPACTION */
+
+static void vmballoon_compaction_deinit(struct vmballoon *b)
+{
+}
+
+static int vmballoon_compaction_init(struct vmballoon *b)
+{
+ return 0;
+}
+
+#endif /* CONFIG_BALLOON_COMPACTION */
+
static int __init vmballoon_init(void)
{
- enum vmballoon_page_size_type page_size;
int error;
/*
@@ -1564,17 +1942,26 @@ static int __init vmballoon_init(void)
if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
- for (page_size = VMW_BALLOON_4K_PAGE;
- page_size <= VMW_BALLOON_LAST_SIZE; page_size++)
- INIT_LIST_HEAD(&balloon.page_sizes[page_size].pages);
-
-
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
+ error = vmballoon_register_shrinker(&balloon);
+ if (error)
+ goto fail;
+
error = vmballoon_debugfs_init(&balloon);
if (error)
- return error;
+ goto fail;
+
+ /*
+	 * Compaction must be initialized after the call to
+	 * balloon_devinfo_init().
+ */
+ balloon_devinfo_init(&balloon.b_dev_info);
+ error = vmballoon_compaction_init(&balloon);
+ if (error)
+ goto fail;
+ INIT_LIST_HEAD(&balloon.huge_pages);
spin_lock_init(&balloon.comm_lock);
init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
@@ -1585,6 +1972,10 @@ static int __init vmballoon_init(void)
queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
return 0;
+fail:
+ vmballoon_unregister_shrinker(&balloon);
+ vmballoon_compaction_deinit(&balloon);
+ return error;
}
/*
@@ -1597,6 +1988,7 @@ late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
+ vmballoon_unregister_shrinker(&balloon);
vmballoon_vmci_cleanup(&balloon);
cancel_delayed_work_sync(&balloon.dwork);
@@ -1609,5 +2001,8 @@ static void __exit vmballoon_exit(void)
*/
vmballoon_send_start(&balloon, 0);
vmballoon_pop(&balloon);
+
+	/* Only once the balloon has been popped can compaction be deinitialized */
+ vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 300ed69fe2c7..16695366ec92 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -21,6 +21,9 @@
#include "vmci_driver.h"
#include "vmci_event.h"
+/* Use a wide upper bound for the maximum number of contexts. */
+#define VMCI_MAX_CONTEXTS 2000
+
/*
* List of current VMCI contexts. Contexts can be added by
* vmci_ctx_create() and removed via vmci_ctx_destroy().
@@ -117,19 +120,22 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
/* Initialize host-specific VMCI context. */
init_waitqueue_head(&context->host_context.wait_queue);
- context->queue_pair_array = vmci_handle_arr_create(0);
+ context->queue_pair_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
if (!context->queue_pair_array) {
error = -ENOMEM;
goto err_free_ctx;
}
- context->doorbell_array = vmci_handle_arr_create(0);
+ context->doorbell_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->doorbell_array) {
error = -ENOMEM;
goto err_free_qp_array;
}
- context->pending_doorbell_array = vmci_handle_arr_create(0);
+ context->pending_doorbell_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->pending_doorbell_array) {
error = -ENOMEM;
goto err_free_db_array;
@@ -204,7 +210,7 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags)
* We create an array to hold the subscribers we find when
* scanning through all contexts.
*/
- subscriber_array = vmci_handle_arr_create(0);
+ subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
if (subscriber_array == NULL)
return VMCI_ERROR_NO_MEM;
@@ -623,20 +629,26 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
spin_lock(&context->lock);
- list_for_each_entry(n, &context->notifier_list, node) {
- if (vmci_handle_is_equal(n->handle, notifier->handle)) {
- exists = true;
- break;
+ if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
+ list_for_each_entry(n, &context->notifier_list, node) {
+ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+ exists = true;
+ break;
+ }
}
- }
- if (exists) {
- kfree(notifier);
- result = VMCI_ERROR_ALREADY_EXISTS;
+ if (exists) {
+ kfree(notifier);
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ } else {
+ list_add_tail_rcu(&notifier->node,
+ &context->notifier_list);
+ context->n_notifiers++;
+ result = VMCI_SUCCESS;
+ }
} else {
- list_add_tail_rcu(&notifier->node, &context->notifier_list);
- context->n_notifiers++;
- result = VMCI_SUCCESS;
+ kfree(notifier);
+ result = VMCI_ERROR_NO_MEM;
}
spin_unlock(&context->lock);
@@ -721,8 +733,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
u32 *buf_size, void **pbuf)
{
struct dbell_cpt_state *dbells;
- size_t n_doorbells;
- int i;
+ u32 i, n_doorbells;
n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
if (n_doorbells > 0) {
@@ -860,7 +871,8 @@ int vmci_ctx_rcv_notifications_get(u32 context_id,
spin_lock(&context->lock);
*db_handle_array = context->pending_doorbell_array;
- context->pending_doorbell_array = vmci_handle_arr_create(0);
+ context->pending_doorbell_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->pending_doorbell_array) {
context->pending_doorbell_array = *db_handle_array;
*db_handle_array = NULL;
@@ -942,12 +954,11 @@ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
- if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
- vmci_handle_arr_append_entry(&context->doorbell_array, handle);
- result = VMCI_SUCCESS;
- } else {
+ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
+ result = vmci_handle_arr_append_entry(&context->doorbell_array,
+ handle);
+ else
result = VMCI_ERROR_DUPLICATE_ENTRY;
- }
spin_unlock(&context->lock);
vmci_ctx_put(context);
@@ -1083,15 +1094,16 @@ int vmci_ctx_notify_dbell(u32 src_cid,
if (!vmci_handle_arr_has_entry(
dst_context->pending_doorbell_array,
handle)) {
- vmci_handle_arr_append_entry(
+ result = vmci_handle_arr_append_entry(
&dst_context->pending_doorbell_array,
handle);
-
- ctx_signal_notify(dst_context);
- wake_up(&dst_context->host_context.wait_queue);
-
+ if (result == VMCI_SUCCESS) {
+ ctx_signal_notify(dst_context);
+ wake_up(&dst_context->host_context.wait_queue);
+ }
+ } else {
+ result = VMCI_SUCCESS;
}
- result = VMCI_SUCCESS;
}
spin_unlock(&dst_context->lock);
}
@@ -1118,13 +1130,11 @@ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
if (context == NULL || vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
- if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
- vmci_handle_arr_append_entry(&context->queue_pair_array,
- handle);
- result = VMCI_SUCCESS;
- } else {
+ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
+ result = vmci_handle_arr_append_entry(
+ &context->queue_pair_array, handle);
+ else
result = VMCI_ERROR_DUPLICATE_ENTRY;
- }
return result;
}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
index c527388f5d7b..de7fee7ead1b 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -8,24 +8,29 @@
#include <linux/slab.h>
#include "vmci_handle_array.h"
-static size_t handle_arr_calc_size(size_t capacity)
+static size_t handle_arr_calc_size(u32 capacity)
{
- return sizeof(struct vmci_handle_arr) +
+ return VMCI_HANDLE_ARRAY_HEADER_SIZE +
capacity * sizeof(struct vmci_handle);
}
-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
{
struct vmci_handle_arr *array;
+ if (max_capacity == 0 || capacity > max_capacity)
+ return NULL;
+
if (capacity == 0)
- capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+ capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
+ max_capacity);
array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
if (!array)
return NULL;
array->capacity = capacity;
+ array->max_capacity = max_capacity;
array->size = 0;
return array;
@@ -36,27 +41,34 @@ void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
kfree(array);
}
-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
- struct vmci_handle handle)
+int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle)
{
struct vmci_handle_arr *array = *array_ptr;
if (unlikely(array->size >= array->capacity)) {
/* reallocate. */
struct vmci_handle_arr *new_array;
- size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
- size_t new_size = handle_arr_calc_size(new_capacity);
+ u32 capacity_bump = min(array->max_capacity - array->capacity,
+ array->capacity);
+ size_t new_size = handle_arr_calc_size(array->capacity +
+ capacity_bump);
+
+ if (array->size >= array->max_capacity)
+ return VMCI_ERROR_NO_MEM;
new_array = krealloc(array, new_size, GFP_ATOMIC);
if (!new_array)
- return;
+ return VMCI_ERROR_NO_MEM;
- new_array->capacity = new_capacity;
+ new_array->capacity += capacity_bump;
*array_ptr = array = new_array;
}
array->entries[array->size] = handle;
array->size++;
+
+ return VMCI_SUCCESS;
}
/*
@@ -66,7 +78,7 @@ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
- size_t i;
+ u32 i;
for (i = 0; i < array->size; i++) {
if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
@@ -101,7 +113,7 @@ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
* Handle at given index, VMCI_INVALID_HANDLE if invalid index.
*/
struct vmci_handle
-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index)
{
if (unlikely(index >= array->size))
return VMCI_INVALID_HANDLE;
@@ -112,7 +124,7 @@ vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
- size_t i;
+ u32 i;
for (i = 0; i < array->size; i++)
if (vmci_handle_is_equal(array->entries[i], entry_handle))
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
index bd1559a548e9..96193f85be5b 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -9,32 +9,41 @@
#define _VMCI_HANDLE_ARRAY_H_
#include <linux/vmw_vmci_defs.h>
+#include <linux/limits.h>
#include <linux/types.h>
-#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
-#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
-
struct vmci_handle_arr {
- size_t capacity;
- size_t size;
+ u32 capacity;
+ u32 max_capacity;
+ u32 size;
+ u32 pad;
struct vmci_handle entries[];
};
-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+#define VMCI_HANDLE_ARRAY_HEADER_SIZE \
+ offsetof(struct vmci_handle_arr, entries)
+/* Select a default capacity that results in a 64-byte array */
+#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6
+/* Make sure that the max array size can be expressed by a u32 */
+#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \
+ ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \
+ sizeof(struct vmci_handle))
+
+struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
- struct vmci_handle handle);
+int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle);
struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle
entry_handle);
struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
struct vmci_handle
-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index);
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle);
struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
-static inline size_t vmci_handle_arr_get_size(
+static inline u32 vmci_handle_arr_get_size(
const struct vmci_handle_arr *array)
{
return array->size;
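With the capped growth above, vmci_handle_arr_append_entry() can now fail, so callers are expected to check its result instead of assuming success. A hedged usage sketch (the wrapper function is hypothetical; the bound macro comes from the vmw_vmci_defs.h change in this series, not shown here):

static int vmci_example_append(struct vmci_handle handle)
{
	struct vmci_handle_arr *arr;
	int result;

	/* Start at the default capacity; never grow past the given bound. */
	arr = vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!arr)
		return VMCI_ERROR_NO_MEM;

	/* Append may reallocate the array and can now report exhaustion. */
	result = vmci_handle_arr_append_entry(&arr, handle);

	vmci_handle_arr_destroy(arr);
	return result;
}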
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
new file mode 100644
index 000000000000..f257d3812110
--- /dev/null
+++ b/drivers/misc/xilinx_sdfec.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx SDFEC
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
+ * IP. It exposes a char device which supports file operations
+ * like open(), close() and ioctl().
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define DEV_NAME_LEN 12
+
+static struct idr dev_idr;
+static struct mutex dev_idr_lock;
+
+/**
+ * struct xsdfec_clks - For managing SD-FEC clocks
+ * @core_clk: Main processing clock for core
+ * @axi_clk: AXI4-Lite memory-mapped clock
+ * @din_words_clk: DIN Words AXI4-Stream Slave clock
+ * @din_clk: DIN AXI4-Stream Slave clock
+ * @dout_clk: DOUT AXI4-Stream Slave clock
+ * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
+ * @ctrl_clk: Control AXI4-Stream Slave clock
+ * @status_clk: Status AXI4-Stream Slave clock
+ */
+struct xsdfec_clks {
+ struct clk *core_clk;
+ struct clk *axi_clk;
+ struct clk *din_words_clk;
+ struct clk *din_clk;
+ struct clk *dout_clk;
+ struct clk *dout_words_clk;
+ struct clk *ctrl_clk;
+ struct clk *status_clk;
+};
+
+/**
+ * struct xsdfec_dev - Driver data for SDFEC
+ * @regs: device physical base address
+ * @dev: pointer to device struct
+ * @miscdev: Misc device handle
+ * @error_data_lock: Error counter and states spinlock
+ * @clks: Clocks managed by the SDFEC driver
+ * @dev_name: Device name
+ * @dev_id: Device ID
+ *
+ * This structure contains necessary state for the SDFEC driver to operate.
+ */
+struct xsdfec_dev {
+ void __iomem *regs;
+ struct device *dev;
+ struct miscdevice miscdev;
+ /* Spinlock to protect state_updated and stats_updated */
+ spinlock_t error_data_lock;
+ struct xsdfec_clks clks;
+ char dev_name[DEV_NAME_LEN];
+ int dev_id;
+};
+
+static const struct file_operations xsdfec_fops = {
+ .owner = THIS_MODULE,
+};
+
+static int xsdfec_clk_init(struct platform_device *pdev,
+ struct xsdfec_clks *clks)
+{
+ int err;
+
+ clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(clks->core_clk)) {
+ dev_err(&pdev->dev, "failed to get core_clk");
+ return PTR_ERR(clks->core_clk);
+ }
+
+ clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(clks->axi_clk)) {
+ dev_err(&pdev->dev, "failed to get axi_clk");
+ return PTR_ERR(clks->axi_clk);
+ }
+
+ clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
+ if (IS_ERR(clks->din_words_clk)) {
+ if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
+ err = PTR_ERR(clks->din_words_clk);
+ return err;
+ }
+ clks->din_words_clk = NULL;
+ }
+
+ clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
+ if (IS_ERR(clks->din_clk)) {
+ if (PTR_ERR(clks->din_clk) != -ENOENT) {
+ err = PTR_ERR(clks->din_clk);
+ return err;
+ }
+ clks->din_clk = NULL;
+ }
+
+ clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
+ if (IS_ERR(clks->dout_clk)) {
+ if (PTR_ERR(clks->dout_clk) != -ENOENT) {
+ err = PTR_ERR(clks->dout_clk);
+ return err;
+ }
+ clks->dout_clk = NULL;
+ }
+
+ clks->dout_words_clk =
+ devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
+ if (IS_ERR(clks->dout_words_clk)) {
+ if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
+ err = PTR_ERR(clks->dout_words_clk);
+ return err;
+ }
+ clks->dout_words_clk = NULL;
+ }
+
+ clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
+ if (IS_ERR(clks->ctrl_clk)) {
+ if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
+ err = PTR_ERR(clks->ctrl_clk);
+ return err;
+ }
+ clks->ctrl_clk = NULL;
+ }
+
+ clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
+ if (IS_ERR(clks->status_clk)) {
+ if (PTR_ERR(clks->status_clk) != -ENOENT) {
+ err = PTR_ERR(clks->status_clk);
+ return err;
+ }
+ clks->status_clk = NULL;
+ }
+
+ err = clk_prepare_enable(clks->core_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(clks->axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
+ goto err_disable_core_clk;
+ }
+
+ err = clk_prepare_enable(clks->din_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
+ goto err_disable_axi_clk;
+ }
+
+ err = clk_prepare_enable(clks->din_words_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
+ goto err_disable_din_clk;
+ }
+
+ err = clk_prepare_enable(clks->dout_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
+ goto err_disable_din_words_clk;
+ }
+
+ err = clk_prepare_enable(clks->dout_words_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
+ err);
+ goto err_disable_dout_clk;
+ }
+
+ err = clk_prepare_enable(clks->ctrl_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
+ goto err_disable_dout_words_clk;
+ }
+
+ err = clk_prepare_enable(clks->status_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
+ goto err_disable_ctrl_clk;
+ }
+
+ return err;
+
+err_disable_ctrl_clk:
+ clk_disable_unprepare(clks->ctrl_clk);
+err_disable_dout_words_clk:
+ clk_disable_unprepare(clks->dout_words_clk);
+err_disable_dout_clk:
+ clk_disable_unprepare(clks->dout_clk);
+err_disable_din_words_clk:
+ clk_disable_unprepare(clks->din_words_clk);
+err_disable_din_clk:
+ clk_disable_unprepare(clks->din_clk);
+err_disable_axi_clk:
+ clk_disable_unprepare(clks->axi_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(clks->core_clk);
+
+ return err;
+}
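Several of the clocks above are optional: a -ENOENT result is mapped to NULL, and the later clk_prepare_enable(NULL) calls are no-ops. On kernels that provide devm_clk_get_optional(), the same handling can be expressed more compactly; a hedged drop-in sketch for one of the optional clocks, not part of the patch:

	clks->din_words_clk = devm_clk_get_optional(&pdev->dev,
						    "s_axis_din_words_aclk");
	if (IS_ERR(clks->din_words_clk))
		return PTR_ERR(clks->din_words_clk);
	/* NULL here simply means the clock was not described in the DT. */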
+
+static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
+{
+ clk_disable_unprepare(clks->status_clk);
+ clk_disable_unprepare(clks->ctrl_clk);
+ clk_disable_unprepare(clks->dout_words_clk);
+ clk_disable_unprepare(clks->dout_clk);
+ clk_disable_unprepare(clks->din_words_clk);
+ clk_disable_unprepare(clks->din_clk);
+ clk_disable_unprepare(clks->core_clk);
+ clk_disable_unprepare(clks->axi_clk);
+}
+
+static void xsdfec_idr_remove(struct xsdfec_dev *xsdfec)
+{
+ mutex_lock(&dev_idr_lock);
+ idr_remove(&dev_idr, xsdfec->dev_id);
+ mutex_unlock(&dev_idr_lock);
+}
+
+static int xsdfec_probe(struct platform_device *pdev)
+{
+ struct xsdfec_dev *xsdfec;
+ struct device *dev;
+ struct resource *res;
+ int err;
+
+ xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
+ if (!xsdfec)
+ return -ENOMEM;
+
+ xsdfec->dev = &pdev->dev;
+ spin_lock_init(&xsdfec->error_data_lock);
+
+ err = xsdfec_clk_init(pdev, &xsdfec->clks);
+ if (err)
+ return err;
+
+ dev = xsdfec->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xsdfec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(xsdfec->regs)) {
+ err = PTR_ERR(xsdfec->regs);
+ goto err_xsdfec_dev;
+ }
+
+ /* Save driver private data */
+ platform_set_drvdata(pdev, xsdfec);
+
+ mutex_lock(&dev_idr_lock);
+ err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
+ mutex_unlock(&dev_idr_lock);
+ if (err < 0)
+ goto err_xsdfec_dev;
+ xsdfec->dev_id = err;
+
+ snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
+ xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
+ xsdfec->miscdev.name = xsdfec->dev_name;
+ xsdfec->miscdev.fops = &xsdfec_fops;
+ xsdfec->miscdev.parent = dev;
+ err = misc_register(&xsdfec->miscdev);
+ if (err) {
+ dev_err(dev, "error:%d. Unable to register device", err);
+ goto err_xsdfec_idr;
+ }
+ return 0;
+
+err_xsdfec_idr:
+ xsdfec_idr_remove(xsdfec);
+err_xsdfec_dev:
+ xsdfec_disable_all_clks(&xsdfec->clks);
+ return err;
+}
+
+static int xsdfec_remove(struct platform_device *pdev)
+{
+ struct xsdfec_dev *xsdfec;
+
+ xsdfec = platform_get_drvdata(pdev);
+ misc_deregister(&xsdfec->miscdev);
+ xsdfec_idr_remove(xsdfec);
+ xsdfec_disable_all_clks(&xsdfec->clks);
+ return 0;
+}
+
+static const struct of_device_id xsdfec_of_match[] = {
+ {
+ .compatible = "xlnx,sd-fec-1.1",
+ },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xsdfec_of_match);
+
+static struct platform_driver xsdfec_driver = {
+ .driver = {
+ .name = "xilinx-sdfec",
+ .of_match_table = xsdfec_of_match,
+ },
+ .probe = xsdfec_probe,
+ .remove = xsdfec_remove,
+};
+
+static int __init xsdfec_init(void)
+{
+ int err;
+
+ mutex_init(&dev_idr_lock);
+ idr_init(&dev_idr);
+ err = platform_driver_register(&xsdfec_driver);
+ if (err < 0) {
+		pr_err("%s Unable to register SDFEC driver", __func__);
+ return err;
+ }
+ return 0;
+}
+
+static void __exit xsdfec_exit(void)
+{
+ platform_driver_unregister(&xsdfec_driver);
+ idr_destroy(&dev_idr);
+}
+
+module_init(xsdfec_init);
+module_exit(xsdfec_exit);
+
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 7659d6c5f718..e5c571fd232c 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -46,14 +46,14 @@ config MUX_GPIO
be called mux-gpio.
config MUX_MMIO
- tristate "MMIO register bitfield-controlled Multiplexer"
- depends on (OF && MFD_SYSCON) || COMPILE_TEST
+ tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
+ depends on OF || COMPILE_TEST
help
- MMIO register bitfield-controlled Multiplexer controller.
+ MMIO/Regmap register bitfield-controlled Multiplexer controller.
- The driver builds multiplexer controllers for bitfields in a syscon
- register. For N bit wide bitfields, there will be 2^N possible
- multiplexer states.
+ The driver builds multiplexer controllers for bitfields in either
+ a syscon register or a driver regmap register. For N bit wide
+ bitfields, there will be 2^N possible multiplexer states.
To compile the driver as a module, choose M here: the module will
be called mux-mmio.
diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c
index 935ac44aa209..44a7a0e885b8 100644
--- a/drivers/mux/mmio.c
+++ b/drivers/mux/mmio.c
@@ -28,6 +28,7 @@ static const struct mux_control_ops mux_mmio_ops = {
static const struct of_device_id mux_mmio_dt_ids[] = {
{ .compatible = "mmio-mux", },
+ { .compatible = "reg-mux", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
@@ -43,7 +44,10 @@ static int mux_mmio_probe(struct platform_device *pdev)
int ret;
int i;
- regmap = syscon_node_to_regmap(np->parent);
+ if (of_device_is_compatible(np, "mmio-mux"))
+ regmap = syscon_node_to_regmap(np->parent);
+ else
+ regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(dev, "failed to get regmap: %d\n", ret);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index afa4335e0a20..c2ec750cae6e 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -47,6 +47,13 @@ config NVMEM_IMX_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-imx-ocotp.
+config NVMEM_IMX_OCOTP_SCU
+ tristate "i.MX8 SCU On-Chip OTP Controller support"
+ depends on IMX_SCU
+ help
+ This is a driver for the SCU On-Chip OTP Controller (OCOTP)
+ available on i.MX8 SoCs.
+
config NVMEM_LPC18XX_EEPROM
tristate "NXP LPC18XX EEPROM Memory Support"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -188,7 +195,7 @@ config MESON_MX_EFUSE
config NVMEM_SNVS_LPGPR
tristate "Support for Low Power General Purpose Register"
- depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
+ depends on ARCH_MXC || COMPILE_TEST
help
This is a driver for Low Power General Purpose Register (LPGPR) available on
i.MX6 and i.MX7 SoCs in Secure Non-Volatile Storage (SNVS) of this chip.
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index c1fe4768dfef..e5c153d99a67 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
nvmem-imx-iim-y := imx-iim.o
obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
+nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
new file mode 100644
index 000000000000..d9dc482ecb2f
--- /dev/null
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 OCOTP fusebox driver
+ *
+ * Copyright 2019 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/firmware/imx/sci.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum ocotp_devtype {
+ IMX8QXP,
+};
+
+struct ocotp_devtype_data {
+ int devtype;
+ int nregs;
+};
+
+struct ocotp_priv {
+ struct device *dev;
+ const struct ocotp_devtype_data *data;
+ struct imx_sc_ipc *nvmem_ipc;
+};
+
+struct imx_sc_msg_misc_fuse_read {
+ struct imx_sc_rpc_msg hdr;
+ u32 word;
+} __packed;
+
+static struct ocotp_devtype_data imx8qxp_data = {
+ .devtype = IMX8QXP,
+ .nregs = 800,
+};
+
+static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
+ u32 *val)
+{
+ struct imx_sc_msg_misc_fuse_read msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ;
+ hdr->size = 2;
+
+ msg.word = word;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return ret;
+
+ *val = msg.word;
+
+ return 0;
+}
+
+static int imx_scu_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ u32 count, index, num_bytes;
+ u32 *buf;
+ void *p;
+ int i, ret;
+
+ index = offset >> 2;
+ num_bytes = round_up((offset % 4) + bytes, 4);
+ count = num_bytes >> 2;
+
+ if (count > (priv->data->nregs - index))
+ count = priv->data->nregs - index;
+
+ p = kzalloc(num_bytes, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ buf = p;
+
+ for (i = index; i < (index + count); i++) {
+ if (priv->data->devtype == IMX8QXP) {
+ if ((i > 271) && (i < 544)) {
+ *buf++ = 0;
+ continue;
+ }
+ }
+
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
+ if (ret) {
+ kfree(p);
+ return ret;
+ }
+ buf++;
+ }
+
+ memcpy(val, (u8 *)p + offset % 4, bytes);
+
+ kfree(p);
+
+ return 0;
+}
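The index/offset arithmetic in imx_scu_ocotp_read() is easiest to follow with a concrete case; assuming, purely for illustration, a 5-byte read at byte offset 6:

	/*
	 * offset = 6, bytes = 5 (illustrative values only)
	 *   index     = offset >> 2                       = 1
	 *   num_bytes = round_up((offset % 4) + bytes, 4) = round_up(7, 4) = 8
	 *   count     = num_bytes >> 2                     = 2
	 * Words 1 and 2 are fetched over the SCU RPC into the bounce buffer,
	 * then memcpy() copies the 5 requested bytes starting at offset
	 * (offset % 4) = 2 within that buffer.
	 */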
+
+static struct nvmem_config imx_scu_ocotp_nvmem_config = {
+ .name = "imx-scu-ocotp",
+ .read_only = true,
+ .word_size = 4,
+ .stride = 1,
+ .owner = THIS_MODULE,
+ .reg_read = imx_scu_ocotp_read,
+};
+
+static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids);
+
+static int imx_scu_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocotp_priv *priv;
+ struct nvmem_device *nvmem;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = imx_scu_get_handle(&priv->nvmem_ipc);
+ if (ret)
+ return ret;
+
+ priv->data = of_device_get_match_data(dev);
+ priv->dev = dev;
+ imx_scu_ocotp_nvmem_config.size = 4 * priv->data->nregs;
+ imx_scu_ocotp_nvmem_config.dev = dev;
+ imx_scu_ocotp_nvmem_config.priv = priv;
+ nvmem = devm_nvmem_register(dev, &imx_scu_ocotp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver imx_scu_ocotp_driver = {
+ .probe = imx_scu_ocotp_probe,
+ .driver = {
+ .name = "imx_scu_ocotp",
+ .of_match_table = imx_scu_ocotp_dt_ids,
+ },
+};
+module_platform_driver(imx_scu_ocotp_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("i.MX8 SCU OCOTP fuse box driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index bd016b928589..42d4451e7d67 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -39,12 +39,14 @@
#define IMX_OCOTP_ADDR_DATA2 0x0040
#define IMX_OCOTP_ADDR_DATA3 0x0050
-#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F
+#define IMX_OCOTP_BM_CTRL_ADDR 0x000000FF
#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
-#define DEF_RELAX 20 /* > 16.5ns */
+#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
+#define TIMING_STROBE_READ_NS 37 /* Min time before read */
+#define TIMING_RELAX_NS 17
#define DEF_FSOURCE 1001 /* > 1000 ns */
#define DEF_STROBE_PROG 10000 /* IPG clocks */
#define IMX_OCOTP_WR_UNLOCK 0x3E770000
@@ -176,14 +178,41 @@ static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
* fields with timing values to match the current frequency of the
* ipg_clk. OTP writes will work at maximum bus frequencies as long
* as the HW_OCOTP_TIMING parameters are set correctly.
+ *
+ * Note: there are minimum timings required to ensure an OTP fuse burns
+	 * correctly that are independent of the ipg_clk. Those values are not
+	 * formally documented anywhere; however, working from the minimum
+	 * timings given in U-Boot, we can say:
+ *
+ * - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10
+ * microseconds feels about right as representative of a minimum time
+ * to physically burn out a fuse.
+ *
+	 * - Minimum STROBE_READ, i.e. the time to wait after an OTP fuse burn
+	 *   before performing another read, is 37 nanoseconds.
+ *
+	 * - Minimum RELAX timing is 17 nanoseconds. This final RELAX minimum
+	 *   timing is not entirely clear; the documentation says "This
+ * count value specifies the time to add to all default timing
+ * parameters other than the Tpgm and Trd. It is given in number
+ * of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG
+ * and STROBE_READ respectively. What the other timing parameters
+	 *   are, though, is not specified. Experience shows a zero RELAX
+ * value will mess up a re-load of the shadow registers post OTP
+ * burn.
*/
clk_rate = clk_get_rate(priv->clk);
- relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
- strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
- strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+ relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1;
+ strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS,
+ 1000000000);
+ strobe_read += 2 * (relax + 1) - 1;
+ strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US,
+ 1000000);
+ strobe_prog += 2 * (relax + 1) - 1;
- timing = strobe_prog & 0x00000FFF;
+ timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000;
+ timing |= strobe_prog & 0x00000FFF;
timing |= (relax << 12) & 0x0000F000;
timing |= (strobe_read << 16) & 0x003F0000;
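As a worked example of the new timing formula (assuming, for illustration only, a 66.5 MHz ipg_clk):

	/*
	 * clk_rate = 66,500,000 Hz
	 *   relax       = DIV_ROUND_UP(66500000 * 17, 1000000000) - 1 = 2 - 1 = 1
	 *   strobe_read = DIV_ROUND_UP(66500000 * 37, 1000000000)
	 *                 + 2 * (relax + 1) - 1                        = 3 + 3 = 6
	 *   strobe_prog = DIV_ROUND_CLOSEST(66500000 * 10, 1000000)
	 *                 + 2 * (relax + 1) - 1                        = 665 + 3 = 668
	 * At this clock, STROBE_PROG spans roughly 10 us, STROBE_READ roughly
	 * 90 ns and RELAX roughly 30 ns, all above the documented minimums.
	 */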
@@ -440,8 +469,14 @@ static const struct ocotp_params imx7ulp_params = {
static const struct ocotp_params imx8mq_params = {
.nregs = 256,
- .bank_address_words = 4,
- .set_timing = imx_ocotp_set_imx7_timing,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx8mm_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -454,6 +489,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
{ .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
+ { .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b7e5cee2aa26..6053d0158b3b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -341,7 +341,7 @@ config HP_ACCEL
Support for a led indicating disk protection will be provided as
hp::hddprotect. For more information on the feature, refer to
- Documentation/misc-devices/lis3lv02d.
+ Documentation/misc-devices/lis3lv02d.rst.
To compile this driver as a module, choose M here: the module will
be called hp_accel.
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index b2f07d2043eb..526e3215d8fe 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -98,11 +98,6 @@ static int slim_device_remove(struct device *dev)
static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct slim_device *sbdev = to_slim_device(dev);
- int ret;
-
- ret = of_device_uevent_modalias(dev, env);
- if (ret != -ENODEV)
- return ret;
return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
}
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index ad3e2e23f56e..a444badd8df5 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -528,10 +528,8 @@ static int qcom_slim_probe(struct platform_device *pdev)
slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
- if (IS_ERR(ctrl->base)) {
- dev_err(&pdev->dev, "IOremap failed\n");
+ if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- }
sctrl->set_laddr = qcom_set_laddr;
sctrl->xfer_msg = qcom_xfer_msg;
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
index 2fa05324ed07..75f87b3d8b95 100644
--- a/drivers/slimbus/stream.c
+++ b/drivers/slimbus/stream.c
@@ -84,7 +84,7 @@ static const int slim_presence_rate_table[] = {
512000,
};
-/*
+/**
* slim_stream_allocate() - Allocate a new SLIMbus Stream
* @dev:Slim device to be associated with
* @name: name of the stream
@@ -189,7 +189,7 @@ static int slim_get_prate_code(int rate)
return -EINVAL;
}
-/*
+/**
* slim_stream_prepare() - Prepare a SLIMbus Stream
*
* @rt: instance of slim stream runtime to configure
@@ -336,7 +336,7 @@ static int slim_activate_channel(struct slim_stream_runtime *stream,
return slim_do_transfer(sdev->ctrl, &txn);
}
-/*
+/**
* slim_stream_enable() - Enable a prepared SLIMbus Stream
*
* @stream: instance of slim stream runtime to enable
@@ -389,7 +389,7 @@ int slim_stream_enable(struct slim_stream_runtime *stream)
}
EXPORT_SYMBOL_GPL(slim_stream_enable);
-/*
+/**
* slim_stream_disable() - Disable a SLIMbus Stream
*
* @stream: instance of slim stream runtime to disable
@@ -423,7 +423,7 @@ int slim_stream_disable(struct slim_stream_runtime *stream)
}
EXPORT_SYMBOL_GPL(slim_stream_disable);
-/*
+/**
* slim_stream_unprepare() - Un-prepare a SLIMbus Stream
*
* @stream: instance of slim stream runtime to unprepare
@@ -449,7 +449,7 @@ int slim_stream_unprepare(struct slim_stream_runtime *stream)
}
EXPORT_SYMBOL_GPL(slim_stream_unprepare);
-/*
+/**
* slim_stream_free() - Free a SLIMbus Stream
*
* @stream: instance of slim stream runtime to free
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index aac35fc3cf22..fe745830a261 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -87,7 +87,7 @@ int sdw_add_bus_master(struct sdw_bus *bus)
/*
* Initialize clock values based on Master properties. The max
- * frequency is read from max_freq property. Current assumption
+ * frequency is read from max_clk_freq property. Current assumption
* is that the bus will start at highest clock frequency when
* powered on.
*
@@ -95,7 +95,7 @@ int sdw_add_bus_master(struct sdw_bus *bus)
* to start with bank 0 (Table 40 of Spec)
*/
prop = &bus->prop;
- bus->params.max_dr_freq = prop->max_freq * SDW_DOUBLE_RATE_FACTOR;
+ bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
bus->params.curr_dr_freq = bus->params.max_dr_freq;
bus->params.curr_bank = SDW_BANK0;
bus->params.next_bank = SDW_BANK1;
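As the comment above notes, the starting bus parameters come straight from the master's max_clk_freq property, with the data rate doubled because data moves on both clock edges. A quick stand-alone check of that arithmetic (the factor of 2 mirrors SDW_DOUBLE_RATE_FACTOR and is an assumption here):

#include <stdio.h>

#define DOUBLE_RATE_FACTOR 2U   /* stand-in for SDW_DOUBLE_RATE_FACTOR */

int main(void)
{
        unsigned int max_clk_freq = 9600000;    /* e.g. 9.6 MHz from the DisCo properties */
        unsigned int max_dr_freq  = max_clk_freq * DOUBLE_RATE_FACTOR;

        printf("max_dr_freq = %u Hz\n", max_dr_freq);   /* 19200000 */
        return 0;
}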
@@ -648,7 +648,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
return 0;
/* Enable DP0 interrupts */
- val = prop->dp0_prop->device_interrupts;
+ val = prop->dp0_prop->imp_def_interrupts;
val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 682789bb8ab3..ff4badc9b3de 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw_registers.h>
@@ -236,19 +237,19 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
for (i = 0; i < count; i++) {
if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
no_ack = 1;
- dev_dbg(cdns->dev, "Msg Ack not received\n");
+ dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n");
if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
nack = 1;
- dev_err(cdns->dev, "Msg NACK received\n");
+ dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
}
}
}
if (nack) {
- dev_err(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
+ dev_err_ratelimited(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
} else if (no_ack) {
- dev_dbg(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
+ dev_dbg_ratelimited(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
}
@@ -356,12 +357,12 @@ cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
/* For NACK, NO ack, don't return err if we are in Broadcast mode */
if (nack) {
- dev_err(cdns->dev,
- "SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
+ dev_err_ratelimited(cdns->dev,
+ "SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
} else if (no_ack) {
- dev_dbg(cdns->dev,
- "SCP_addrpage ignored for Slave %d\n", msg->dev_num);
+ dev_dbg_ratelimited(cdns->dev,
+ "SCP_addrpage ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
}
@@ -486,7 +487,8 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
{
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
bool is_slave = false;
- u64 slave, mask;
+ u64 slave;
+ u32 mask;
int i, set_status;
/* combine the two status */
@@ -524,9 +526,9 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
/* first check if Slave reported multiple status */
if (set_status > 1) {
- dev_warn(cdns->dev,
- "Slave reported multiple Status: %d\n",
- status[i]);
+ dev_warn_ratelimited(cdns->dev,
+ "Slave reported multiple Status: %d\n",
+ mask);
/*
* TODO: we need to reread the status here by
* issuing a PING cmd
@@ -612,7 +614,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
struct sdw_cdns *cdns = dev_id;
u32 slave0, slave1;
- dev_dbg(cdns->dev, "Slave status change\n");
+ dev_dbg_ratelimited(cdns->dev, "Slave status change\n");
slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
@@ -716,6 +718,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream = &cdns->pcm;
/* First two PDIs are reserved for bulk transfers */
+ if (stream->num_bd < CDNS_PCM_PDI_OFFSET)
+ return -EINVAL;
stream->num_bd -= CDNS_PCM_PDI_OFFSET;
offset = CDNS_PCM_PDI_OFFSET;
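The new bounds check matters because num_bd is an unsigned count: subtracting the reserved bulk PDIs from a smaller value would wrap around instead of failing cleanly. A small sketch of that failure mode, assuming the reserved count is 2 as the comment states:

#include <stdio.h>

#define PCM_PDI_OFFSET 2U   /* assumed value of CDNS_PCM_PDI_OFFSET */

int main(void)
{
        unsigned int num_bd = 1;        /* fewer PDIs than the reserved count */

        if (num_bd < PCM_PDI_OFFSET) {
                printf("reject: only %u bulk PDIs reported\n", num_bd);
                return 1;
        }
        num_bd -= PCM_PDI_OFFSET;       /* safe only after the check above */
        printf("usable PDIs: %u\n", num_bd);
        return 0;
}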
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 60293a00a14e..317873bc0555 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -263,6 +263,9 @@ static void intel_pdi_init(struct sdw_intel *sdw,
config->pcm_out = (pcm_cap & SDW_SHIM_PCMSCAP_OSS) >>
SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_OSS);
+ dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
+ config->pcm_bd, config->pcm_in, config->pcm_out);
+
/* PDM Stream Capability */
pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
@@ -272,6 +275,9 @@ static void intel_pdi_init(struct sdw_intel *sdw,
SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_ISS);
config->pdm_out = (pdm_cap & SDW_SHIM_PDMSCAP_OSS) >>
SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_OSS);
+
+ dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
+ config->pdm_bd, config->pdm_in, config->pdm_out);
}
static int
@@ -796,13 +802,14 @@ static int intel_prop_read(struct sdw_bus *bus)
sdw_master_read_prop(bus);
/* BIOS is not giving some values correctly. So, lets override them */
- bus->prop.num_freq = 1;
- bus->prop.freq = devm_kcalloc(bus->dev, bus->prop.num_freq,
- sizeof(*bus->prop.freq), GFP_KERNEL);
- if (!bus->prop.freq)
+ bus->prop.num_clk_freq = 1;
+ bus->prop.clk_freq = devm_kcalloc(bus->dev, bus->prop.num_clk_freq,
+ sizeof(*bus->prop.clk_freq),
+ GFP_KERNEL);
+ if (!bus->prop.clk_freq)
return -ENOMEM;
- bus->prop.freq[0] = bus->prop.max_freq;
+ bus->prop.clk_freq[0] = bus->prop.max_clk_freq;
bus->prop.err_threshold = 5;
return 0;
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 71050e5f643d..d923b6262330 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -5,7 +5,7 @@
#define __SDW_INTEL_LOCAL_H
/**
- * struct sdw_intel_res - Soundwire link resources
+ * struct sdw_intel_link_res - Soundwire link resources
* @registers: Link IO registers base
* @shim: Audio shim pointer
* @alh: ALH (Audio Link Hub) pointer
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index d3d6b54c5791..70637a0383d2 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -14,6 +14,7 @@
#include <linux/soundwire/sdw_intel.h>
#include "intel.h"
+#define SDW_LINK_TYPE 4 /* from Intel ACPI documentation */
#define SDW_MAX_LINKS 4
#define SDW_SHIM_LCAP 0x0
#define SDW_SHIM_BASE 0x2C000
@@ -80,6 +81,7 @@ static struct sdw_intel_ctx
/* Check SNDWLCAP.LCOUNT */
caps = ioread32(res->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
+ caps &= GENMASK(2, 0);
/* Check HW supported vs property value and use min of two */
count = min_t(u8, caps, count);
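The added GENMASK(2, 0) keeps only the low three bits of SNDWLCAP before the value is compared against the property-supplied link count. A compact illustration of that clamp (the layout of the remaining SNDWLCAP bits is not shown in this hunk, so the rationale for the mask is an assumption):

#include <stdio.h>

#define LCOUNT_MASK 0x7U        /* GENMASK(2, 0) */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int caps  = 0xdeadbee2U & LCOUNT_MASK; /* stray upper bits dropped -> 2 */
        unsigned int count = 4;                         /* from the firmware property */

        printf("links used: %u\n", min_u(caps, count)); /* 2 */
        return 0;
}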
@@ -89,6 +91,9 @@ static struct sdw_intel_ctx
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
count, SDW_MAX_LINKS);
return NULL;
+ } else if (!count) {
+ dev_warn(&adev->dev, "No SoundWire links detected\n");
+ return NULL;
}
dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
@@ -150,6 +155,12 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
{
struct sdw_intel_res *res = cdata;
struct acpi_device *adev;
+ acpi_status status;
+ u64 adr;
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* keep going */
if (acpi_bus_get_device(handle, &adev)) {
pr_err("%s: Couldn't find ACPI handle\n", __func__);
@@ -157,7 +168,19 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
}
res->handle = handle;
- return AE_OK;
+
+ /*
+ * On some Intel platforms, multiple children of the HDAS
+ * device can be found, but only one of them is the SoundWire
+ * controller. The SNDW device is always exposed with
+ * Name(_ADR, 0x40000000), with bits 31..28 representing the
+ * SoundWire link, so filter accordingly.
+ */
+ if ((adr & GENMASK(31, 28)) >> 28 != SDW_LINK_TYPE)
+ return AE_OK; /* keep going */
+
+ /* device found, stop namespace walk */
+ return AE_CTRL_TERMINATE;
}
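The filter added above keys off the ACPI _ADR value: bits 31:28 encode the link type, and the SoundWire controller is expected to report type 4 (SDW_LINK_TYPE). A stand-alone sketch of that extraction, with GENMASK re-derived locally for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)  (((~0U) << (l)) & (~0U >> (31 - (h))))
#define SDW_LINK_TYPE    4

static int is_soundwire_adr(uint64_t adr)
{
        /* bits 31:28 of _ADR carry the link type */
        return ((adr & GENMASK32(31, 28)) >> 28) == SDW_LINK_TYPE;
}

int main(void)
{
        printf("%d\n", is_soundwire_adr(0x40000000));   /* 1: SoundWire link */
        printf("%d\n", is_soundwire_adr(0x00000005));   /* 0: some other HDAS child */
        return 0;
}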
/**
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index c1f51d6a23d2..79fee1b21ab6 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -40,7 +40,7 @@ int sdw_master_read_prop(struct sdw_bus *bus)
/* Find master handle */
snprintf(name, sizeof(name),
- "mipi-sdw-master-%d-subproperties", bus->link_id);
+ "mipi-sdw-link-%d-subproperties", bus->link_id);
link = device_get_named_child_node(bus->dev, name);
if (!link) {
@@ -50,39 +50,40 @@ int sdw_master_read_prop(struct sdw_bus *bus)
if (fwnode_property_read_bool(link,
"mipi-sdw-clock-stop-mode0-supported"))
- prop->clk_stop_mode = SDW_CLK_STOP_MODE0;
+ prop->clk_stop_modes |= BIT(SDW_CLK_STOP_MODE0);
if (fwnode_property_read_bool(link,
"mipi-sdw-clock-stop-mode1-supported"))
- prop->clk_stop_mode |= SDW_CLK_STOP_MODE1;
+ prop->clk_stop_modes |= BIT(SDW_CLK_STOP_MODE1);
fwnode_property_read_u32(link,
"mipi-sdw-max-clock-frequency",
- &prop->max_freq);
+ &prop->max_clk_freq);
nval = fwnode_property_read_u32_array(link,
"mipi-sdw-clock-frequencies-supported", NULL, 0);
if (nval > 0) {
- prop->num_freq = nval;
- prop->freq = devm_kcalloc(bus->dev, prop->num_freq,
- sizeof(*prop->freq), GFP_KERNEL);
- if (!prop->freq)
+ prop->num_clk_freq = nval;
+ prop->clk_freq = devm_kcalloc(bus->dev, prop->num_clk_freq,
+ sizeof(*prop->clk_freq),
+ GFP_KERNEL);
+ if (!prop->clk_freq)
return -ENOMEM;
fwnode_property_read_u32_array(link,
"mipi-sdw-clock-frequencies-supported",
- prop->freq, prop->num_freq);
+ prop->clk_freq, prop->num_clk_freq);
}
/*
* Check the frequencies supported. If FW doesn't provide max
* freq, then populate here by checking values.
*/
- if (!prop->max_freq && prop->freq) {
- prop->max_freq = prop->freq[0];
- for (i = 1; i < prop->num_freq; i++) {
- if (prop->freq[i] > prop->max_freq)
- prop->max_freq = prop->freq[i];
+ if (!prop->max_clk_freq && prop->clk_freq) {
+ prop->max_clk_freq = prop->clk_freq[0];
+ for (i = 1; i < prop->num_clk_freq; i++) {
+ if (prop->clk_freq[i] > prop->max_clk_freq)
+ prop->max_clk_freq = prop->clk_freq[i];
}
}
@@ -149,13 +150,13 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave,
dp0->words, dp0->num_words);
}
- dp0->flow_controlled = fwnode_property_read_bool(port,
+ dp0->BRA_flow_controlled = fwnode_property_read_bool(port,
"mipi-sdw-bra-flow-controlled");
dp0->simple_ch_prep_sm = fwnode_property_read_bool(port,
"mipi-sdw-simplified-channel-prepare-sm");
- dp0->device_interrupts = fwnode_property_read_bool(port,
+ dp0->imp_def_interrupts = fwnode_property_read_bool(port,
"mipi-sdw-imp-def-dp0-interrupts-supported");
return 0;
@@ -224,7 +225,7 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
fwnode_property_read_u32(node,
"mipi-sdw-imp-def-dpn-interrupts-supported",
- &dpn[i].device_interrupts);
+ &dpn[i].imp_def_interrupts);
fwnode_property_read_u32(node, "mipi-sdw-min-channel-number",
&dpn[i].min_ch);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 1d5294b8783b..a0476755a459 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -439,7 +439,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
prep_ch.bank = bus->params.next_bank;
- if (dpn_prop->device_interrupts || !dpn_prop->simple_ch_prep_sm)
+ if (dpn_prop->imp_def_interrupts || !dpn_prop->simple_ch_prep_sm)
intr = true;
/*
@@ -449,7 +449,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
*/
if (prep && intr) {
ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
- dpn_prop->device_interrupts);
+ dpn_prop->imp_def_interrupts);
if (ret < 0)
return ret;
}
@@ -493,7 +493,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
/* Disable interrupt after Port de-prepare */
if (!prep && intr)
ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
- dpn_prop->device_interrupts);
+ dpn_prop->imp_def_interrupts);
return ret;
}
@@ -1473,7 +1473,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
memcpy(&params, &bus->params, sizeof(params));
/* TODO: Support Asynchronous mode */
- if ((prop->max_freq % stream->params.rate) != 0) {
+ if ((prop->max_clk_freq % stream->params.rate) != 0) {
dev_err(bus->dev, "Async mode not supported\n");
return -EINVAL;
}
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index 5ae74d5545e6..f1fb18afbcea 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -22,12 +22,17 @@
#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
+#define W1_F3A_INVALID_PIO_STATE 0xFF
static ssize_t state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ unsigned int retries = W1_F3A_RETRIES;
+ ssize_t bytes_read = -EIO;
+ u8 state;
+
dev_dbg(&sl->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
@@ -40,22 +45,37 @@ static ssize_t state_read(struct file *filp, struct kobject *kobj,
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
- if (w1_reset_select_slave(sl)) {
- mutex_unlock(&sl->master->bus_mutex);
- return -EIO;
- }
+next:
+ if (w1_reset_select_slave(sl))
+ goto out;
+
+ while (retries--) {
+ w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
+
+ state = w1_read_8(sl->master);
+ if ((state & 0x0F) == ((~state >> 4) & 0x0F)) {
+ /* complement is correct */
+ *buf = state;
+ bytes_read = 1;
+ goto out;
+ } else if (state == W1_F3A_INVALID_PIO_STATE) {
+ /* slave didn't respond, try to select it again */
+ dev_warn(&sl->dev, "slave device did not respond to PIO_ACCESS_READ, "
+ "reselecting, retries left: %d\n", retries);
+ goto next;
+ }
- w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
- *buf = w1_read_8(sl->master);
+ if (w1_reset_resume_command(sl->master))
+ goto out; /* unrecoverable error */
- mutex_unlock(&sl->master->bus_mutex);
- dev_dbg(&sl->dev, "mutex unlocked");
+ dev_warn(&sl->dev, "PIO_ACCESS_READ error, retries left: %d\n", retries);
+ }
- /* check for correct complement */
- if ((*buf & 0x0F) != ((~*buf >> 4) & 0x0F))
- return -EIO;
- else
- return 1;
+out:
+ mutex_unlock(&sl->master->bus_mutex);
+ dev_dbg(&sl->dev, "%s, mutex unlocked, retries: %d\n",
+ (bytes_read > 0) ? "succeeded" : "error", retries);
+ return bytes_read;
}
static BIN_ATTR_RO(state, 1);
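The retry loop above only trusts a PIO sample whose high nibble is the bitwise complement of its low nibble, and treats an all-ones byte as a missing slave. A minimal check of those two cases:

#include <stdint.h>
#include <stdio.h>

static int pio_sample_valid(uint8_t state)
{
        /* high nibble must be the complement of the low nibble */
        return (state & 0x0F) == (((uint8_t)~state >> 4) & 0x0F);
}

int main(void)
{
        printf("0xA5 -> %d\n", pio_sample_valid(0xA5)); /* 1: complement matches */
        printf("0xFF -> %d\n", pio_sample_valid(0xFF)); /* 0: bus returned all ones */
        return 0;
}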
@@ -67,6 +87,7 @@ static ssize_t output_write(struct file *filp, struct kobject *kobj,
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
unsigned int retries = W1_F3A_RETRIES;
+ ssize_t bytes_written = -EIO;
if (count != 1 || off != 0)
return -EFAULT;
@@ -76,7 +97,7 @@ static ssize_t output_write(struct file *filp, struct kobject *kobj,
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl))
- goto error;
+ goto out;
/* according to the DS2413 datasheet the most significant 6 bits
should be set to "1"s, so do it now */
@@ -89,18 +110,20 @@ static ssize_t output_write(struct file *filp, struct kobject *kobj,
w1_write_block(sl->master, w1_buf, 3);
if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
- mutex_unlock(&sl->master->bus_mutex);
- dev_dbg(&sl->dev, "mutex unlocked, retries:%d", retries);
- return 1;
+ bytes_written = 1;
+ goto out;
}
if (w1_reset_resume_command(sl->master))
- goto error;
+ goto out; /* unrecoverable error */
+
+ dev_warn(&sl->dev, "PIO_ACCESS_WRITE error, retries left: %d\n", retries);
}
-error:
+out:
mutex_unlock(&sl->master->bus_mutex);
- dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
- return -EIO;
+ dev_dbg(&sl->dev, "%s, mutex unlocked, retries: %d\n",
+ (bytes_written > 0) ? "succeeded" : "error", retries);
+ return bytes_written;
}
static BIN_ATTR(output, S_IRUGO | S_IWUSR | S_IWGRP, NULL, output_write, 1);
diff --git a/drivers/w1/slaves/w1_ds2805.c b/drivers/w1/slaves/w1_ds2805.c
index ee1ec9867a78..ccb753a474b1 100644
--- a/drivers/w1/slaves/w1_ds2805.c
+++ b/drivers/w1/slaves/w1_ds2805.c
@@ -286,7 +286,7 @@ static struct w1_family_ops w1_f0d_fops = {
.remove_slave = w1_f0d_remove_slave,
};
-static struct w1_family w1_family_2d = {
+static struct w1_family w1_family_0d = {
.fid = W1_EEPROM_DS2805,
.fops = &w1_f0d_fops,
};
@@ -294,13 +294,13 @@ static struct w1_family w1_family_2d = {
static int __init w1_f0d_init(void)
{
pr_info("%s()\n", __func__);
- return w1_register_family(&w1_family_2d);
+ return w1_register_family(&w1_family_0d);
}
static void __exit w1_f0d_fini(void)
{
pr_info("%s()\n", __func__);
- w1_unregister_family(&w1_family_2d);
+ w1_unregister_family(&w1_family_0d);
}
module_init(w1_f0d_init);