From e02fb7264d8a31dddb9a80fbde603feb502d6478 Mon Sep 17 00:00:00 2001 From: stuart hayes Date: Thu, 26 May 2016 11:38:41 -0500 Subject: nfit: add Microsoft NVDIMM DSM command set to white list Add the Microsoft _DSM command set to the white list of NVDIMM command sets. This command set is documented at: https://msdn.microsoft.com/library/windows/hardware/mt604741 Cc: Pavel Machek [pavel: fix up braces] Signed-off-by: Stuart Hayes Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 11 +++++++---- drivers/acpi/nfit.h | 4 ++++ 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 2215fc847fa9..da14c89f4667 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1130,11 +1130,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, } /* - * Until standardization materializes we need to consider up to 3 + * Until standardization materializes we need to consider 4 * different command sets. Note, that checking for function0 (bit0) * tells us if any commands are reachable through this uuid. */ - for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++) + for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) break; @@ -1144,12 +1144,14 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dsm_mask = 0x3fe; if (disable_vendor_specific) dsm_mask &= ~(1 << ND_CMD_VENDOR); - } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) + } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { dsm_mask = 0x1c3c76; - else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { + } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { dsm_mask = 0x1fe; if (disable_vendor_specific) dsm_mask &= ~(1 << 8); + } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { + dsm_mask = 0xffffffff; } else { dev_err(dev, "unknown dimm command family\n"); nfit_mem->family = -1; @@ -2692,6 +2694,7 @@ static __init int nfit_init(void) acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); + acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 11cb38348aef..f06fa91c5abf 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -31,6 +31,9 @@ #define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" #define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e" +/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ +#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" + #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED) @@ -40,6 +43,7 @@ enum nfit_uuids { NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL, NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, + NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, NFIT_SPA_VOLATILE, NFIT_SPA_PM, NFIT_SPA_DCR, -- cgit v1.2.3 From d932dd2ccda0d890d318428466794540e081b9c0 Mon Sep 17 00:00:00 2001 From: "Sajjan, Vikas C" Date: Mon, 4 Jul 2016 10:02:51 +0530 Subject: nfit: use devm_add_action_or_reset() If devm_add_action() fails, we are explicitly calling the cleanup to free the resources allocated. 
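In sketch form, the two shapes of this pattern look as follows; the resource argument and the release_thing() callback are illustrative placeholders rather than this driver's actual names:

#include <linux/device.h>
#include <linux/ioport.h>

/* sketch only: release_thing() stands in for the driver's cleanup callback */
static void release_thing(void *data)
{
	remove_resource(data);
}

/* old shape: on registration failure the caller must run the cleanup by hand */
static int register_old(struct device *dev, struct resource *res)
{
	int rc = devm_add_action(dev, release_thing, res);

	if (rc) {
		release_thing(res);
		return rc;
	}
	return 0;
}

/* new shape: the helper has already run release_thing() if registration failed */
static int register_new(struct device *dev, struct resource *res)
{
	return devm_add_action_or_reset(dev, release_thing, res);
}
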
Lets use the helper devm_add_action_or_reset() and return directly in case of error, since the cleanup function has been already called by the helper if there was any error. Signed-off-by: Vikas C Sajjan Reviewed-by: Johannes Thumshirn Reviewed-by: Lee, Chun-Yi Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index da14c89f4667..3e54157f02cc 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1920,11 +1920,11 @@ static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, if (ret) return ret; - ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res); - if (ret) { - remove_resource(res); + ret = devm_add_action_or_reset(acpi_desc->dev, + acpi_nfit_remove_resource, + res); + if (ret) return ret; - } return 0; } -- cgit v1.2.3 From ad9ac5e1957531a826ff6d71dc105f01fa665d2e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 26 May 2016 11:38:08 -0700 Subject: nfit: always associate flush hints Before enabling use of flush hints for pmem regions, we need to make sure they are always associated. Move the initialization of nfit_flush out of the block-window specific init path to the general init path. Cc: Ross Zwisler Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 3e54157f02cc..d79837b9d07e 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -614,7 +614,6 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, { u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; struct nfit_memdev *nfit_memdev; - struct nfit_flush *nfit_flush; struct nfit_bdw *nfit_bdw; struct nfit_idt *nfit_idt; u16 idt_idx, range_index; @@ -647,14 +646,6 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, nfit_mem->idt_bdw = nfit_idt->idt; break; } - - list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { - if (nfit_flush->flush->device_handle != - nfit_memdev->memdev->device_handle) - continue; - nfit_mem->nfit_flush = nfit_flush; - break; - } break; } } @@ -675,6 +666,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, } list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct nfit_flush *nfit_flush; struct nfit_dcr *nfit_dcr; u32 device_handle; u16 dcr; @@ -721,6 +713,13 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, break; } + list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { + if (nfit_flush->flush->device_handle != device_handle) + continue; + nfit_mem->nfit_flush = nfit_flush; + break; + } + if (dcr && !nfit_mem->dcr) { dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", spa->range_index, dcr); -- cgit v1.2.3 From 81ed4e3670853e4cebad88aeffc0ba1d90d4d6ed Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 10 Jun 2016 18:20:53 -0700 Subject: nfit: don't override return value of nfit_mem_init We were needlessly converting nfit_mem_init() errors to -ENOMEM. 
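The shape of this fix is the standard errno-propagation idiom, sketched below with a hypothetical callee do_setup() rather than the nfit function itself:

#include <linux/device.h>
#include <linux/errno.h>

/* do_setup() is a made-up callee that returns 0 or a -errno code */
static int do_setup(struct device *dev)
{
	return dev ? 0 : -ENODEV;
}

static int probe_old(struct device *dev)
{
	if (do_setup(dev) != 0)
		return -ENOMEM;	/* masks the callee's actual error */
	return 0;
}

static int probe_new(struct device *dev)
{
	int rc = do_setup(dev);

	if (rc)			/* -ENODEV etc. reach the caller intact */
		return rc;
	return 0;
}
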
Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index d79837b9d07e..f8c1a850effc 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -2422,10 +2422,9 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) if (rc) goto out_unlock; - if (nfit_mem_init(acpi_desc) != 0) { - rc = -ENOMEM; + rc = nfit_mem_init(acpi_desc); + if (rc) goto out_unlock; - } acpi_nfit_init_dsms(acpi_desc); -- cgit v1.2.3 From 29b9aa0aa3837c93ecd804dd3ada39b8cc75607d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 6 Jun 2016 17:42:38 -0700 Subject: libnvdimm: introduce devm_nvdimm_memremap(), convert nfit_spa_map() users In preparation for generically mapping flush hint addresses for both the BLK and PMEM use case, provide a generic / reference counted mapping api. Given the fact that a dimm may belong to multiple regions (PMEM and BLK), the flush hint addresses need to be held valid as long as any region associated with the dimm is active. This is similar to the existing BLK-region case where multiple BLK-regions may share an aperture mapping. Up-level this shared / reference-counted mapping capability from the nfit driver to a core nvdimm capability. This eliminates the need for the nd_blk_region.disable() callback. Note that the removal of nfit_spa_map() and related infrastructure is deferred to a later patch. Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index f8c1a850effc..b047dbe13bed 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1616,7 +1616,8 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, * when all region devices referencing the same mapping are disabled / * unbound. 
*/ -static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc, +static __maybe_unused void __iomem *nfit_spa_map( + struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa, enum spa_map_type type) { void __iomem *iomem; @@ -1669,7 +1670,6 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, struct device *dev) { struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); struct nd_blk_region *ndbr = to_nd_blk_region(dev); struct nfit_flush *nfit_flush; struct nfit_blk_mmio *mmio; @@ -1697,8 +1697,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, /* map block aperture memory */ nfit_blk->bdw_offset = nfit_mem->bdw->offset; mmio = &nfit_blk->mmio[BDW]; - mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, - SPA_MAP_APERTURE); + mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, + nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, nvdimm_name(nvdimm)); @@ -1720,8 +1720,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; nfit_blk->stat_offset = nfit_mem->dcr->status_offset; mmio = &nfit_blk->mmio[DCR]; - mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, - SPA_MAP_CONTROL); + mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, + nfit_mem->spa_dcr->length); if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, nvdimm_name(nvdimm)); @@ -1748,7 +1748,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, nfit_flush = nfit_mem->nfit_flush; if (nfit_flush && nfit_flush->flush->hint_count != 0) { - nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev, + nfit_blk->nvdimm_flush = devm_nvdimm_ioremap(dev, nfit_flush->flush->hint_address[0], 8); if (!nfit_blk->nvdimm_flush) return -ENOMEM; -- cgit v1.2.3 From a8a6d2e04c4ffda055db70814c50bd106e44730f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 7 Jun 2016 16:38:04 -0700 Subject: libnvdimm, nfit: remove nfit_spa_map() infrastructure Now that all shared mappings are handled by devm_nvdimm_memremap() we no longer need nfit_spa_map() nor do we need to trigger a callback to the bus provider at region disable time. 
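The devm-managed, reference-counted mapping idea can be sketched as below. This is an illustration of the pattern only; shared_map, devm_shared_memremap() and the locking scheme are invented for the sketch, not the libnvdimm implementation the series actually adds:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct shared_map {
	struct list_head list;
	struct kref kref;
	resource_size_t offset;
	size_t size;
	void *addr;
};

static LIST_HEAD(shared_maps);
static DEFINE_MUTEX(shared_maps_lock);

static void shared_map_release(struct kref *kref)
{
	struct shared_map *map = container_of(kref, struct shared_map, kref);

	list_del(&map->list);
	memunmap(map->addr);
	kfree(map);
}

/* registered once per requesting device; runs automatically at unbind */
static void shared_map_put(void *data)
{
	struct shared_map *map = data;

	mutex_lock(&shared_maps_lock);
	kref_put(&map->kref, shared_map_release);
	mutex_unlock(&shared_maps_lock);
}

static void *devm_shared_memremap(struct device *dev, resource_size_t offset,
		size_t size)
{
	struct shared_map *map;

	mutex_lock(&shared_maps_lock);
	list_for_each_entry(map, &shared_maps, list) {
		if (map->offset == offset && map->size == size) {
			kref_get(&map->kref);
			goto found;
		}
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err;
	kref_init(&map->kref);
	map->offset = offset;
	map->size = size;
	map->addr = memremap(offset, size, MEMREMAP_WB);
	if (!map->addr) {
		kfree(map);
		goto err;
	}
	list_add(&map->list, &shared_maps);
 found:
	mutex_unlock(&shared_maps_lock);
	/* on failure the helper drops the reference we just took */
	if (devm_add_action_or_reset(dev, shared_map_put, map))
		return NULL;
	return map->addr;
 err:
	mutex_unlock(&shared_maps_lock);
	return NULL;
}
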
Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 146 ---------------------------------------------------- drivers/acpi/nfit.h | 21 -------- 2 files changed, 167 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index b047dbe13bed..b76c95981547 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1509,126 +1509,6 @@ static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, return rc; } -static void nfit_spa_mapping_release(struct kref *kref) -{ - struct nfit_spa_mapping *spa_map = to_spa_map(kref); - struct acpi_nfit_system_address *spa = spa_map->spa; - struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc; - - WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); - dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index); - if (spa_map->type == SPA_MAP_APERTURE) - memunmap((void __force *)spa_map->addr.aperture); - else - iounmap(spa_map->addr.base); - release_mem_region(spa->address, spa->length); - list_del(&spa_map->list); - kfree(spa_map); -} - -static struct nfit_spa_mapping *find_spa_mapping( - struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa) -{ - struct nfit_spa_mapping *spa_map; - - WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); - list_for_each_entry(spa_map, &acpi_desc->spa_maps, list) - if (spa_map->spa == spa) - return spa_map; - - return NULL; -} - -static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa) -{ - struct nfit_spa_mapping *spa_map; - - mutex_lock(&acpi_desc->spa_map_mutex); - spa_map = find_spa_mapping(acpi_desc, spa); - - if (spa_map) - kref_put(&spa_map->kref, nfit_spa_mapping_release); - mutex_unlock(&acpi_desc->spa_map_mutex); -} - -static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa, enum spa_map_type type) -{ - resource_size_t start = spa->address; - resource_size_t n = spa->length; - struct nfit_spa_mapping *spa_map; - struct resource *res; - - WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); - - spa_map = find_spa_mapping(acpi_desc, spa); - if (spa_map) { - kref_get(&spa_map->kref); - return spa_map->addr.base; - } - - spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL); - if (!spa_map) - return NULL; - - INIT_LIST_HEAD(&spa_map->list); - spa_map->spa = spa; - kref_init(&spa_map->kref); - spa_map->acpi_desc = acpi_desc; - - res = request_mem_region(start, n, dev_name(acpi_desc->dev)); - if (!res) - goto err_mem; - - spa_map->type = type; - if (type == SPA_MAP_APERTURE) - spa_map->addr.aperture = (void __pmem *)memremap(start, n, - ARCH_MEMREMAP_PMEM); - else - spa_map->addr.base = ioremap_nocache(start, n); - - - if (!spa_map->addr.base) - goto err_map; - - list_add_tail(&spa_map->list, &acpi_desc->spa_maps); - return spa_map->addr.base; - - err_map: - release_mem_region(start, n); - err_mem: - kfree(spa_map); - return NULL; -} - -/** - * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges - * @nvdimm_bus: NFIT-bus that provided the spa table entry - * @nfit_spa: spa table to map - * @type: aperture or control region - * - * In the case where block-data-window apertures and - * dimm-control-regions are interleaved they will end up sharing a - * single request_mem_region() + ioremap() for the address range. In - * the style of devm nfit_spa_map() mappings are automatically dropped - * when all region devices referencing the same mapping are disabled / - * unbound. 
- */ -static __maybe_unused void __iomem *nfit_spa_map( - struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa, enum spa_map_type type) -{ - void __iomem *iomem; - - mutex_lock(&acpi_desc->spa_map_mutex); - iomem = __nfit_spa_map(acpi_desc, spa, type); - mutex_unlock(&acpi_desc->spa_map_mutex); - - return iomem; -} - static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, struct acpi_nfit_interleave *idt, u16 interleave_ways) { @@ -1773,29 +1653,6 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, return 0; } -static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, - struct device *dev) -{ - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); - int i; - - if (!nfit_blk) - return; /* never enabled */ - - /* auto-free BLK spa mappings */ - for (i = 0; i < 2; i++) { - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; - - if (mmio->addr.base) - nfit_spa_unmap(acpi_desc, mmio->spa); - } - nd_blk_region_set_provider_data(ndbr, NULL); - /* devm will free nfit_blk */ -} - static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) { @@ -1969,7 +1826,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, ndr_desc->num_mappings = blk_valid; ndbr_desc = to_blk_region_desc(ndr_desc); ndbr_desc->enable = acpi_nfit_blk_region_enable; - ndbr_desc->disable = acpi_nfit_blk_region_disable; ndbr_desc->do_io = acpi_desc->blk_do_io; nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc); @@ -2509,7 +2365,6 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) nd_desc->clear_to_send = acpi_nfit_clear_to_send; nd_desc->attr_groups = acpi_nfit_attribute_groups; - INIT_LIST_HEAD(&acpi_desc->spa_maps); INIT_LIST_HEAD(&acpi_desc->spas); INIT_LIST_HEAD(&acpi_desc->dcrs); INIT_LIST_HEAD(&acpi_desc->bdws); @@ -2517,7 +2372,6 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) INIT_LIST_HEAD(&acpi_desc->flushes); INIT_LIST_HEAD(&acpi_desc->memdevs); INIT_LIST_HEAD(&acpi_desc->dimms); - mutex_init(&acpi_desc->spa_map_mutex); mutex_init(&acpi_desc->init_mutex); INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); } diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index f06fa91c5abf..52078475d969 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -135,9 +135,7 @@ struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_header acpi_header; struct acpi_nfit_header *nfit; - struct mutex spa_map_mutex; struct mutex init_mutex; - struct list_head spa_maps; struct list_head memdevs; struct list_head flushes; struct list_head dimms; @@ -188,25 +186,6 @@ struct nfit_blk { u32 dimm_flags; }; -enum spa_map_type { - SPA_MAP_CONTROL, - SPA_MAP_APERTURE, -}; - -struct nfit_spa_mapping { - struct acpi_nfit_desc *acpi_desc; - struct acpi_nfit_system_address *spa; - struct list_head list; - struct kref kref; - enum spa_map_type type; - struct nd_blk_addr addr; -}; - -static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref) -{ - return container_of(kref, struct nfit_spa_mapping, kref); -} - static inline struct acpi_nfit_memory_map *__to_nfit_memdev( struct nfit_mem *nfit_mem) { -- cgit v1.2.3 From e5ae3b252c6732f838f5695170bbf2ea9fb5b9ff Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 7 
Jun 2016 17:00:04 -0700 Subject: libnvdimm, nfit: move flush hint mapping to region-device driver-data In preparation for triggering flushes of a DIMM's writes-posted-queue (WPQ) via the pmem driver move mapping of flush hint addresses to the region driver. Since this uses devm_nvdimm_memremap() the flush addresses will remain mapped while any region to which the dimm belongs is active. We need to communicate more information to the nvdimm core to facilitate this mapping, namely each dimm object now carries an array of flush hint address resources. Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 21 ++++++++++++++++++++- drivers/acpi/nfit.h | 1 + 2 files changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index b76c95981547..6796f780870a 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -714,9 +714,24 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, } list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { + struct acpi_nfit_flush_address *flush; + u16 i; + if (nfit_flush->flush->device_handle != device_handle) continue; nfit_mem->nfit_flush = nfit_flush; + flush = nfit_flush->flush; + nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, + flush->hint_count + * sizeof(struct resource), GFP_KERNEL); + if (!nfit_mem->flush_wpq) + return -ENOMEM; + for (i = 0; i < flush->hint_count; i++) { + struct resource *res = &nfit_mem->flush_wpq[i]; + + res->start = flush->hint_address[i]; + res->end = res->start + 8 - 1; + } break; } @@ -1171,6 +1186,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) int dimm_count = 0; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { + struct acpi_nfit_flush_address *flush; unsigned long flags = 0, cmd_mask; struct nvdimm *nvdimm; u32 device_handle; @@ -1204,9 +1220,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if (nfit_mem->family == NVDIMM_FAMILY_INTEL) cmd_mask |= nfit_mem->dsm_mask; + flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush + : NULL; nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, acpi_nfit_dimm_attribute_groups, - flags, cmd_mask); + flags, cmd_mask, flush ? flush->hint_count : 0, + nfit_mem->flush_wpq); if (!nvdimm) return -ENOMEM; diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 52078475d969..9282eb324dcc 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -127,6 +127,7 @@ struct nfit_mem { struct list_head list; struct acpi_device *adev; struct acpi_nfit_desc *acpi_desc; + struct resource *flush_wpq; unsigned long dsm_mask; int family; }; -- cgit v1.2.3 From f284a4f23752d0334e482d04e0a584d19c9c8cd0 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 7 Jul 2016 19:44:50 -0700 Subject: libnvdimm: introduce nvdimm_flush() and nvdimm_has_flush() nvdimm_flush() is a replacement for the x86 'pcommit' instruction. It is an optional write flushing mechanism that an nvdimm bus can provide for the pmem driver to consume. In the case of the NFIT nvdimm-bus-provider nvdimm_flush() is implemented as a series of flush-hint-address [1] writes to each dimm in the interleave set (region) that backs the namespace. The nvdimm_has_flush() routine relies on platform firmware to describe the flushing capabilities of a platform. 
It uses the heuristic of whether an nvdimm bus provider provides flush address data to return a ternary result: 1: flush addresses defined 0: dimm topology described without flush addresses (assume ADR) -errno: no topology information, unable to determine flush mechanism The pmem driver is expected to take the following actions on this ternary result: 1: nvdimm_flush() in response to REQ_FUA / REQ_FLUSH and shutdown 0: do not set, WC or FUA on the queue, take no further action -errno: warn and then operate as if nvdimm_has_flush() returned '0' The caveat of this heuristic is that it can not distinguish the "dimm does not have flush address" case from the "platform firmware is broken and failed to describe a flush address". Given we are already explicitly trusting the NFIT there's not much more we can do beyond blacklisting broken firmwares if they are ever encountered. Cc: Ross Zwisler Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 33 +++------------------------------ drivers/acpi/nfit.h | 1 - 2 files changed, 3 insertions(+), 31 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 6796f780870a..0497175ee6cb 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1393,24 +1393,6 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) return mmio->base_offset + line_offset + table_offset + sub_line_offset; } -static void wmb_blk(struct nfit_blk *nfit_blk) -{ - - if (nfit_blk->nvdimm_flush) { - /* - * The first wmb() is needed to 'sfence' all previous writes - * such that they are architecturally visible for the platform - * buffer flush. Note that we've already arranged for pmem - * writes to avoid the cache via arch_memcpy_to_pmem(). The - * final wmb() ensures ordering for the NVDIMM flush write. - */ - wmb(); - writeq(1, nfit_blk->nvdimm_flush); - wmb(); - } else - wmb_pmem(); -} - static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; @@ -1445,7 +1427,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, offset = to_interleave_offset(offset, mmio); writeq(cmd, mmio->addr.base + offset); - wmb_blk(nfit_blk); + nvdimm_flush(nfit_blk->nd_region); if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) readq(mmio->addr.base + offset); @@ -1496,7 +1478,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, } if (rw) - wmb_blk(nfit_blk); + nvdimm_flush(nfit_blk->nd_region); rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; return rc; @@ -1570,7 +1552,6 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, { struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nfit_flush *nfit_flush; struct nfit_blk_mmio *mmio; struct nfit_blk *nfit_blk; struct nfit_mem *nfit_mem; @@ -1645,15 +1626,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, return rc; } - nfit_flush = nfit_mem->nfit_flush; - if (nfit_flush && nfit_flush->flush->hint_count != 0) { - nfit_blk->nvdimm_flush = devm_nvdimm_ioremap(dev, - nfit_flush->flush->hint_address[0], 8); - if (!nfit_blk->nvdimm_flush) - return -ENOMEM; - } - - if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush) + if (nvdimm_has_flush(nfit_blk->nd_region) < 0) dev_warn(dev, "unable to guarantee persistence of writes\n"); if (mmio->line_size == 0) diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 9282eb324dcc..9fda77cf81da 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -183,7 +183,6 @@ struct nfit_blk { u64 bdw_offset; /* post interleave offset */ u64 stat_offset; u64 cmd_offset; - void __iomem *nvdimm_flush; u32 dimm_flags; }; -- cgit v1.2.3 From 7a9eb20666317794d0279843fbd091af93907780 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 3 Jun 2016 18:06:47 -0700 Subject: pmem: kill __pmem address space The __pmem address space was meant to annotate codepaths that touch persistent memory and need to coordinate a call to wmb_pmem(). Now that wmb_pmem() is gone, there is little need to keep this annotation. Cc: Christoph Hellwig Cc: Ross Zwisler Signed-off-by: Dan Williams --- drivers/acpi/nfit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 9fda77cf81da..80fb2c0ac8bf 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -164,7 +164,7 @@ enum nd_blk_mmio_selector { struct nd_blk_addr { union { void __iomem *base; - void __pmem *aperture; + void *aperture; }; }; -- cgit v1.2.3 From c2f32acdf848ddc784b73189033592a3134ac651 Mon Sep 17 00:00:00 2001 From: "Lee, Chun-Yi" Date: Fri, 15 Jul 2016 12:05:35 +0800 Subject: acpi, nfit: treat virtual ramdisk SPA as pmem region This patch adds logic to treat virtual ramdisk SPA as pmem region, then ramdisk's /dev/pmem* device can be mounted with iso9660. It's useful to work with the httpboot in EFI firmware to pull a remote ISO file to the local memory region for booting and installation. Wiki page of UEFI HTTPBoot with OVMF: https://en.opensuse.org/UEFI_HTTPBoot_with_OVMF The ramdisk function in EDK2/OVMF generates a ACPI0012 root device that it contains empty _STA but without _DSM: DefinitionBlock ("ssdt2.aml", "SSDT", 2, "INTEL ", "RamDisk ", 0x00001000) { Scope (\_SB) { Device (NVDR) { Name (_HID, "ACPI0012") // _HID: Hardware ID Name (_STR, Unicode ("NVDIMM Root Device")) // _STR: Description String Method (_STA, 0, NotSerialized) // _STA: Status { Return (0x0F) } } } } In section 5.2.25.2 of ACPI 6.1 spec, it mentions that the "SPA Range Structure Index" of virtual SPA shall be set to zero. That means virtual SPA will not be associated by any NVDIMM region mapping. 
The VCD's SPA Range Structure in NFIT is similar to virtual disk region as following: [028h 0040 2] Subtable Type : 0000 [System Physical Address Range] [02Ah 0042 2] Length : 0038 [02Ch 0044 2] Range Index : 0000 [02Eh 0046 2] Flags (decoded below) : 0000 Add/Online Operation Only : 0 Proximity Domain Valid : 0 [030h 0048 4] Reserved : 00000000 [034h 0052 4] Proximity Domain : 00000000 [038h 0056 16] Address Range GUID : 77AB535A-45FC-624B-5560-F7B281D1F96E [048h 0072 8] Address Range Base : 00000000B6ABD018 [050h 0080 8] Address Range Length : 0000000005500000 [058h 0088 8] Memory Map Attribute : 0000000000000000 The way to not associate a SPA range is to never reference it from a "flush hint", "interleave", or "control region" table. After testing on OVMF, pmem driver can support the region that it doesn't assoicate to any NVDIMM mapping. So, treat VCD like pmem is a idea to get a pmem block device that it contains iso. v4: Instoduce nfit_spa_is_virtual() to check virtual ramdisk SPA and create pmem region. v3: To simplify patch, removed useless VCD region in libnvdimm. v2: Removed the code for setting VCD to a read-only region. Cc: Gary Lin Cc: Dan Williams Cc: Ross Zwisler Cc: "Rafael J. Wysocki" Cc: Linda Knippers Signed-off-by: Lee, Chun-Yi Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 0497175ee6cb..d89a02d9ed10 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -1829,6 +1829,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, return 0; } +static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) +{ + return (nfit_spa_type(spa) == NFIT_SPA_VDISK || + nfit_spa_type(spa) == NFIT_SPA_VCD || + nfit_spa_type(spa) == NFIT_SPA_PDISK || + nfit_spa_type(spa) == NFIT_SPA_PCD); +} + static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { @@ -1844,7 +1852,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, if (nfit_spa->nd_region) return 0; - if (spa->range_index == 0) { + if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", __func__); return 0; @@ -1908,6 +1916,11 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; + } else if (nfit_spa_is_virtual(spa)) { + nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, + ndr_desc); + if (!nfit_spa->nd_region) + rc = -ENOMEM; } out: -- cgit v1.2.3 From 3193204149de4d563519d6847aba638bb4d9662b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 14 Jul 2016 17:22:48 -0700 Subject: nfit: fix _FIT evaluation memory leak + use after free acpi_evaluate_object() allocates memory. Free the buffer allocated during acpi_nfit_add(). In order for this memory to be freed acpi_nfit_init() needs to be converted to duplicate the nfit contents in its internal allocation. Use zero-length arrays to minimize the thrash with the rest of the nfit driver implementation. All of the add_() routines now validate a minimum table size and expect hotplugged tables to match the size of the original table to count as a duplicate. For variable length tables, like 'idt' and 'flush', we calculate the dynamic size. Note that hotplug by definition cannot change the interleave as it would cause data corruption of in-use namespaces. 
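The duplication pattern the add_*() helpers now share looks roughly like the sketch below; struct acpi_foo, struct nfit_foo and add_foo() are placeholder names, and the real helpers additionally move entries that match a previous scan back onto the current list rather than simply returning:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct acpi_foo {			/* stand-in for an ACPI NFIT sub-table */
	u16 type;
	u16 length;
	u64 payload;
};

struct nfit_foo {
	struct list_head list;
	struct acpi_foo foo[0];		/* private copy of the firmware table */
};

static bool add_foo(struct device *dev, struct list_head *head,
		struct acpi_foo *foo)
{
	struct nfit_foo *nfit_foo;

	if (foo->length != sizeof(*foo))	/* reject truncated tables */
		return false;

	list_for_each_entry(nfit_foo, head, list)
		if (memcmp(nfit_foo->foo, foo, sizeof(*foo)) == 0)
			return true;		/* duplicate from a prior scan */

	/* one allocation covers the list node and the copied table */
	nfit_foo = devm_kzalloc(dev, sizeof(*nfit_foo) + sizeof(*foo),
			GFP_KERNEL);
	if (!nfit_foo)
		return false;
	INIT_LIST_HEAD(&nfit_foo->list);
	memcpy(nfit_foo->foo, foo, sizeof(*foo));
	list_add_tail(&nfit_foo->list, head);
	return true;
}
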
Cc: Vishal Verma Reported-by: Xiao Guangrong Reported-by: Haozhong Zhang Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 127 ++++++++++++++++++++++++++++++++++++---------------- drivers/acpi/nfit.h | 12 ++--- 2 files changed, 95 insertions(+), 44 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index d89a02d9ed10..54a4ee0a90e0 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -374,22 +374,25 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) { - size_t length = min_t(size_t, sizeof(*spa), spa->header.length); struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; + if (spa->header.length != sizeof(*spa)) + return false; + list_for_each_entry(nfit_spa, &prev->spas, list) { - if (memcmp(nfit_spa->spa, spa, length) == 0) { + if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } } - nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL); + nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), + GFP_KERNEL); if (!nfit_spa) return false; INIT_LIST_HEAD(&nfit_spa->list); - nfit_spa->spa = spa; + memcpy(nfit_spa->spa, spa, sizeof(*spa)); list_add_tail(&nfit_spa->list, &acpi_desc->spas); dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, spa->range_index, @@ -401,21 +404,24 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_memory_map *memdev) { - size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length); struct device *dev = acpi_desc->dev; struct nfit_memdev *nfit_memdev; + if (memdev->header.length != sizeof(*memdev)) + return false; + list_for_each_entry(nfit_memdev, &prev->memdevs, list) - if (memcmp(nfit_memdev->memdev, memdev, length) == 0) { + if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); return true; } - nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL); + nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), + GFP_KERNEL); if (!nfit_memdev) return false; INIT_LIST_HEAD(&nfit_memdev->list); - nfit_memdev->memdev = memdev; + memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", __func__, memdev->device_handle, memdev->range_index, @@ -423,25 +429,42 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, return true; } +/* + * An implementation may provide a truncated control region if no block windows + * are defined. 
+ */ +static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) +{ + if (dcr->header.length < offsetof(struct acpi_nfit_control_region, + window_size)) + return 0; + if (dcr->windows) + return sizeof(*dcr); + return offsetof(struct acpi_nfit_control_region, window_size); +} + static bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr) { - size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length); struct device *dev = acpi_desc->dev; struct nfit_dcr *nfit_dcr; + if (!sizeof_dcr(dcr)) + return false; + list_for_each_entry(nfit_dcr, &prev->dcrs, list) - if (memcmp(nfit_dcr->dcr, dcr, length) == 0) { + if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); return true; } - nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL); + nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), + GFP_KERNEL); if (!nfit_dcr) return false; INIT_LIST_HEAD(&nfit_dcr->list); - nfit_dcr->dcr = dcr; + memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, dcr->region_index, dcr->windows); @@ -452,71 +475,102 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw) { - size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length); struct device *dev = acpi_desc->dev; struct nfit_bdw *nfit_bdw; + if (bdw->header.length != sizeof(*bdw)) + return false; list_for_each_entry(nfit_bdw, &prev->bdws, list) - if (memcmp(nfit_bdw->bdw, bdw, length) == 0) { + if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); return true; } - nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL); + nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), + GFP_KERNEL); if (!nfit_bdw) return false; INIT_LIST_HEAD(&nfit_bdw->list); - nfit_bdw->bdw = bdw; + memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, bdw->region_index, bdw->windows); return true; } +static size_t sizeof_idt(struct acpi_nfit_interleave *idt) +{ + if (idt->header.length < sizeof(*idt)) + return 0; + return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); +} + static bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_interleave *idt) { - size_t length = min_t(size_t, sizeof(*idt), idt->header.length); struct device *dev = acpi_desc->dev; struct nfit_idt *nfit_idt; - list_for_each_entry(nfit_idt, &prev->idts, list) - if (memcmp(nfit_idt->idt, idt, length) == 0) { + if (!sizeof_idt(idt)) + return false; + + list_for_each_entry(nfit_idt, &prev->idts, list) { + if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) + continue; + + if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { list_move_tail(&nfit_idt->list, &acpi_desc->idts); return true; } + } - nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL); + nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), + GFP_KERNEL); if (!nfit_idt) return false; INIT_LIST_HEAD(&nfit_idt->list); - nfit_idt->idt = idt; + memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); list_add_tail(&nfit_idt->list, &acpi_desc->idts); dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, idt->interleave_index, idt->line_count); return true; } +static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) +{ + if 
(flush->header.length < sizeof(*flush)) + return 0; + return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); +} + static bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush) { - size_t length = min_t(size_t, sizeof(*flush), flush->header.length); struct device *dev = acpi_desc->dev; struct nfit_flush *nfit_flush; - list_for_each_entry(nfit_flush, &prev->flushes, list) - if (memcmp(nfit_flush->flush, flush, length) == 0) { + if (!sizeof_flush(flush)) + return false; + + list_for_each_entry(nfit_flush, &prev->flushes, list) { + if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) + continue; + + if (memcmp(nfit_flush->flush, flush, + sizeof_flush(flush)) == 0) { list_move_tail(&nfit_flush->list, &acpi_desc->flushes); return true; } + } - nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL); + nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) + + sizeof_flush(flush), GFP_KERNEL); if (!nfit_flush) return false; INIT_LIST_HEAD(&nfit_flush->list); - nfit_flush->flush = flush; + memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); list_add_tail(&nfit_flush->list, &acpi_desc->flushes); dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, flush->device_handle, flush->hint_count); @@ -2390,7 +2444,7 @@ static int acpi_nfit_add(struct acpi_device *adev) struct acpi_table_header *tbl; acpi_status status = AE_OK; acpi_size sz; - int rc; + int rc = 0; status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); if (ACPI_FAILURE(status)) { @@ -2427,12 +2481,15 @@ static int acpi_nfit_add(struct acpi_device *adev) acpi_desc->nfit = (struct acpi_nfit_header *)obj->buffer.pointer; sz = obj->buffer.length; + rc = acpi_nfit_init(acpi_desc, sz); } else dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", __func__, (int) obj->type); - } + acpi_desc->nfit = NULL; + kfree(buf.pointer); + } else + rc = acpi_nfit_init(acpi_desc, sz); - rc = acpi_nfit_init(acpi_desc, sz); if (rc) { nvdimm_bus_unregister(acpi_desc->nvdimm_bus); return rc; @@ -2454,7 +2511,6 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_nfit_header *nfit_saved; union acpi_object *obj; struct device *dev = &adev->dev; acpi_status status; @@ -2492,21 +2548,16 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) goto out_unlock; } - nfit_saved = acpi_desc->nfit; obj = buf.pointer; if (obj->type == ACPI_TYPE_BUFFER) { acpi_desc->nfit = (struct acpi_nfit_header *)obj->buffer.pointer; ret = acpi_nfit_init(acpi_desc, obj->buffer.length); - if (ret) { - /* Merge failed, restore old nfit, and exit */ - acpi_desc->nfit = nfit_saved; + if (ret) dev_err(dev, "failed to merge updated NFIT\n"); - } - } else { - /* Bad _FIT, restore old nfit */ + } else dev_err(dev, "Invalid _FIT\n"); - } + acpi_desc->nfit = NULL; kfree(buf.pointer); out_unlock: diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 80fb2c0ac8bf..402f8c31adc7 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -78,37 +78,37 @@ enum { }; struct nfit_spa { - struct acpi_nfit_system_address *spa; struct list_head list; struct nd_region *nd_region; unsigned int ars_done:1; u32 clear_err_unit; u32 max_ars; + struct acpi_nfit_system_address spa[0]; }; struct nfit_dcr { - struct acpi_nfit_control_region *dcr; struct list_head list; + struct acpi_nfit_control_region dcr[0]; }; struct nfit_bdw { - struct 
acpi_nfit_data_region *bdw; struct list_head list; + struct acpi_nfit_data_region bdw[0]; }; struct nfit_idt { - struct acpi_nfit_interleave *idt; struct list_head list; + struct acpi_nfit_interleave idt[0]; }; struct nfit_flush { - struct acpi_nfit_flush_address *flush; struct list_head list; + struct acpi_nfit_flush_address flush[0]; }; struct nfit_memdev { - struct acpi_nfit_memory_map *memdev; struct list_head list; + struct acpi_nfit_memory_map memdev[0]; }; /* assembled tables for a given dimm/memory-device */ -- cgit v1.2.3 From e7a11b449e6e2e2caadf6792c7afeecd68800651 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 14 Jul 2016 16:19:55 -0700 Subject: nfit: cleanup acpi_nfit_init calling convention Pass the nfit buffer as a parameter rather than hanging it off of acpi_desc. Reviewed-by: "Lee, Chun-Yi" Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 48 +++++++++++++++++------------------------------- drivers/acpi/nfit.h | 3 +-- 2 files changed, 18 insertions(+), 33 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 54a4ee0a90e0..fb80f32db525 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -2291,12 +2291,11 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, return 0; } -int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) { struct device *dev = acpi_desc->dev; struct nfit_table_prev prev; const void *end; - u8 *data; int rc; mutex_lock(&acpi_desc->init_mutex); @@ -2321,7 +2320,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) list_cut_position(&prev.flushes, &acpi_desc->flushes, acpi_desc->flushes.prev); - data = (u8 *) acpi_desc->nfit; end = data + sz; while (!IS_ERR_OR_NULL(data)) data = add_table(acpi_desc, &prev, data, end); @@ -2461,40 +2459,30 @@ static int acpi_nfit_add(struct acpi_device *adev) if (!acpi_desc->nvdimm_bus) return -ENOMEM; - /* - * Save the acpi header for later and then skip it, - * making nfit point to the first nfit table header. 
- */ + /* Save the acpi header for exporting the revision via sysfs */ acpi_desc->acpi_header = *tbl; - acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit); - sz -= sizeof(struct acpi_table_nfit); /* Evaluate _FIT and override with that if present */ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); if (ACPI_SUCCESS(status) && buf.length > 0) { - union acpi_object *obj; - /* - * Adjust for the acpi_object header of the _FIT - */ - obj = buf.pointer; - if (obj->type == ACPI_TYPE_BUFFER) { - acpi_desc->nfit = - (struct acpi_nfit_header *)obj->buffer.pointer; - sz = obj->buffer.length; - rc = acpi_nfit_init(acpi_desc, sz); - } else + union acpi_object *obj = buf.pointer; + + if (obj->type == ACPI_TYPE_BUFFER) + rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, + obj->buffer.length); + else dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", __func__, (int) obj->type); - acpi_desc->nfit = NULL; kfree(buf.pointer); } else - rc = acpi_nfit_init(acpi_desc, sz); + /* skip over the lead-in header table */ + rc = acpi_nfit_init(acpi_desc, (void *) tbl + + sizeof(struct acpi_table_nfit), + sz - sizeof(struct acpi_table_nfit)); - if (rc) { + if (rc) nvdimm_bus_unregister(acpi_desc->nvdimm_bus); - return rc; - } - return 0; + return rc; } static int acpi_nfit_remove(struct acpi_device *adev) @@ -2511,8 +2499,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; struct device *dev = &adev->dev; + union acpi_object *obj; acpi_status status; int ret; @@ -2550,14 +2538,12 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) obj = buf.pointer; if (obj->type == ACPI_TYPE_BUFFER) { - acpi_desc->nfit = - (struct acpi_nfit_header *)obj->buffer.pointer; - ret = acpi_nfit_init(acpi_desc, obj->buffer.length); + ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, + obj->buffer.length); if (ret) dev_err(dev, "failed to merge updated NFIT\n"); } else dev_err(dev, "Invalid _FIT\n"); - acpi_desc->nfit = NULL; kfree(buf.pointer); out_unlock: diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 402f8c31adc7..b63a583a678f 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -135,7 +135,6 @@ struct nfit_mem { struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_header acpi_header; - struct acpi_nfit_header *nfit; struct mutex init_mutex; struct list_head memdevs; struct list_head flushes; @@ -201,6 +200,6 @@ static inline struct acpi_nfit_desc *to_acpi_desc( } const u8 *to_nfit_uuid(enum nfit_uuids id); -int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz); +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); #endif /* __NFIT_H__ */ -- cgit v1.2.3 From bc9775d8697f57b333b6b316fb5145d6ca9dc36d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 21 Jul 2016 20:03:19 -0700 Subject: libnvdimm: move ->module to struct nvdimm_bus_descriptor Let the provider module be explicitly passed in rather than implicitly assumed by the module that calls nvdimm_bus_register(). This is in preparation for unifying the nfit and nfit_test driver teardown paths. 
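The "owner travels with the descriptor" pattern can be sketched as below; the my_bus* names are invented for illustration, and the try_module_get() usage shows only one plausible way a core might consume the field, not the actual libnvdimm internals:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_bus_descriptor {
	const char *provider_name;
	struct module *module;		/* explicit owner, set by the provider */
};

struct my_bus {
	struct my_bus_descriptor *desc;
};

/*
 * The core reads the owner from the descriptor instead of assuming the
 * module that happens to call the register routine (illustrative only).
 */
static struct my_bus *my_bus_register(struct device *parent,
		struct my_bus_descriptor *desc)
{
	struct my_bus *bus;

	if (!try_module_get(desc->module))
		return NULL;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		module_put(desc->module);
		return NULL;
	}
	bus->desc = desc;
	return bus;
}

/* a provider records itself once, when it initializes its descriptor */
static struct my_bus_descriptor example_desc = {
	.provider_name = "example",
	.module = THIS_MODULE,
};
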
Reviewed-by: Lee, Chun-Yi Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index fb80f32db525..e7eb3b6f1514 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -2417,6 +2417,7 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; nd_desc = &acpi_desc->nd_desc; nd_desc->provider_name = "ACPI.NFIT"; + nd_desc->module = THIS_MODULE; nd_desc->ndctl = acpi_nfit_ctl; nd_desc->flush_probe = acpi_nfit_flush_probe; nd_desc->clear_to_send = acpi_nfit_clear_to_send; -- cgit v1.2.3 From 58cd71b4747432b0ef3b86db1b09c12e6c97204b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 21 Jul 2016 18:05:36 -0700 Subject: nfit, tools/testing/nvdimm/: unify shutdown paths While testing the new on-demand ARS patches we discovered that differences between the nfit_test and normal nfit driver shutdown paths can leak resources. Unify the shutdown paths to trigger via a devm_ callback when the acpi_desc->dev is unbound from its driver. Reviewed-by: Lee, Chun-Yi Reported-by: Vishal Verma Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index e7eb3b6f1514..be7c2fde16e7 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -2291,6 +2291,16 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, return 0; } +static void acpi_nfit_destruct(void *data) +{ + struct acpi_nfit_desc *acpi_desc = data; + + acpi_desc->cancel = 1; + flush_workqueue(nfit_wq); + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + acpi_desc->nvdimm_bus = NULL; +} + int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) { struct device *dev = acpi_desc->dev; @@ -2298,6 +2308,17 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) const void *end; int rc; + if (!acpi_desc->nvdimm_bus) { + acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, + &acpi_desc->nd_desc); + if (!acpi_desc->nvdimm_bus) + return -ENOMEM; + rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, + acpi_desc); + if (rc) + return rc; + } + mutex_lock(&acpi_desc->init_mutex); INIT_LIST_HEAD(&prev.spas); @@ -2456,9 +2477,6 @@ static int acpi_nfit_add(struct acpi_device *adev) if (!acpi_desc) return -ENOMEM; acpi_nfit_desc_init(acpi_desc, &adev->dev); - acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc); - if (!acpi_desc->nvdimm_bus) - return -ENOMEM; /* Save the acpi header for exporting the revision via sysfs */ acpi_desc->acpi_header = *tbl; @@ -2480,19 +2498,12 @@ static int acpi_nfit_add(struct acpi_device *adev) rc = acpi_nfit_init(acpi_desc, (void *) tbl + sizeof(struct acpi_table_nfit), sz - sizeof(struct acpi_table_nfit)); - - if (rc) - nvdimm_bus_unregister(acpi_desc->nvdimm_bus); return rc; } static int acpi_nfit_remove(struct acpi_device *adev) { - struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); - - acpi_desc->cancel = 1; - flush_workqueue(nfit_wq); - nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + /* see acpi_nfit_destruct */ return 0; } @@ -2519,9 +2530,6 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) if (!acpi_desc) goto out_unlock; acpi_nfit_desc_init(acpi_desc, &adev->dev); - acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc); - if 
(!acpi_desc->nvdimm_bus) - goto out_unlock; } else { /* * Finish previous registration before considering new -- cgit v1.2.3 From 37b137ff8c833385b75ff2baf4bace25e52247d2 Mon Sep 17 00:00:00 2001 From: Vishal Verma Date: Sat, 23 Jul 2016 21:51:42 -0700 Subject: nfit, libnvdimm: allow an ARS scrub to be triggered on demand Normally, an ARS (Address Range Scrub) only happens at boot/initialization time. There can however arise situations where a bus-wide rescan is needed - notably, in the case of discovering a latent media error, we should do a full rescan to figure out what other sectors are bad, and thus potentially avoid triggering an mce on them in the future. Also provide a sysfs trigger to start a bus-wide scrub. Cc: Rafael J. Wysocki Signed-off-by: Vishal Verma Signed-off-by: Dan Williams --- drivers/acpi/nfit.c | 161 +++++++++++++++++++++++++++++++++++++++++++++++++--- drivers/acpi/nfit.h | 4 +- 2 files changed, 157 insertions(+), 8 deletions(-) (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index be7c2fde16e7..19d0dfdf9633 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -874,14 +875,87 @@ static ssize_t revision_show(struct device *dev, } static DEVICE_ATTR_RO(revision); +/* + * This shows the number of full Address Range Scrubs that have been + * completed since driver load time. Userspace can wait on this using + * select/poll etc. A '+' at the end indicates an ARS is in progress + */ +static ssize_t scrub_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus_descriptor *nd_desc; + ssize_t rc = -ENXIO; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, + (work_busy(&acpi_desc->work)) ? 
"+\n" : "\n"); + } + device_unlock(dev); + return rc; +} + +static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); + +static ssize_t scrub_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus_descriptor *nd_desc; + ssize_t rc; + long val; + + rc = kstrtol(buf, 0, &val); + if (rc) + return rc; + if (val != 1) + return -EINVAL; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + rc = acpi_nfit_ars_rescan(acpi_desc); + } + device_unlock(dev); + if (rc) + return rc; + return size; +} +static DEVICE_ATTR_RW(scrub); + +static bool ars_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START + | 1 << ND_CMD_ARS_STATUS; + + return (nd_desc->cmd_mask & mask) == mask; +} + +static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + + if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) + return 0; + return a->mode; +} + static struct attribute *acpi_nfit_attributes[] = { &dev_attr_revision.attr, + &dev_attr_scrub.attr, NULL, }; static struct attribute_group acpi_nfit_attribute_group = { .name = "nfit", .attrs = acpi_nfit_attributes, + .is_visible = nfit_visible, }; static const struct attribute_group *acpi_nfit_attribute_groups[] = { @@ -2054,7 +2128,7 @@ static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, unsigned int tmo = scrub_timeout; int rc; - if (nfit_spa->ars_done || !nfit_spa->nd_region) + if (!nfit_spa->ars_required || !nfit_spa->nd_region) return; rc = ars_start(acpi_desc, nfit_spa); @@ -2143,7 +2217,9 @@ static void acpi_nfit_scrub(struct work_struct *work) * firmware initiated scrubs to complete and then we go search for the * affected spa regions to mark them scanned. In the second phase we * initiate a directed scrub for every range that was not scrubbed in - * phase 1. + * phase 1. If we're called for a 'rescan', we harmlessly pass through + * the first phase, but really only care about running phase 2, where + * regions can be notified of new poison. */ /* process platform firmware initiated scrubs */ @@ -2246,14 +2322,17 @@ static void acpi_nfit_scrub(struct work_struct *work) * Flag all the ranges that still need scrubbing, but * register them now to make data available. 
 	 */
-		if (nfit_spa->nd_region)
-			nfit_spa->ars_done = 1;
-		else
+		if (!nfit_spa->nd_region) {
+			nfit_spa->ars_required = 1;
 			acpi_nfit_register_region(acpi_desc, nfit_spa);
+		}
 	}
 
 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
 		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
+	acpi_desc->scrub_count++;
+	if (acpi_desc->scrub_count_state)
+		sysfs_notify_dirent(acpi_desc->scrub_count_state);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
 
@@ -2291,12 +2370,48 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
 	return 0;
 }
 
+static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
+{
+	struct device *dev = acpi_desc->dev;
+	struct kernfs_node *nfit;
+	struct device *bus_dev;
+
+	if (!ars_supported(acpi_desc->nvdimm_bus))
+		return 0;
+
+	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
+	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
+	if (!nfit) {
+		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
+		return -ENODEV;
+	}
+	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
+	sysfs_put(nfit);
+	if (!acpi_desc->scrub_count_state) {
+		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static void acpi_nfit_destruct(void *data)
 {
 	struct acpi_nfit_desc *acpi_desc = data;
+	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
 
 	acpi_desc->cancel = 1;
+	/*
+	 * Bounce the nvdimm bus lock to make sure any in-flight
+	 * acpi_nfit_ars_rescan() submissions have had a chance to
+	 * either submit or see ->cancel set.
+	 */
+	device_lock(bus_dev);
+	device_unlock(bus_dev);
+
 	flush_workqueue(nfit_wq);
+	if (acpi_desc->scrub_count_state)
+		sysfs_put(acpi_desc->scrub_count_state);
 	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
 	acpi_desc->nvdimm_bus = NULL;
 }
@@ -2309,14 +2424,21 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
 	int rc;
 
 	if (!acpi_desc->nvdimm_bus) {
+		acpi_nfit_init_dsms(acpi_desc);
+
 		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
 				&acpi_desc->nd_desc);
 		if (!acpi_desc->nvdimm_bus)
 			return -ENOMEM;
+
 		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
 				acpi_desc);
 		if (rc)
 			return rc;
+
+		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
+		if (rc)
+			return rc;
 	}
 
 	mutex_lock(&acpi_desc->init_mutex);
@@ -2360,8 +2482,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
 	if (rc)
 		goto out_unlock;
 
-	acpi_nfit_init_dsms(acpi_desc);
-
 	rc = acpi_nfit_register_dimms(acpi_desc);
 	if (rc)
 		goto out_unlock;
@@ -2429,6 +2549,33 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 	return 0;
 }
 
+static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
+{
+	struct device *dev = acpi_desc->dev;
+	struct nfit_spa *nfit_spa;
+
+	if (work_busy(&acpi_desc->work))
+		return -EBUSY;
+
+	if (acpi_desc->cancel)
+		return 0;
+
+	mutex_lock(&acpi_desc->init_mutex);
+	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+		struct acpi_nfit_system_address *spa = nfit_spa->spa;
+
+		if (nfit_spa_type(spa) != NFIT_SPA_PM)
+			continue;
+
+		nfit_spa->ars_required = 1;
+	}
+	queue_work(nfit_wq, &acpi_desc->work);
+	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
+	mutex_unlock(&acpi_desc->init_mutex);
+
+	return 0;
+}
+
 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
 {
 	struct nvdimm_bus_descriptor *nd_desc;
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index b63a583a678f..6ecf337c97aa 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -80,7 +80,7 @@ enum {
 struct nfit_spa {
 	struct list_head list;
 	struct nd_region *nd_region;
-	unsigned int ars_done:1;
+	unsigned int ars_required:1;
 	u32 clear_err_unit;
 	u32 max_ars;
 	struct acpi_nfit_system_address spa[0];
@@ -148,6 +148,8 @@ struct acpi_nfit_desc {
 	struct nd_cmd_ars_status *ars_status;
 	size_t ars_status_size;
 	struct work_struct work;
+	struct kernfs_node *scrub_count_state;
+	unsigned int scrub_count;
 	unsigned int cancel:1;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
-- cgit v1.2.3

From bdf97013ced5f263da0dc9d559f5c09e922d8423 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Sat, 23 Jul 2016 21:24:19 -0700
Subject: nfit: move to nfit/ sub-directory

With the arrival of x86-machine-check support the nfit driver will add a
(conditionally-compiled) source file. Prepare for this by moving all nfit
source to drivers/acpi/nfit/. This is pure code movement, no functional
changes.

Signed-off-by: Dan Williams
---
 drivers/acpi/Kconfig       |   27 +-
 drivers/acpi/Makefile      |    2 +-
 drivers/acpi/nfit.c        | 2765 --------------------------------------------
 drivers/acpi/nfit.h        |  207 ----
 drivers/acpi/nfit/Kconfig  |   26 +
 drivers/acpi/nfit/Makefile |    2 +
 drivers/acpi/nfit/core.c   | 2765 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/acpi/nfit/nfit.h   |  207 ++++
 8 files changed, 3002 insertions(+), 2999 deletions(-)
 delete mode 100644 drivers/acpi/nfit.c
 delete mode 100644 drivers/acpi/nfit.h
 create mode 100644 drivers/acpi/nfit/Kconfig
 create mode 100644 drivers/acpi/nfit/Makefile
 create mode 100644 drivers/acpi/nfit/core.c
 create mode 100644 drivers/acpi/nfit/nfit.h
(limited to 'drivers/acpi')

diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b7e2e776397d..415b148a8698 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -447,32 +447,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
 
 	  If you are unsure what to do, do not enable this option.
 
-config ACPI_NFIT
-	tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
-	depends on PHYS_ADDR_T_64BIT
-	depends on BLK_DEV
-	depends on ARCH_HAS_MMIO_FLUSH
-	select LIBNVDIMM
-	help
-	  Infrastructure to probe ACPI 6 compliant platforms for
-	  NVDIMMs (NFIT) and register a libnvdimm device tree. In
-	  addition to storage devices this also enables libnvdimm to pass
-	  ACPI._DSM messages for platform/dimm configuration.
-
-	  To compile this driver as a module, choose M here:
-	  the module will be called nfit.
-
-config ACPI_NFIT_DEBUG
-	bool "NFIT DSM debug"
-	depends on ACPI_NFIT
-	depends on DYNAMIC_DEBUG
-	default n
-	help
-	  Enabling this option causes the nfit driver to dump the
-	  input and output buffers of _DSM operations on the ACPI0012
-	  device and its children. This can be very verbose, so leave
-	  it disabled unless you are debugging a hardware / firmware
-	  issue.
+source "drivers/acpi/nfit/Kconfig"
 
 source "drivers/acpi/apei/Kconfig"
 
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 251ce85a66fb..64a575a6f7ef 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
 obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
 obj-$(CONFIG_ACPI) += container.o
 obj-$(CONFIG_ACPI_THERMAL) += thermal.o
-obj-$(CONFIG_ACPI_NFIT) += nfit.o
+obj-$(CONFIG_ACPI_NFIT) += nfit/
 obj-$(CONFIG_ACPI) += acpi_memhotplug.o
 obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
 obj-$(CONFIG_ACPI_BATTERY) += battery.o
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
deleted file mode 100644
index 19d0dfdf9633..000000000000
--- a/drivers/acpi/nfit.c
+++ /dev/null
@@ -1,2765 +0,0 @@
-/*
- * Copyright(c) 2013-2015 Intel Corporation.
All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "nfit.h" - -/* - * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is - * irrelevant. - */ -#include - -static bool force_enable_dimms; -module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); - -static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT; -module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds"); - -/* after three payloads of overflow, it's dead jim */ -static unsigned int scrub_overflow_abort = 3; -module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(scrub_overflow_abort, - "Number of times we overflow ARS results before abort"); - -static bool disable_vendor_specific; -module_param(disable_vendor_specific, bool, S_IRUGO); -MODULE_PARM_DESC(disable_vendor_specific, - "Limit commands to the publicly specified set\n"); - -static struct workqueue_struct *nfit_wq; - -struct nfit_table_prev { - struct list_head spas; - struct list_head memdevs; - struct list_head dcrs; - struct list_head bdws; - struct list_head idts; - struct list_head flushes; -}; - -static u8 nfit_uuid[NFIT_UUID_MAX][16]; - -const u8 *to_nfit_uuid(enum nfit_uuids id) -{ - return nfit_uuid[id]; -} -EXPORT_SYMBOL(to_nfit_uuid); - -static struct acpi_nfit_desc *to_acpi_nfit_desc( - struct nvdimm_bus_descriptor *nd_desc) -{ - return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); -} - -static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) -{ - struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; - - /* - * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct - * acpi_device. 
- */ - if (!nd_desc->provider_name - || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) - return NULL; - - return to_acpi_device(acpi_desc->dev); -} - -static int xlat_status(void *buf, unsigned int cmd) -{ - struct nd_cmd_clear_error *clear_err; - struct nd_cmd_ars_status *ars_status; - struct nd_cmd_ars_start *ars_start; - struct nd_cmd_ars_cap *ars_cap; - u16 flags; - - switch (cmd) { - case ND_CMD_ARS_CAP: - ars_cap = buf; - if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE) - return -ENOTTY; - - /* Command failed */ - if (ars_cap->status & 0xffff) - return -EIO; - - /* No supported scan types for this range */ - flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; - if ((ars_cap->status >> 16 & flags) == 0) - return -ENOTTY; - break; - case ND_CMD_ARS_START: - ars_start = buf; - /* ARS is in progress */ - if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY) - return -EBUSY; - - /* Command failed */ - if (ars_start->status & 0xffff) - return -EIO; - break; - case ND_CMD_ARS_STATUS: - ars_status = buf; - /* Command failed */ - if (ars_status->status & 0xffff) - return -EIO; - /* Check extended status (Upper two bytes) */ - if (ars_status->status == NFIT_ARS_STATUS_DONE) - return 0; - - /* ARS is in progress */ - if (ars_status->status == NFIT_ARS_STATUS_BUSY) - return -EBUSY; - - /* No ARS performed for the current boot */ - if (ars_status->status == NFIT_ARS_STATUS_NONE) - return -EAGAIN; - - /* - * ARS interrupted, either we overflowed or some other - * agent wants the scan to stop. If we didn't overflow - * then just continue with the returned results. - */ - if (ars_status->status == NFIT_ARS_STATUS_INTR) { - if (ars_status->flags & NFIT_ARS_F_OVERFLOW) - return -ENOSPC; - return 0; - } - - /* Unknown status */ - if (ars_status->status >> 16) - return -EIO; - break; - case ND_CMD_CLEAR_ERROR: - clear_err = buf; - if (clear_err->status & 0xffff) - return -EIO; - if (!clear_err->cleared) - return -EIO; - if (clear_err->length > clear_err->cleared) - return clear_err->cleared; - break; - default: - break; - } - - return 0; -} - -static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, - struct nvdimm *nvdimm, unsigned int cmd, void *buf, - unsigned int buf_len, int *cmd_rc) -{ - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); - union acpi_object in_obj, in_buf, *out_obj; - const struct nd_cmd_desc *desc = NULL; - struct device *dev = acpi_desc->dev; - struct nd_cmd_pkg *call_pkg = NULL; - const char *cmd_name, *dimm_name; - unsigned long cmd_mask, dsm_mask; - acpi_handle handle; - unsigned int func; - const u8 *uuid; - u32 offset; - int rc, i; - - func = cmd; - if (cmd == ND_CMD_CALL) { - call_pkg = buf; - func = call_pkg->nd_command; - } - - if (nvdimm) { - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct acpi_device *adev = nfit_mem->adev; - - if (!adev) - return -ENOTTY; - if (call_pkg && nfit_mem->family != call_pkg->nd_family) - return -ENOTTY; - - dimm_name = nvdimm_name(nvdimm); - cmd_name = nvdimm_cmd_name(cmd); - cmd_mask = nvdimm_cmd_mask(nvdimm); - dsm_mask = nfit_mem->dsm_mask; - desc = nd_cmd_dimm_desc(cmd); - uuid = to_nfit_uuid(nfit_mem->family); - handle = adev->handle; - } else { - struct acpi_device *adev = to_acpi_dev(acpi_desc); - - cmd_name = nvdimm_bus_cmd_name(cmd); - cmd_mask = nd_desc->cmd_mask; - dsm_mask = cmd_mask; - desc = nd_cmd_bus_desc(cmd); - uuid = to_nfit_uuid(NFIT_DEV_BUS); - handle = adev->handle; - dimm_name = "bus"; - } - - if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) - return -ENOTTY; - - if 
(!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) - return -ENOTTY; - - in_obj.type = ACPI_TYPE_PACKAGE; - in_obj.package.count = 1; - in_obj.package.elements = &in_buf; - in_buf.type = ACPI_TYPE_BUFFER; - in_buf.buffer.pointer = buf; - in_buf.buffer.length = 0; - - /* libnvdimm has already validated the input envelope */ - for (i = 0; i < desc->in_num; i++) - in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, - i, buf); - - if (call_pkg) { - /* skip over package wrapper */ - in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; - in_buf.buffer.length = call_pkg->nd_size_in; - } - - if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { - dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", - __func__, dimm_name, cmd, func, - in_buf.buffer.length); - print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, - in_buf.buffer.pointer, - min_t(u32, 256, in_buf.buffer.length), true); - } - - out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj); - if (!out_obj) { - dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, - cmd_name); - return -EINVAL; - } - - if (call_pkg) { - call_pkg->nd_fw_size = out_obj->buffer.length; - memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, - out_obj->buffer.pointer, - min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); - - ACPI_FREE(out_obj); - /* - * Need to support FW function w/o known size in advance. - * Caller can determine required size based upon nd_fw_size. - * If we return an error (like elsewhere) then caller wouldn't - * be able to rely upon data returned to make calculation. - */ - return 0; - } - - if (out_obj->package.type != ACPI_TYPE_BUFFER) { - dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", - __func__, dimm_name, cmd_name, out_obj->type); - rc = -EINVAL; - goto out; - } - - if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { - dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, - dimm_name, cmd_name, out_obj->buffer.length); - print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, - 4, out_obj->buffer.pointer, min_t(u32, 128, - out_obj->buffer.length), true); - } - - for (i = 0, offset = 0; i < desc->out_num; i++) { - u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, - (u32 *) out_obj->buffer.pointer); - - if (offset + out_size > out_obj->buffer.length) { - dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", - __func__, dimm_name, cmd_name, i); - break; - } - - if (in_buf.buffer.length + offset + out_size > buf_len) { - dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", - __func__, dimm_name, cmd_name, i); - rc = -ENXIO; - goto out; - } - memcpy(buf + in_buf.buffer.length + offset, - out_obj->buffer.pointer + offset, out_size); - offset += out_size; - } - if (offset + in_buf.buffer.length < buf_len) { - if (i >= 1) { - /* - * status valid, return the number of bytes left - * unfilled in the output buffer - */ - rc = buf_len - offset - in_buf.buffer.length; - if (cmd_rc) - *cmd_rc = xlat_status(buf, cmd); - } else { - dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", - __func__, dimm_name, cmd_name, buf_len, - offset); - rc = -ENXIO; - } - } else { - rc = 0; - if (cmd_rc) - *cmd_rc = xlat_status(buf, cmd); - } - - out: - ACPI_FREE(out_obj); - - return rc; -} - -static const char *spa_type_name(u16 type) -{ - static const char *to_name[] = { - [NFIT_SPA_VOLATILE] = "volatile", - [NFIT_SPA_PM] = "pmem", - [NFIT_SPA_DCR] = "dimm-control-region", - [NFIT_SPA_BDW] = "block-data-window", - [NFIT_SPA_VDISK] = "volatile-disk", - [NFIT_SPA_VCD] = "volatile-cd", 
- [NFIT_SPA_PDISK] = "persistent-disk", - [NFIT_SPA_PCD] = "persistent-cd", - - }; - - if (type > NFIT_SPA_PCD) - return "unknown"; - - return to_name[type]; -} - -static int nfit_spa_type(struct acpi_nfit_system_address *spa) -{ - int i; - - for (i = 0; i < NFIT_UUID_MAX; i++) - if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) - return i; - return -1; -} - -static bool add_spa(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, - struct acpi_nfit_system_address *spa) -{ - struct device *dev = acpi_desc->dev; - struct nfit_spa *nfit_spa; - - if (spa->header.length != sizeof(*spa)) - return false; - - list_for_each_entry(nfit_spa, &prev->spas, list) { - if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { - list_move_tail(&nfit_spa->list, &acpi_desc->spas); - return true; - } - } - - nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), - GFP_KERNEL); - if (!nfit_spa) - return false; - INIT_LIST_HEAD(&nfit_spa->list); - memcpy(nfit_spa->spa, spa, sizeof(*spa)); - list_add_tail(&nfit_spa->list, &acpi_desc->spas); - dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, - spa->range_index, - spa_type_name(nfit_spa_type(spa))); - return true; -} - -static bool add_memdev(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, - struct acpi_nfit_memory_map *memdev) -{ - struct device *dev = acpi_desc->dev; - struct nfit_memdev *nfit_memdev; - - if (memdev->header.length != sizeof(*memdev)) - return false; - - list_for_each_entry(nfit_memdev, &prev->memdevs, list) - if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { - list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); - return true; - } - - nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), - GFP_KERNEL); - if (!nfit_memdev) - return false; - INIT_LIST_HEAD(&nfit_memdev->list); - memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); - list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); - dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", - __func__, memdev->device_handle, memdev->range_index, - memdev->region_index); - return true; -} - -/* - * An implementation may provide a truncated control region if no block windows - * are defined. 
- */ -static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) -{ - if (dcr->header.length < offsetof(struct acpi_nfit_control_region, - window_size)) - return 0; - if (dcr->windows) - return sizeof(*dcr); - return offsetof(struct acpi_nfit_control_region, window_size); -} - -static bool add_dcr(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, - struct acpi_nfit_control_region *dcr) -{ - struct device *dev = acpi_desc->dev; - struct nfit_dcr *nfit_dcr; - - if (!sizeof_dcr(dcr)) - return false; - - list_for_each_entry(nfit_dcr, &prev->dcrs, list) - if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { - list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); - return true; - } - - nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), - GFP_KERNEL); - if (!nfit_dcr) - return false; - INIT_LIST_HEAD(&nfit_dcr->list); - memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); - list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); - dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, - dcr->region_index, dcr->windows); - return true; -} - -static bool add_bdw(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, - struct acpi_nfit_data_region *bdw) -{ - struct device *dev = acpi_desc->dev; - struct nfit_bdw *nfit_bdw; - - if (bdw->header.length != sizeof(*bdw)) - return false; - list_for_each_entry(nfit_bdw, &prev->bdws, list) - if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { - list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); - return true; - } - - nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), - GFP_KERNEL); - if (!nfit_bdw) - return false; - INIT_LIST_HEAD(&nfit_bdw->list); - memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); - list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); - dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, - bdw->region_index, bdw->windows); - return true; -} - -static size_t sizeof_idt(struct acpi_nfit_interleave *idt) -{ - if (idt->header.length < sizeof(*idt)) - return 0; - return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); -} - -static bool add_idt(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, - struct acpi_nfit_interleave *idt) -{ - struct device *dev = acpi_desc->dev; - struct nfit_idt *nfit_idt; - - if (!sizeof_idt(idt)) - return false; - - list_for_each_entry(nfit_idt, &prev->idts, list) { - if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) - continue; - - if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { - list_move_tail(&nfit_idt->list, &acpi_desc->idts); - return true; - } - } - - nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), - GFP_KERNEL); - if (!nfit_idt) - return false; - INIT_LIST_HEAD(&nfit_idt->list); - memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); - list_add_tail(&nfit_idt->list, &acpi_desc->idts); - dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, - idt->interleave_index, idt->line_count); - return true; -} - -static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) -{ - if (flush->header.length < sizeof(*flush)) - return 0; - return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); -} - -static bool add_flush(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, - struct acpi_nfit_flush_address *flush) -{ - struct device *dev = acpi_desc->dev; - struct nfit_flush *nfit_flush; - - if (!sizeof_flush(flush)) - return false; - - list_for_each_entry(nfit_flush, &prev->flushes, list) { - if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) - continue; - - if (memcmp(nfit_flush->flush, flush, - 
sizeof_flush(flush)) == 0) { - list_move_tail(&nfit_flush->list, &acpi_desc->flushes); - return true; - } - } - - nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) - + sizeof_flush(flush), GFP_KERNEL); - if (!nfit_flush) - return false; - INIT_LIST_HEAD(&nfit_flush->list); - memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); - list_add_tail(&nfit_flush->list, &acpi_desc->flushes); - dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, - flush->device_handle, flush->hint_count); - return true; -} - -static void *add_table(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev, void *table, const void *end) -{ - struct device *dev = acpi_desc->dev; - struct acpi_nfit_header *hdr; - void *err = ERR_PTR(-ENOMEM); - - if (table >= end) - return NULL; - - hdr = table; - if (!hdr->length) { - dev_warn(dev, "found a zero length table '%d' parsing nfit\n", - hdr->type); - return NULL; - } - - switch (hdr->type) { - case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: - if (!add_spa(acpi_desc, prev, table)) - return err; - break; - case ACPI_NFIT_TYPE_MEMORY_MAP: - if (!add_memdev(acpi_desc, prev, table)) - return err; - break; - case ACPI_NFIT_TYPE_CONTROL_REGION: - if (!add_dcr(acpi_desc, prev, table)) - return err; - break; - case ACPI_NFIT_TYPE_DATA_REGION: - if (!add_bdw(acpi_desc, prev, table)) - return err; - break; - case ACPI_NFIT_TYPE_INTERLEAVE: - if (!add_idt(acpi_desc, prev, table)) - return err; - break; - case ACPI_NFIT_TYPE_FLUSH_ADDRESS: - if (!add_flush(acpi_desc, prev, table)) - return err; - break; - case ACPI_NFIT_TYPE_SMBIOS: - dev_dbg(dev, "%s: smbios\n", __func__); - break; - default: - dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); - break; - } - - return table + hdr->length; -} - -static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem) -{ - u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; - u16 dcr = nfit_mem->dcr->region_index; - struct nfit_spa *nfit_spa; - - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - u16 range_index = nfit_spa->spa->range_index; - int type = nfit_spa_type(nfit_spa->spa); - struct nfit_memdev *nfit_memdev; - - if (type != NFIT_SPA_BDW) - continue; - - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - if (nfit_memdev->memdev->range_index != range_index) - continue; - if (nfit_memdev->memdev->device_handle != device_handle) - continue; - if (nfit_memdev->memdev->region_index != dcr) - continue; - - nfit_mem->spa_bdw = nfit_spa->spa; - return; - } - } - - dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", - nfit_mem->spa_dcr->range_index); - nfit_mem->bdw = NULL; -} - -static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) -{ - u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; - struct nfit_memdev *nfit_memdev; - struct nfit_bdw *nfit_bdw; - struct nfit_idt *nfit_idt; - u16 idt_idx, range_index; - - list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { - if (nfit_bdw->bdw->region_index != dcr) - continue; - nfit_mem->bdw = nfit_bdw->bdw; - break; - } - - if (!nfit_mem->bdw) - return; - - nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); - - if (!nfit_mem->spa_bdw) - return; - - range_index = nfit_mem->spa_bdw->range_index; - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - if (nfit_memdev->memdev->range_index != range_index || - nfit_memdev->memdev->region_index != dcr) - continue; - nfit_mem->memdev_bdw = nfit_memdev->memdev; - idt_idx = 
nfit_memdev->memdev->interleave_index; - list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { - if (nfit_idt->idt->interleave_index != idt_idx) - continue; - nfit_mem->idt_bdw = nfit_idt->idt; - break; - } - break; - } -} - -static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, - struct acpi_nfit_system_address *spa) -{ - struct nfit_mem *nfit_mem, *found; - struct nfit_memdev *nfit_memdev; - int type = nfit_spa_type(spa); - - switch (type) { - case NFIT_SPA_DCR: - case NFIT_SPA_PM: - break; - default: - return 0; - } - - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - struct nfit_flush *nfit_flush; - struct nfit_dcr *nfit_dcr; - u32 device_handle; - u16 dcr; - - if (nfit_memdev->memdev->range_index != spa->range_index) - continue; - found = NULL; - dcr = nfit_memdev->memdev->region_index; - device_handle = nfit_memdev->memdev->device_handle; - list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) - if (__to_nfit_memdev(nfit_mem)->device_handle - == device_handle) { - found = nfit_mem; - break; - } - - if (found) - nfit_mem = found; - else { - nfit_mem = devm_kzalloc(acpi_desc->dev, - sizeof(*nfit_mem), GFP_KERNEL); - if (!nfit_mem) - return -ENOMEM; - INIT_LIST_HEAD(&nfit_mem->list); - nfit_mem->acpi_desc = acpi_desc; - list_add(&nfit_mem->list, &acpi_desc->dimms); - } - - list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { - if (nfit_dcr->dcr->region_index != dcr) - continue; - /* - * Record the control region for the dimm. For - * the ACPI 6.1 case, where there are separate - * control regions for the pmem vs blk - * interfaces, be sure to record the extended - * blk details. - */ - if (!nfit_mem->dcr) - nfit_mem->dcr = nfit_dcr->dcr; - else if (nfit_mem->dcr->windows == 0 - && nfit_dcr->dcr->windows) - nfit_mem->dcr = nfit_dcr->dcr; - break; - } - - list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { - struct acpi_nfit_flush_address *flush; - u16 i; - - if (nfit_flush->flush->device_handle != device_handle) - continue; - nfit_mem->nfit_flush = nfit_flush; - flush = nfit_flush->flush; - nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, - flush->hint_count - * sizeof(struct resource), GFP_KERNEL); - if (!nfit_mem->flush_wpq) - return -ENOMEM; - for (i = 0; i < flush->hint_count; i++) { - struct resource *res = &nfit_mem->flush_wpq[i]; - - res->start = flush->hint_address[i]; - res->end = res->start + 8 - 1; - } - break; - } - - if (dcr && !nfit_mem->dcr) { - dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", - spa->range_index, dcr); - return -ENODEV; - } - - if (type == NFIT_SPA_DCR) { - struct nfit_idt *nfit_idt; - u16 idt_idx; - - /* multiple dimms may share a SPA when interleaved */ - nfit_mem->spa_dcr = spa; - nfit_mem->memdev_dcr = nfit_memdev->memdev; - idt_idx = nfit_memdev->memdev->interleave_index; - list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { - if (nfit_idt->idt->interleave_index != idt_idx) - continue; - nfit_mem->idt_dcr = nfit_idt->idt; - break; - } - nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); - } else { - /* - * A single dimm may belong to multiple SPA-PM - * ranges, record at least one in addition to - * any SPA-DCR range. 
- */ - nfit_mem->memdev_pmem = nfit_memdev->memdev; - } - } - - return 0; -} - -static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) -{ - struct nfit_mem *a = container_of(_a, typeof(*a), list); - struct nfit_mem *b = container_of(_b, typeof(*b), list); - u32 handleA, handleB; - - handleA = __to_nfit_memdev(a)->device_handle; - handleB = __to_nfit_memdev(b)->device_handle; - if (handleA < handleB) - return -1; - else if (handleA > handleB) - return 1; - return 0; -} - -static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) -{ - struct nfit_spa *nfit_spa; - - /* - * For each SPA-DCR or SPA-PMEM address range find its - * corresponding MEMDEV(s). From each MEMDEV find the - * corresponding DCR. Then, if we're operating on a SPA-DCR, - * try to find a SPA-BDW and a corresponding BDW that references - * the DCR. Throw it all into an nfit_mem object. Note, that - * BDWs are optional. - */ - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - int rc; - - rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); - if (rc) - return rc; - } - - list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); - - return 0; -} - -static ssize_t revision_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - - return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); -} -static DEVICE_ATTR_RO(revision); - -/* - * This shows the number of full Address Range Scrubs that have been - * completed since driver load time. Userspace can wait on this using - * select/poll etc. A '+' at the end indicates an ARS is in progress - */ -static ssize_t scrub_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvdimm_bus_descriptor *nd_desc; - ssize_t rc = -ENXIO; - - device_lock(dev); - nd_desc = dev_get_drvdata(dev); - if (nd_desc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - - rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - (work_busy(&acpi_desc->work)) ? 
"+\n" : "\n"); - } - device_unlock(dev); - return rc; -} - -static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); - -static ssize_t scrub_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct nvdimm_bus_descriptor *nd_desc; - ssize_t rc; - long val; - - rc = kstrtol(buf, 0, &val); - if (rc) - return rc; - if (val != 1) - return -EINVAL; - - device_lock(dev); - nd_desc = dev_get_drvdata(dev); - if (nd_desc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - - rc = acpi_nfit_ars_rescan(acpi_desc); - } - device_unlock(dev); - if (rc) - return rc; - return size; -} -static DEVICE_ATTR_RW(scrub); - -static bool ars_supported(struct nvdimm_bus *nvdimm_bus) -{ - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START - | 1 << ND_CMD_ARS_STATUS; - - return (nd_desc->cmd_mask & mask) == mask; -} - -static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); - - if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) - return 0; - return a->mode; -} - -static struct attribute *acpi_nfit_attributes[] = { - &dev_attr_revision.attr, - &dev_attr_scrub.attr, - NULL, -}; - -static struct attribute_group acpi_nfit_attribute_group = { - .name = "nfit", - .attrs = acpi_nfit_attributes, - .is_visible = nfit_visible, -}; - -static const struct attribute_group *acpi_nfit_attribute_groups[] = { - &nvdimm_bus_attribute_group, - &acpi_nfit_attribute_group, - NULL, -}; - -static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) -{ - struct nvdimm *nvdimm = to_nvdimm(dev); - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - - return __to_nfit_memdev(nfit_mem); -} - -static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) -{ - struct nvdimm *nvdimm = to_nvdimm(dev); - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - - return nfit_mem->dcr; -} - -static ssize_t handle_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); - - return sprintf(buf, "%#x\n", memdev->device_handle); -} -static DEVICE_ATTR_RO(handle); - -static ssize_t phys_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); - - return sprintf(buf, "%#x\n", memdev->physical_id); -} -static DEVICE_ATTR_RO(phys_id); - -static ssize_t vendor_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); -} -static DEVICE_ATTR_RO(vendor); - -static ssize_t rev_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); -} -static DEVICE_ATTR_RO(rev_id); - -static ssize_t device_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); -} -static DEVICE_ATTR_RO(device); - -static ssize_t subsystem_vendor_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return 
sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); -} -static DEVICE_ATTR_RO(subsystem_vendor); - -static ssize_t subsystem_rev_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%04x\n", - be16_to_cpu(dcr->subsystem_revision_id)); -} -static DEVICE_ATTR_RO(subsystem_rev_id); - -static ssize_t subsystem_device_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); -} -static DEVICE_ATTR_RO(subsystem_device); - -static int num_nvdimm_formats(struct nvdimm *nvdimm) -{ - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - int formats = 0; - - if (nfit_mem->memdev_pmem) - formats++; - if (nfit_mem->memdev_bdw) - formats++; - return formats; -} - -static ssize_t format_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code)); -} -static DEVICE_ATTR_RO(format); - -static ssize_t format1_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u32 handle; - ssize_t rc = -ENXIO; - struct nfit_mem *nfit_mem; - struct nfit_memdev *nfit_memdev; - struct acpi_nfit_desc *acpi_desc; - struct nvdimm *nvdimm = to_nvdimm(dev); - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - nfit_mem = nvdimm_provider_data(nvdimm); - acpi_desc = nfit_mem->acpi_desc; - handle = to_nfit_memdev(dev)->device_handle; - - /* assumes DIMMs have at most 2 published interface codes */ - mutex_lock(&acpi_desc->init_mutex); - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; - struct nfit_dcr *nfit_dcr; - - if (memdev->device_handle != handle) - continue; - - list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { - if (nfit_dcr->dcr->region_index != memdev->region_index) - continue; - if (nfit_dcr->dcr->code == dcr->code) - continue; - rc = sprintf(buf, "%#x\n", - be16_to_cpu(nfit_dcr->dcr->code)); - break; - } - if (rc != ENXIO) - break; - } - mutex_unlock(&acpi_desc->init_mutex); - return rc; -} -static DEVICE_ATTR_RO(format1); - -static ssize_t formats_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvdimm *nvdimm = to_nvdimm(dev); - - return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); -} -static DEVICE_ATTR_RO(formats); - -static ssize_t serial_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); -} -static DEVICE_ATTR_RO(serial); - -static ssize_t family_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvdimm *nvdimm = to_nvdimm(dev); - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - - if (nfit_mem->family < 0) - return -ENXIO; - return sprintf(buf, "%d\n", nfit_mem->family); -} -static DEVICE_ATTR_RO(family); - -static ssize_t dsm_mask_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvdimm *nvdimm = to_nvdimm(dev); - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - - if (nfit_mem->family < 0) - return -ENXIO; - return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); -} -static DEVICE_ATTR_RO(dsm_mask); - -static ssize_t flags_show(struct device *dev, 
- struct device_attribute *attr, char *buf) -{ - u16 flags = to_nfit_memdev(dev)->flags; - - return sprintf(buf, "%s%s%s%s%s\n", - flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", - flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", - flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", - flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", - flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : ""); -} -static DEVICE_ATTR_RO(flags); - -static ssize_t id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - - if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) - return sprintf(buf, "%04x-%02x-%04x-%08x\n", - be16_to_cpu(dcr->vendor_id), - dcr->manufacturing_location, - be16_to_cpu(dcr->manufacturing_date), - be32_to_cpu(dcr->serial_number)); - else - return sprintf(buf, "%04x-%08x\n", - be16_to_cpu(dcr->vendor_id), - be32_to_cpu(dcr->serial_number)); -} -static DEVICE_ATTR_RO(id); - -static struct attribute *acpi_nfit_dimm_attributes[] = { - &dev_attr_handle.attr, - &dev_attr_phys_id.attr, - &dev_attr_vendor.attr, - &dev_attr_device.attr, - &dev_attr_rev_id.attr, - &dev_attr_subsystem_vendor.attr, - &dev_attr_subsystem_device.attr, - &dev_attr_subsystem_rev_id.attr, - &dev_attr_format.attr, - &dev_attr_formats.attr, - &dev_attr_format1.attr, - &dev_attr_serial.attr, - &dev_attr_flags.attr, - &dev_attr_id.attr, - &dev_attr_family.attr, - &dev_attr_dsm_mask.attr, - NULL, -}; - -static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, - struct attribute *a, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct nvdimm *nvdimm = to_nvdimm(dev); - - if (!to_nfit_dcr(dev)) - return 0; - if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) - return 0; - return a->mode; -} - -static struct attribute_group acpi_nfit_dimm_attribute_group = { - .name = "nfit", - .attrs = acpi_nfit_dimm_attributes, - .is_visible = acpi_nfit_dimm_attr_visible, -}; - -static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { - &nvdimm_attribute_group, - &nd_device_attribute_group, - &acpi_nfit_dimm_attribute_group, - NULL, -}; - -static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, - u32 device_handle) -{ - struct nfit_mem *nfit_mem; - - list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) - if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) - return nfit_mem->nvdimm; - - return NULL; -} - -static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem, u32 device_handle) -{ - struct acpi_device *adev, *adev_dimm; - struct device *dev = acpi_desc->dev; - unsigned long dsm_mask; - const u8 *uuid; - int i; - - /* nfit test assumes 1:1 relationship between commands and dsms */ - nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; - nfit_mem->family = NVDIMM_FAMILY_INTEL; - adev = to_acpi_dev(acpi_desc); - if (!adev) - return 0; - - adev_dimm = acpi_find_child_device(adev, device_handle, false); - nfit_mem->adev = adev_dimm; - if (!adev_dimm) { - dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", - device_handle); - return force_enable_dimms ? 0 : -ENODEV; - } - - /* - * Until standardization materializes we need to consider 4 - * different command sets. Note, that checking for function0 (bit0) - * tells us if any commands are reachable through this uuid. 
- */ - for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) - if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) - break; - - /* limit the supported commands to those that are publicly documented */ - nfit_mem->family = i; - if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { - dsm_mask = 0x3fe; - if (disable_vendor_specific) - dsm_mask &= ~(1 << ND_CMD_VENDOR); - } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { - dsm_mask = 0x1c3c76; - } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { - dsm_mask = 0x1fe; - if (disable_vendor_specific) - dsm_mask &= ~(1 << 8); - } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { - dsm_mask = 0xffffffff; - } else { - dev_err(dev, "unknown dimm command family\n"); - nfit_mem->family = -1; - return force_enable_dimms ? 0 : -ENODEV; - } - - uuid = to_nfit_uuid(nfit_mem->family); - for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) - if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) - set_bit(i, &nfit_mem->dsm_mask); - - return 0; -} - -static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) -{ - struct nfit_mem *nfit_mem; - int dimm_count = 0; - - list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { - struct acpi_nfit_flush_address *flush; - unsigned long flags = 0, cmd_mask; - struct nvdimm *nvdimm; - u32 device_handle; - u16 mem_flags; - int rc; - - device_handle = __to_nfit_memdev(nfit_mem)->device_handle; - nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); - if (nvdimm) { - dimm_count++; - continue; - } - - if (nfit_mem->bdw && nfit_mem->memdev_pmem) - flags |= NDD_ALIASING; - - mem_flags = __to_nfit_memdev(nfit_mem)->flags; - if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) - flags |= NDD_UNARMED; - - rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); - if (rc) - continue; - - /* - * TODO: provide translation for non-NVDIMM_FAMILY_INTEL - * devices (i.e. from nd_cmd to acpi_dsm) to standardize the - * userspace interface. - */ - cmd_mask = 1UL << ND_CMD_CALL; - if (nfit_mem->family == NVDIMM_FAMILY_INTEL) - cmd_mask |= nfit_mem->dsm_mask; - - flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush - : NULL; - nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, - acpi_nfit_dimm_attribute_groups, - flags, cmd_mask, flush ? flush->hint_count : 0, - nfit_mem->flush_wpq); - if (!nvdimm) - return -ENOMEM; - - nfit_mem->nvdimm = nvdimm; - dimm_count++; - - if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) - continue; - - dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", - nvdimm_name(nvdimm), - mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", - mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", - mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", - mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? 
" not_armed" : ""); - - } - - return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); -} - -static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) -{ - struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; - const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); - struct acpi_device *adev; - int i; - - nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; - adev = to_acpi_dev(acpi_desc); - if (!adev) - return; - - for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) - if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) - set_bit(i, &nd_desc->cmd_mask); -} - -static ssize_t range_index_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nd_region *nd_region = to_nd_region(dev); - struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); - - return sprintf(buf, "%d\n", nfit_spa->spa->range_index); -} -static DEVICE_ATTR_RO(range_index); - -static struct attribute *acpi_nfit_region_attributes[] = { - &dev_attr_range_index.attr, - NULL, -}; - -static struct attribute_group acpi_nfit_region_attribute_group = { - .name = "nfit", - .attrs = acpi_nfit_region_attributes, -}; - -static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { - &nd_region_attribute_group, - &nd_mapping_attribute_group, - &nd_device_attribute_group, - &nd_numa_attribute_group, - &acpi_nfit_region_attribute_group, - NULL, -}; - -/* enough info to uniquely specify an interleave set */ -struct nfit_set_info { - struct nfit_set_info_map { - u64 region_offset; - u32 serial_number; - u32 pad; - } mapping[0]; -}; - -static size_t sizeof_nfit_set_info(int num_mappings) -{ - return sizeof(struct nfit_set_info) - + num_mappings * sizeof(struct nfit_set_info_map); -} - -static int cmp_map(const void *m0, const void *m1) -{ - const struct nfit_set_info_map *map0 = m0; - const struct nfit_set_info_map *map1 = m1; - - return memcmp(&map0->region_offset, &map1->region_offset, - sizeof(u64)); -} - -/* Retrieve the nth entry referencing this spa */ -static struct acpi_nfit_memory_map *memdev_from_spa( - struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) -{ - struct nfit_memdev *nfit_memdev; - - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) - if (nfit_memdev->memdev->range_index == range_index) - if (n-- == 0) - return nfit_memdev->memdev; - return NULL; -} - -static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, - struct nd_region_desc *ndr_desc, - struct acpi_nfit_system_address *spa) -{ - int i, spa_type = nfit_spa_type(spa); - struct device *dev = acpi_desc->dev; - struct nd_interleave_set *nd_set; - u16 nr = ndr_desc->num_mappings; - struct nfit_set_info *info; - - if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) - /* pass */; - else - return 0; - - nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); - if (!nd_set) - return -ENOMEM; - - info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); - if (!info) - return -ENOMEM; - for (i = 0; i < nr; i++) { - struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; - struct nfit_set_info_map *map = &info->mapping[i]; - struct nvdimm *nvdimm = nd_mapping->nvdimm; - struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, - spa->range_index, i); - - if (!memdev || !nfit_mem->dcr) { - dev_err(dev, "%s: failed to find DCR\n", __func__); - return -ENODEV; - } - - map->region_offset = memdev->region_offset; - map->serial_number = nfit_mem->dcr->serial_number; - } - - sort(&info->mapping[0], 
nr, sizeof(struct nfit_set_info_map), - cmp_map, NULL); - nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); - ndr_desc->nd_set = nd_set; - devm_kfree(dev, info); - - return 0; -} - -static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) -{ - struct acpi_nfit_interleave *idt = mmio->idt; - u32 sub_line_offset, line_index, line_offset; - u64 line_no, table_skip_count, table_offset; - - line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); - table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); - line_offset = idt->line_offset[line_index] - * mmio->line_size; - table_offset = table_skip_count * mmio->table_size; - - return mmio->base_offset + line_offset + table_offset + sub_line_offset; -} - -static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) -{ - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; - u64 offset = nfit_blk->stat_offset + mmio->size * bw; - - if (mmio->num_lines) - offset = to_interleave_offset(offset, mmio); - - return readl(mmio->addr.base + offset); -} - -static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, - resource_size_t dpa, unsigned int len, unsigned int write) -{ - u64 cmd, offset; - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; - - enum { - BCW_OFFSET_MASK = (1ULL << 48)-1, - BCW_LEN_SHIFT = 48, - BCW_LEN_MASK = (1ULL << 8) - 1, - BCW_CMD_SHIFT = 56, - }; - - cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; - len = len >> L1_CACHE_SHIFT; - cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; - cmd |= ((u64) write) << BCW_CMD_SHIFT; - - offset = nfit_blk->cmd_offset + mmio->size * bw; - if (mmio->num_lines) - offset = to_interleave_offset(offset, mmio); - - writeq(cmd, mmio->addr.base + offset); - nvdimm_flush(nfit_blk->nd_region); - - if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) - readq(mmio->addr.base + offset); -} - -static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, - resource_size_t dpa, void *iobuf, size_t len, int rw, - unsigned int lane) -{ - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; - unsigned int copied = 0; - u64 base_offset; - int rc; - - base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES - + lane * mmio->size; - write_blk_ctl(nfit_blk, lane, dpa, len, rw); - while (len) { - unsigned int c; - u64 offset; - - if (mmio->num_lines) { - u32 line_offset; - - offset = to_interleave_offset(base_offset + copied, - mmio); - div_u64_rem(offset, mmio->line_size, &line_offset); - c = min_t(size_t, len, mmio->line_size - line_offset); - } else { - offset = base_offset + nfit_blk->bdw_offset; - c = len; - } - - if (rw) - memcpy_to_pmem(mmio->addr.aperture + offset, - iobuf + copied, c); - else { - if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) - mmio_flush_range((void __force *) - mmio->addr.aperture + offset, c); - - memcpy_from_pmem(iobuf + copied, - mmio->addr.aperture + offset, c); - } - - copied += c; - len -= c; - } - - if (rw) - nvdimm_flush(nfit_blk->nd_region); - - rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; - return rc; -} - -static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, - resource_size_t dpa, void *iobuf, u64 len, int rw) -{ - struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; - struct nd_region *nd_region = nfit_blk->nd_region; - unsigned int lane, copied = 0; - int rc = 0; - - lane = nd_region_acquire_lane(nd_region); - while (len) { - u64 c = min(len, mmio->size); - - rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, - iobuf + copied, c, rw, lane); - if (rc) - break; - - copied += c; - len -= c; - } - nd_region_release_lane(nd_region, lane); - - return rc; -} - -static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, - struct acpi_nfit_interleave *idt, u16 interleave_ways) -{ - if (idt) { - mmio->num_lines = idt->line_count; - mmio->line_size = idt->line_size; - if (interleave_ways == 0) - return -ENXIO; - mmio->table_size = mmio->num_lines * interleave_ways - * mmio->line_size; - } - - return 0; -} - -static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, - struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) -{ - struct nd_cmd_dimm_flags flags; - int rc; - - memset(&flags, 0, sizeof(flags)); - rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, - sizeof(flags), NULL); - - if (rc >= 0 && flags.status == 0) - nfit_blk->dimm_flags = flags.flags; - else if (rc == -ENOTTY) { - /* fall back to a conservative default */ - nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; - rc = 0; - } else - rc = -ENXIO; - - return rc; -} - -static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, - struct device *dev) -{ - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nfit_blk_mmio *mmio; - struct nfit_blk *nfit_blk; - struct nfit_mem *nfit_mem; - struct nvdimm *nvdimm; - int rc; - - nvdimm = nd_blk_region_to_dimm(ndbr); - nfit_mem = nvdimm_provider_data(nvdimm); - if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { - dev_dbg(dev, "%s: missing%s%s%s\n", __func__, - nfit_mem ? "" : " nfit_mem", - (nfit_mem && nfit_mem->dcr) ? "" : " dcr", - (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); - return -ENXIO; - } - - nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); - if (!nfit_blk) - return -ENOMEM; - nd_blk_region_set_provider_data(ndbr, nfit_blk); - nfit_blk->nd_region = to_nd_region(dev); - - /* map block aperture memory */ - nfit_blk->bdw_offset = nfit_mem->bdw->offset; - mmio = &nfit_blk->mmio[BDW]; - mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, - nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); - if (!mmio->addr.base) { - dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, - nvdimm_name(nvdimm)); - return -ENOMEM; - } - mmio->size = nfit_mem->bdw->size; - mmio->base_offset = nfit_mem->memdev_bdw->region_offset; - mmio->idt = nfit_mem->idt_bdw; - mmio->spa = nfit_mem->spa_bdw; - rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, - nfit_mem->memdev_bdw->interleave_ways); - if (rc) { - dev_dbg(dev, "%s: %s failed to init bdw interleave\n", - __func__, nvdimm_name(nvdimm)); - return rc; - } - - /* map block control memory */ - nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; - nfit_blk->stat_offset = nfit_mem->dcr->status_offset; - mmio = &nfit_blk->mmio[DCR]; - mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, - nfit_mem->spa_dcr->length); - if (!mmio->addr.base) { - dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, - nvdimm_name(nvdimm)); - return -ENOMEM; - } - mmio->size = nfit_mem->dcr->window_size; - mmio->base_offset = nfit_mem->memdev_dcr->region_offset; - mmio->idt = nfit_mem->idt_dcr; - mmio->spa = nfit_mem->spa_dcr; - rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, - nfit_mem->memdev_dcr->interleave_ways); - if (rc) { - dev_dbg(dev, "%s: %s failed to init dcr interleave\n", - __func__, nvdimm_name(nvdimm)); - return rc; - } - - rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); - if (rc < 0) { - dev_dbg(dev, "%s: %s failed get DIMM flags\n", - __func__, nvdimm_name(nvdimm)); - return rc; - } - - if (nvdimm_has_flush(nfit_blk->nd_region) < 0) - dev_warn(dev, "unable to guarantee persistence of writes\n"); - - if (mmio->line_size == 0) - return 0; - - if ((u32) nfit_blk->cmd_offset % mmio->line_size - + 8 > mmio->line_size) { - dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); - return -ENXIO; - } else if ((u32) nfit_blk->stat_offset % mmio->line_size - + 8 > mmio->line_size) { - dev_dbg(dev, "stat_offset crosses interleave boundary\n"); - return -ENXIO; - } - - return 0; -} - -static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, - struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) -{ - struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; - struct acpi_nfit_system_address *spa = nfit_spa->spa; - int cmd_rc, rc; - - cmd->address = spa->address; - cmd->length = spa->length; - rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, - sizeof(*cmd), &cmd_rc); - if (rc < 0) - return rc; - return cmd_rc; -} - -static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) -{ - int rc; - int cmd_rc; - struct nd_cmd_ars_start ars_start; - struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; - - memset(&ars_start, 0, sizeof(ars_start)); - ars_start.address = spa->address; - ars_start.length = spa->length; - if (nfit_spa_type(spa) == NFIT_SPA_PM) - ars_start.type = ND_ARS_PERSISTENT; - else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) - ars_start.type = ND_ARS_VOLATILE; - else - return -ENOTTY; - - rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, - 
sizeof(ars_start), &cmd_rc); - - if (rc < 0) - return rc; - return cmd_rc; -} - -static int ars_continue(struct acpi_nfit_desc *acpi_desc) -{ - int rc, cmd_rc; - struct nd_cmd_ars_start ars_start; - struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; - struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; - - memset(&ars_start, 0, sizeof(ars_start)); - ars_start.address = ars_status->restart_address; - ars_start.length = ars_status->restart_length; - ars_start.type = ars_status->type; - rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, - sizeof(ars_start), &cmd_rc); - if (rc < 0) - return rc; - return cmd_rc; -} - -static int ars_get_status(struct acpi_nfit_desc *acpi_desc) -{ - struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; - struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; - int rc, cmd_rc; - - rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, - acpi_desc->ars_status_size, &cmd_rc); - if (rc < 0) - return rc; - return cmd_rc; -} - -static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus, - struct nd_cmd_ars_status *ars_status) -{ - int rc; - u32 i; - - for (i = 0; i < ars_status->num_records; i++) { - rc = nvdimm_bus_add_poison(nvdimm_bus, - ars_status->records[i].err_address, - ars_status->records[i].length); - if (rc) - return rc; - } - - return 0; -} - -static void acpi_nfit_remove_resource(void *data) -{ - struct resource *res = data; - - remove_resource(res); -} - -static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, - struct nd_region_desc *ndr_desc) -{ - struct resource *res, *nd_res = ndr_desc->res; - int is_pmem, ret; - - /* No operation if the region is already registered as PMEM */ - is_pmem = region_intersects(nd_res->start, resource_size(nd_res), - IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); - if (is_pmem == REGION_INTERSECTS) - return 0; - - res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); - if (!res) - return -ENOMEM; - - res->name = "Persistent Memory"; - res->start = nd_res->start; - res->end = nd_res->end; - res->flags = IORESOURCE_MEM; - res->desc = IORES_DESC_PERSISTENT_MEMORY; - - ret = insert_resource(&iomem_resource, res); - if (ret) - return ret; - - ret = devm_add_action_or_reset(acpi_desc->dev, - acpi_nfit_remove_resource, - res); - if (ret) - return ret; - - return 0; -} - -static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, - struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, - struct acpi_nfit_memory_map *memdev, - struct nfit_spa *nfit_spa) -{ - struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, - memdev->device_handle); - struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nd_blk_region_desc *ndbr_desc; - struct nfit_mem *nfit_mem; - int blk_valid = 0; - - if (!nvdimm) { - dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", - spa->range_index, memdev->device_handle); - return -ENODEV; - } - - nd_mapping->nvdimm = nvdimm; - switch (nfit_spa_type(spa)) { - case NFIT_SPA_PM: - case NFIT_SPA_VOLATILE: - nd_mapping->start = memdev->address; - nd_mapping->size = memdev->region_size; - break; - case NFIT_SPA_DCR: - nfit_mem = nvdimm_provider_data(nvdimm); - if (!nfit_mem || !nfit_mem->bdw) { - dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", - spa->range_index, nvdimm_name(nvdimm)); - } else { - nd_mapping->size = nfit_mem->bdw->capacity; - nd_mapping->start = nfit_mem->bdw->start_address; - ndr_desc->num_lanes = nfit_mem->bdw->windows; - blk_valid = 1; - } - - ndr_desc->nd_mapping = 
nd_mapping; - ndr_desc->num_mappings = blk_valid; - ndbr_desc = to_blk_region_desc(ndr_desc); - ndbr_desc->enable = acpi_nfit_blk_region_enable; - ndbr_desc->do_io = acpi_desc->blk_do_io; - nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, - ndr_desc); - if (!nfit_spa->nd_region) - return -ENOMEM; - break; - } - - return 0; -} - -static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) -{ - return (nfit_spa_type(spa) == NFIT_SPA_VDISK || - nfit_spa_type(spa) == NFIT_SPA_VCD || - nfit_spa_type(spa) == NFIT_SPA_PDISK || - nfit_spa_type(spa) == NFIT_SPA_PCD); -} - -static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, - struct nfit_spa *nfit_spa) -{ - static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; - struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nd_blk_region_desc ndbr_desc; - struct nd_region_desc *ndr_desc; - struct nfit_memdev *nfit_memdev; - struct nvdimm_bus *nvdimm_bus; - struct resource res; - int count = 0, rc; - - if (nfit_spa->nd_region) - return 0; - - if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { - dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", - __func__); - return 0; - } - - memset(&res, 0, sizeof(res)); - memset(&nd_mappings, 0, sizeof(nd_mappings)); - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); - res.start = spa->address; - res.end = res.start + spa->length - 1; - ndr_desc = &ndbr_desc.ndr_desc; - ndr_desc->res = &res; - ndr_desc->provider_data = nfit_spa; - ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; - if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) - ndr_desc->numa_node = acpi_map_pxm_to_online_node( - spa->proximity_domain); - else - ndr_desc->numa_node = NUMA_NO_NODE; - - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; - struct nd_mapping *nd_mapping; - - if (memdev->range_index != spa->range_index) - continue; - if (count >= ND_MAX_MAPPINGS) { - dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", - spa->range_index, ND_MAX_MAPPINGS); - return -ENXIO; - } - nd_mapping = &nd_mappings[count++]; - rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, - memdev, nfit_spa); - if (rc) - goto out; - } - - ndr_desc->nd_mapping = nd_mappings; - ndr_desc->num_mappings = count; - rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); - if (rc) - goto out; - - nvdimm_bus = acpi_desc->nvdimm_bus; - if (nfit_spa_type(spa) == NFIT_SPA_PM) { - rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); - if (rc) { - dev_warn(acpi_desc->dev, - "failed to insert pmem resource to iomem: %d\n", - rc); - goto out; - } - - nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, - ndr_desc); - if (!nfit_spa->nd_region) - rc = -ENOMEM; - } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { - nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, - ndr_desc); - if (!nfit_spa->nd_region) - rc = -ENOMEM; - } else if (nfit_spa_is_virtual(spa)) { - nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, - ndr_desc); - if (!nfit_spa->nd_region) - rc = -ENOMEM; - } - - out: - if (rc) - dev_err(acpi_desc->dev, "failed to register spa range %d\n", - nfit_spa->spa->range_index); - return rc; -} - -static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, - u32 max_ars) -{ - struct device *dev = acpi_desc->dev; - struct nd_cmd_ars_status *ars_status; - - if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { - memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); - 
return 0; - } - - if (acpi_desc->ars_status) - devm_kfree(dev, acpi_desc->ars_status); - acpi_desc->ars_status = NULL; - ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL); - if (!ars_status) - return -ENOMEM; - acpi_desc->ars_status = ars_status; - acpi_desc->ars_status_size = max_ars; - return 0; -} - -static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, - struct nfit_spa *nfit_spa) -{ - struct acpi_nfit_system_address *spa = nfit_spa->spa; - int rc; - - if (!nfit_spa->max_ars) { - struct nd_cmd_ars_cap ars_cap; - - memset(&ars_cap, 0, sizeof(ars_cap)); - rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); - if (rc < 0) - return rc; - nfit_spa->max_ars = ars_cap.max_ars_out; - nfit_spa->clear_err_unit = ars_cap.clear_err_unit; - /* check that the supported scrub types match the spa type */ - if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE && - ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0) - return -ENOTTY; - else if (nfit_spa_type(spa) == NFIT_SPA_PM && - ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0) - return -ENOTTY; - } - - if (ars_status_alloc(acpi_desc, nfit_spa->max_ars)) - return -ENOMEM; - - rc = ars_get_status(acpi_desc); - if (rc < 0 && rc != -ENOSPC) - return rc; - - if (ars_status_process_records(acpi_desc->nvdimm_bus, - acpi_desc->ars_status)) - return -ENOMEM; - - return 0; -} - -static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, - struct nfit_spa *nfit_spa) -{ - struct acpi_nfit_system_address *spa = nfit_spa->spa; - unsigned int overflow_retry = scrub_overflow_abort; - u64 init_ars_start = 0, init_ars_len = 0; - struct device *dev = acpi_desc->dev; - unsigned int tmo = scrub_timeout; - int rc; - - if (!nfit_spa->ars_required || !nfit_spa->nd_region) - return; - - rc = ars_start(acpi_desc, nfit_spa); - /* - * If we timed out the initial scan we'll still be busy here, - * and will wait another timeout before giving up permanently. - */ - if (rc < 0 && rc != -EBUSY) - return; - - do { - u64 ars_start, ars_len; - - if (acpi_desc->cancel) - break; - rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); - if (rc == -ENOTTY) - break; - if (rc == -EBUSY && !tmo) { - dev_warn(dev, "range %d ars timeout, aborting\n", - spa->range_index); - break; - } - - if (rc == -EBUSY) { - /* - * Note, entries may be appended to the list - * while the lock is dropped, but the workqueue - * being active prevents entries being deleted / - * freed. - */ - mutex_unlock(&acpi_desc->init_mutex); - ssleep(1); - tmo--; - mutex_lock(&acpi_desc->init_mutex); - continue; - } - - /* we got some results, but there are more pending... 
*/ - if (rc == -ENOSPC && overflow_retry--) { - if (!init_ars_len) { - init_ars_len = acpi_desc->ars_status->length; - init_ars_start = acpi_desc->ars_status->address; - } - rc = ars_continue(acpi_desc); - } - - if (rc < 0) { - dev_warn(dev, "range %d ars continuation failed\n", - spa->range_index); - break; - } - - if (init_ars_len) { - ars_start = init_ars_start; - ars_len = init_ars_len; - } else { - ars_start = acpi_desc->ars_status->address; - ars_len = acpi_desc->ars_status->length; - } - dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", - spa->range_index, ars_start, ars_len); - /* notify the region about new poison entries */ - nvdimm_region_notify(nfit_spa->nd_region, - NVDIMM_REVALIDATE_POISON); - break; - } while (1); -} - -static void acpi_nfit_scrub(struct work_struct *work) -{ - struct device *dev; - u64 init_scrub_length = 0; - struct nfit_spa *nfit_spa; - u64 init_scrub_address = 0; - bool init_ars_done = false; - struct acpi_nfit_desc *acpi_desc; - unsigned int tmo = scrub_timeout; - unsigned int overflow_retry = scrub_overflow_abort; - - acpi_desc = container_of(work, typeof(*acpi_desc), work); - dev = acpi_desc->dev; - - /* - * We scrub in 2 phases. The first phase waits for any platform - * firmware initiated scrubs to complete and then we go search for the - * affected spa regions to mark them scanned. In the second phase we - * initiate a directed scrub for every range that was not scrubbed in - * phase 1. If we're called for a 'rescan', we harmlessly pass through - * the first phase, but really only care about running phase 2, where - * regions can be notified of new poison. - */ - - /* process platform firmware initiated scrubs */ - retry: - mutex_lock(&acpi_desc->init_mutex); - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - struct nd_cmd_ars_status *ars_status; - struct acpi_nfit_system_address *spa; - u64 ars_start, ars_len; - int rc; - - if (acpi_desc->cancel) - break; - - if (nfit_spa->nd_region) - continue; - - if (init_ars_done) { - /* - * No need to re-query, we're now just - * reconciling all the ranges covered by the - * initial scrub - */ - rc = 0; - } else - rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); - - if (rc == -ENOTTY) { - /* no ars capability, just register spa and move on */ - acpi_nfit_register_region(acpi_desc, nfit_spa); - continue; - } - - if (rc == -EBUSY && !tmo) { - /* fallthrough to directed scrub in phase 2 */ - dev_warn(dev, "timeout awaiting ars results, continuing...\n"); - break; - } else if (rc == -EBUSY) { - mutex_unlock(&acpi_desc->init_mutex); - ssleep(1); - tmo--; - goto retry; - } - - /* we got some results, but there are more pending... */ - if (rc == -ENOSPC && overflow_retry--) { - ars_status = acpi_desc->ars_status; - /* - * Record the original scrub range, so that we - * can recall all the ranges impacted by the - * initial scrub. - */ - if (!init_scrub_length) { - init_scrub_length = ars_status->length; - init_scrub_address = ars_status->address; - } - rc = ars_continue(acpi_desc); - if (rc == 0) { - mutex_unlock(&acpi_desc->init_mutex); - goto retry; - } - } - - if (rc < 0) { - /* - * Initial scrub failed, we'll give it one more - * try below... 
- */ - break; - } - - /* We got some final results, record completed ranges */ - ars_status = acpi_desc->ars_status; - if (init_scrub_length) { - ars_start = init_scrub_address; - ars_len = ars_start + init_scrub_length; - } else { - ars_start = ars_status->address; - ars_len = ars_status->length; - } - spa = nfit_spa->spa; - - if (!init_ars_done) { - init_ars_done = true; - dev_dbg(dev, "init scrub %#llx + %#llx complete\n", - ars_start, ars_len); - } - if (ars_start <= spa->address && ars_start + ars_len - >= spa->address + spa->length) - acpi_nfit_register_region(acpi_desc, nfit_spa); - } - - /* - * For all the ranges not covered by an initial scrub we still - * want to see if there are errors, but it's ok to discover them - * asynchronously. - */ - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - /* - * Flag all the ranges that still need scrubbing, but - * register them now to make data available. - */ - if (!nfit_spa->nd_region) { - nfit_spa->ars_required = 1; - acpi_nfit_register_region(acpi_desc, nfit_spa); - } - } - - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) - acpi_nfit_async_scrub(acpi_desc, nfit_spa); - acpi_desc->scrub_count++; - if (acpi_desc->scrub_count_state) - sysfs_notify_dirent(acpi_desc->scrub_count_state); - mutex_unlock(&acpi_desc->init_mutex); -} - -static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) -{ - struct nfit_spa *nfit_spa; - int rc; - - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) - if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { - /* BLK regions don't need to wait for ars results */ - rc = acpi_nfit_register_region(acpi_desc, nfit_spa); - if (rc) - return rc; - } - - queue_work(nfit_wq, &acpi_desc->work); - return 0; -} - -static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, - struct nfit_table_prev *prev) -{ - struct device *dev = acpi_desc->dev; - - if (!list_empty(&prev->spas) || - !list_empty(&prev->memdevs) || - !list_empty(&prev->dcrs) || - !list_empty(&prev->bdws) || - !list_empty(&prev->idts) || - !list_empty(&prev->flushes)) { - dev_err(dev, "new nfit deletes entries (unsupported)\n"); - return -ENXIO; - } - return 0; -} - -static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) -{ - struct device *dev = acpi_desc->dev; - struct kernfs_node *nfit; - struct device *bus_dev; - - if (!ars_supported(acpi_desc->nvdimm_bus)) - return 0; - - bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); - nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); - if (!nfit) { - dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); - return -ENODEV; - } - acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); - sysfs_put(nfit); - if (!acpi_desc->scrub_count_state) { - dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); - return -ENODEV; - } - - return 0; -} - -static void acpi_nfit_destruct(void *data) -{ - struct acpi_nfit_desc *acpi_desc = data; - struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); - - acpi_desc->cancel = 1; - /* - * Bounce the nvdimm bus lock to make sure any in-flight - * acpi_nfit_ars_rescan() submissions have had a chance to - * either submit or see ->cancel set. 
- */ - device_lock(bus_dev); - device_unlock(bus_dev); - - flush_workqueue(nfit_wq); - if (acpi_desc->scrub_count_state) - sysfs_put(acpi_desc->scrub_count_state); - nvdimm_bus_unregister(acpi_desc->nvdimm_bus); - acpi_desc->nvdimm_bus = NULL; -} - -int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) -{ - struct device *dev = acpi_desc->dev; - struct nfit_table_prev prev; - const void *end; - int rc; - - if (!acpi_desc->nvdimm_bus) { - acpi_nfit_init_dsms(acpi_desc); - - acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, - &acpi_desc->nd_desc); - if (!acpi_desc->nvdimm_bus) - return -ENOMEM; - - rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, - acpi_desc); - if (rc) - return rc; - - rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); - if (rc) - return rc; - } - - mutex_lock(&acpi_desc->init_mutex); - - INIT_LIST_HEAD(&prev.spas); - INIT_LIST_HEAD(&prev.memdevs); - INIT_LIST_HEAD(&prev.dcrs); - INIT_LIST_HEAD(&prev.bdws); - INIT_LIST_HEAD(&prev.idts); - INIT_LIST_HEAD(&prev.flushes); - - list_cut_position(&prev.spas, &acpi_desc->spas, - acpi_desc->spas.prev); - list_cut_position(&prev.memdevs, &acpi_desc->memdevs, - acpi_desc->memdevs.prev); - list_cut_position(&prev.dcrs, &acpi_desc->dcrs, - acpi_desc->dcrs.prev); - list_cut_position(&prev.bdws, &acpi_desc->bdws, - acpi_desc->bdws.prev); - list_cut_position(&prev.idts, &acpi_desc->idts, - acpi_desc->idts.prev); - list_cut_position(&prev.flushes, &acpi_desc->flushes, - acpi_desc->flushes.prev); - - end = data + sz; - while (!IS_ERR_OR_NULL(data)) - data = add_table(acpi_desc, &prev, data, end); - - if (IS_ERR(data)) { - dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, - PTR_ERR(data)); - rc = PTR_ERR(data); - goto out_unlock; - } - - rc = acpi_nfit_check_deletions(acpi_desc, &prev); - if (rc) - goto out_unlock; - - rc = nfit_mem_init(acpi_desc); - if (rc) - goto out_unlock; - - rc = acpi_nfit_register_dimms(acpi_desc); - if (rc) - goto out_unlock; - - rc = acpi_nfit_register_regions(acpi_desc); - - out_unlock: - mutex_unlock(&acpi_desc->init_mutex); - return rc; -} -EXPORT_SYMBOL_GPL(acpi_nfit_init); - -struct acpi_nfit_flush_work { - struct work_struct work; - struct completion cmp; -}; - -static void flush_probe(struct work_struct *work) -{ - struct acpi_nfit_flush_work *flush; - - flush = container_of(work, typeof(*flush), work); - complete(&flush->cmp); -} - -static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) -{ - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); - struct device *dev = acpi_desc->dev; - struct acpi_nfit_flush_work flush; - - /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ - device_lock(dev); - device_unlock(dev); - - /* - * Scrub work could take 10s of seconds, userspace may give up so we - * need to be interruptible while waiting. - */ - INIT_WORK_ONSTACK(&flush.work, flush_probe); - COMPLETION_INITIALIZER_ONSTACK(flush.cmp); - queue_work(nfit_wq, &flush.work); - return wait_for_completion_interruptible(&flush.cmp); -} - -static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, - struct nvdimm *nvdimm, unsigned int cmd) -{ - struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); - - if (nvdimm) - return 0; - if (cmd != ND_CMD_ARS_START) - return 0; - - /* - * The kernel and userspace may race to initiate a scrub, but - * the scrub thread is prepared to lose that initial race. 
It - * just needs guarantees that any ars it initiates are not - * interrupted by any intervening start reqeusts from userspace. - */ - if (work_busy(&acpi_desc->work)) - return -EBUSY; - - return 0; -} - -static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) -{ - struct device *dev = acpi_desc->dev; - struct nfit_spa *nfit_spa; - - if (work_busy(&acpi_desc->work)) - return -EBUSY; - - if (acpi_desc->cancel) - return 0; - - mutex_lock(&acpi_desc->init_mutex); - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - struct acpi_nfit_system_address *spa = nfit_spa->spa; - - if (nfit_spa_type(spa) != NFIT_SPA_PM) - continue; - - nfit_spa->ars_required = 1; - } - queue_work(nfit_wq, &acpi_desc->work); - dev_dbg(dev, "%s: ars_scan triggered\n", __func__); - mutex_unlock(&acpi_desc->init_mutex); - - return 0; -} - -void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) -{ - struct nvdimm_bus_descriptor *nd_desc; - - dev_set_drvdata(dev, acpi_desc); - acpi_desc->dev = dev; - acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; - nd_desc = &acpi_desc->nd_desc; - nd_desc->provider_name = "ACPI.NFIT"; - nd_desc->module = THIS_MODULE; - nd_desc->ndctl = acpi_nfit_ctl; - nd_desc->flush_probe = acpi_nfit_flush_probe; - nd_desc->clear_to_send = acpi_nfit_clear_to_send; - nd_desc->attr_groups = acpi_nfit_attribute_groups; - - INIT_LIST_HEAD(&acpi_desc->spas); - INIT_LIST_HEAD(&acpi_desc->dcrs); - INIT_LIST_HEAD(&acpi_desc->bdws); - INIT_LIST_HEAD(&acpi_desc->idts); - INIT_LIST_HEAD(&acpi_desc->flushes); - INIT_LIST_HEAD(&acpi_desc->memdevs); - INIT_LIST_HEAD(&acpi_desc->dimms); - mutex_init(&acpi_desc->init_mutex); - INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); -} -EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); - -static int acpi_nfit_add(struct acpi_device *adev) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_nfit_desc *acpi_desc; - struct device *dev = &adev->dev; - struct acpi_table_header *tbl; - acpi_status status = AE_OK; - acpi_size sz; - int rc = 0; - - status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); - if (ACPI_FAILURE(status)) { - /* This is ok, we could have an nvdimm hotplugged later */ - dev_dbg(dev, "failed to find NFIT at startup\n"); - return 0; - } - - acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); - if (!acpi_desc) - return -ENOMEM; - acpi_nfit_desc_init(acpi_desc, &adev->dev); - - /* Save the acpi header for exporting the revision via sysfs */ - acpi_desc->acpi_header = *tbl; - - /* Evaluate _FIT and override with that if present */ - status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); - if (ACPI_SUCCESS(status) && buf.length > 0) { - union acpi_object *obj = buf.pointer; - - if (obj->type == ACPI_TYPE_BUFFER) - rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, - obj->buffer.length); - else - dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", - __func__, (int) obj->type); - kfree(buf.pointer); - } else - /* skip over the lead-in header table */ - rc = acpi_nfit_init(acpi_desc, (void *) tbl - + sizeof(struct acpi_table_nfit), - sz - sizeof(struct acpi_table_nfit)); - return rc; -} - -static int acpi_nfit_remove(struct acpi_device *adev) -{ - /* see acpi_nfit_destruct */ - return 0; -} - -static void acpi_nfit_notify(struct acpi_device *adev, u32 event) -{ - struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct device *dev = &adev->dev; - union acpi_object *obj; - acpi_status status; - int ret; - - 
dev_dbg(dev, "%s: event: %d\n", __func__, event); - - device_lock(dev); - if (!dev->driver) { - /* dev->driver may be null if we're being removed */ - dev_dbg(dev, "%s: no driver found for dev\n", __func__); - goto out_unlock; - } - - if (!acpi_desc) { - acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); - if (!acpi_desc) - goto out_unlock; - acpi_nfit_desc_init(acpi_desc, &adev->dev); - } else { - /* - * Finish previous registration before considering new - * regions. - */ - flush_workqueue(nfit_wq); - } - - /* Evaluate _FIT */ - status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); - if (ACPI_FAILURE(status)) { - dev_err(dev, "failed to evaluate _FIT\n"); - goto out_unlock; - } - - obj = buf.pointer; - if (obj->type == ACPI_TYPE_BUFFER) { - ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, - obj->buffer.length); - if (ret) - dev_err(dev, "failed to merge updated NFIT\n"); - } else - dev_err(dev, "Invalid _FIT\n"); - kfree(buf.pointer); - - out_unlock: - device_unlock(dev); -} - -static const struct acpi_device_id acpi_nfit_ids[] = { - { "ACPI0012", 0 }, - { "", 0 }, -}; -MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); - -static struct acpi_driver acpi_nfit_driver = { - .name = KBUILD_MODNAME, - .ids = acpi_nfit_ids, - .ops = { - .add = acpi_nfit_add, - .remove = acpi_nfit_remove, - .notify = acpi_nfit_notify, - }, -}; - -static __init int nfit_init(void) -{ - BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); - BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); - BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); - BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); - BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); - BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); - BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); - - acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); - acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); - acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); - acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); - acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); - acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); - acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); - acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); - acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); - acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); - acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); - acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); - acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); - - nfit_wq = create_singlethread_workqueue("nfit"); - if (!nfit_wq) - return -ENOMEM; - - return acpi_bus_register_driver(&acpi_nfit_driver); -} - -static __exit void nfit_exit(void) -{ - acpi_bus_unregister_driver(&acpi_nfit_driver); - destroy_workqueue(nfit_wq); -} - -module_init(nfit_init); -module_exit(nfit_exit); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h deleted file mode 100644 index 6ecf337c97aa..000000000000 --- a/drivers/acpi/nfit.h +++ /dev/null @@ -1,207 +0,0 @@ -/* - * NVDIMM Firmware Interface Table - NFIT - * - * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __NFIT_H__ -#define __NFIT_H__ -#include -#include -#include -#include -#include -#include - -/* ACPI 6.1 */ -#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba" - -/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */ -#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" - -/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */ -#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" -#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e" - -/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ -#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" - -#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ - | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ - | ACPI_NFIT_MEM_NOT_ARMED) - -enum nfit_uuids { - /* for simplicity alias the uuid index with the family id */ - NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL, - NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, - NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, - NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, - NFIT_SPA_VOLATILE, - NFIT_SPA_PM, - NFIT_SPA_DCR, - NFIT_SPA_BDW, - NFIT_SPA_VDISK, - NFIT_SPA_VCD, - NFIT_SPA_PDISK, - NFIT_SPA_PCD, - NFIT_DEV_BUS, - NFIT_UUID_MAX, -}; - -/* - * Region format interface codes are stored as an array of bytes in the - * NFIT DIMM Control Region structure - */ -#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */ -#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */ -#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */ - -enum { - NFIT_BLK_READ_FLUSH = 1, - NFIT_BLK_DCR_LATCH = 2, - NFIT_ARS_STATUS_DONE = 0, - NFIT_ARS_STATUS_BUSY = 1 << 16, - NFIT_ARS_STATUS_NONE = 2 << 16, - NFIT_ARS_STATUS_INTR = 3 << 16, - NFIT_ARS_START_BUSY = 6, - NFIT_ARS_CAP_NONE = 1, - NFIT_ARS_F_OVERFLOW = 1, - NFIT_ARS_TIMEOUT = 90, -}; - -struct nfit_spa { - struct list_head list; - struct nd_region *nd_region; - unsigned int ars_required:1; - u32 clear_err_unit; - u32 max_ars; - struct acpi_nfit_system_address spa[0]; -}; - -struct nfit_dcr { - struct list_head list; - struct acpi_nfit_control_region dcr[0]; -}; - -struct nfit_bdw { - struct list_head list; - struct acpi_nfit_data_region bdw[0]; -}; - -struct nfit_idt { - struct list_head list; - struct acpi_nfit_interleave idt[0]; -}; - -struct nfit_flush { - struct list_head list; - struct acpi_nfit_flush_address flush[0]; -}; - -struct nfit_memdev { - struct list_head list; - struct acpi_nfit_memory_map memdev[0]; -}; - -/* assembled tables for a given dimm/memory-device */ -struct nfit_mem { - struct nvdimm *nvdimm; - struct acpi_nfit_memory_map *memdev_dcr; - struct acpi_nfit_memory_map *memdev_pmem; - struct acpi_nfit_memory_map *memdev_bdw; - struct acpi_nfit_control_region *dcr; - struct acpi_nfit_data_region *bdw; - struct acpi_nfit_system_address *spa_dcr; - struct acpi_nfit_system_address *spa_bdw; - struct acpi_nfit_interleave *idt_dcr; - struct acpi_nfit_interleave *idt_bdw; - struct nfit_flush *nfit_flush; - struct 
list_head list; - struct acpi_device *adev; - struct acpi_nfit_desc *acpi_desc; - struct resource *flush_wpq; - unsigned long dsm_mask; - int family; -}; - -struct acpi_nfit_desc { - struct nvdimm_bus_descriptor nd_desc; - struct acpi_table_header acpi_header; - struct mutex init_mutex; - struct list_head memdevs; - struct list_head flushes; - struct list_head dimms; - struct list_head spas; - struct list_head dcrs; - struct list_head bdws; - struct list_head idts; - struct nvdimm_bus *nvdimm_bus; - struct device *dev; - struct nd_cmd_ars_status *ars_status; - size_t ars_status_size; - struct work_struct work; - struct kernfs_node *scrub_count_state; - unsigned int scrub_count; - unsigned int cancel:1; - unsigned long dimm_cmd_force_en; - unsigned long bus_cmd_force_en; - int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw); -}; - -enum nd_blk_mmio_selector { - BDW, - DCR, -}; - -struct nd_blk_addr { - union { - void __iomem *base; - void *aperture; - }; -}; - -struct nfit_blk { - struct nfit_blk_mmio { - struct nd_blk_addr addr; - u64 size; - u64 base_offset; - u32 line_size; - u32 num_lines; - u32 table_size; - struct acpi_nfit_interleave *idt; - struct acpi_nfit_system_address *spa; - } mmio[2]; - struct nd_region *nd_region; - u64 bdw_offset; /* post interleave offset */ - u64 stat_offset; - u64 cmd_offset; - u32 dimm_flags; -}; - -static inline struct acpi_nfit_memory_map *__to_nfit_memdev( - struct nfit_mem *nfit_mem) -{ - if (nfit_mem->memdev_dcr) - return nfit_mem->memdev_dcr; - return nfit_mem->memdev_pmem; -} - -static inline struct acpi_nfit_desc *to_acpi_desc( - struct nvdimm_bus_descriptor *nd_desc) -{ - return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); -} - -const u8 *to_nfit_uuid(enum nfit_uuids id); -int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); -void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); -#endif /* __NFIT_H__ */ diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig new file mode 100644 index 000000000000..dd0d53c52552 --- /dev/null +++ b/drivers/acpi/nfit/Kconfig @@ -0,0 +1,26 @@ +config ACPI_NFIT + tristate "ACPI NVDIMM Firmware Interface Table (NFIT)" + depends on PHYS_ADDR_T_64BIT + depends on BLK_DEV + depends on ARCH_HAS_MMIO_FLUSH + select LIBNVDIMM + help + Infrastructure to probe ACPI 6 compliant platforms for + NVDIMMs (NFIT) and register a libnvdimm device tree. In + addition to storage devices this also enables libnvdimm to pass + ACPI._DSM messages for platform/dimm configuration. + + To compile this driver as a module, choose M here: + the module will be called nfit. + +config ACPI_NFIT_DEBUG + bool "NFIT DSM debug" + depends on ACPI_NFIT + depends on DYNAMIC_DEBUG + default n + help + Enabling this option causes the nfit driver to dump the + input and output buffers of _DSM operations on the ACPI0012 + device and its children. This can be very verbose, so leave + it disabled unless you are debugging a hardware / firmware + issue. diff --git a/drivers/acpi/nfit/Makefile b/drivers/acpi/nfit/Makefile new file mode 100644 index 000000000000..eb95c5aff83b --- /dev/null +++ b/drivers/acpi/nfit/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ACPI_NFIT) := nfit.o +nfit-y := core.o diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c new file mode 100644 index 000000000000..19d0dfdf9633 --- /dev/null +++ b/drivers/acpi/nfit/core.c @@ -0,0 +1,2765 @@ +/* + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nfit.h" + +/* + * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is + * irrelevant. + */ +#include + +static bool force_enable_dimms; +module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); + +static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT; +module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds"); + +/* after three payloads of overflow, it's dead jim */ +static unsigned int scrub_overflow_abort = 3; +module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(scrub_overflow_abort, + "Number of times we overflow ARS results before abort"); + +static bool disable_vendor_specific; +module_param(disable_vendor_specific, bool, S_IRUGO); +MODULE_PARM_DESC(disable_vendor_specific, + "Limit commands to the publicly specified set\n"); + +static struct workqueue_struct *nfit_wq; + +struct nfit_table_prev { + struct list_head spas; + struct list_head memdevs; + struct list_head dcrs; + struct list_head bdws; + struct list_head idts; + struct list_head flushes; +}; + +static u8 nfit_uuid[NFIT_UUID_MAX][16]; + +const u8 *to_nfit_uuid(enum nfit_uuids id) +{ + return nfit_uuid[id]; +} +EXPORT_SYMBOL(to_nfit_uuid); + +static struct acpi_nfit_desc *to_acpi_nfit_desc( + struct nvdimm_bus_descriptor *nd_desc) +{ + return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); +} + +static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) +{ + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + /* + * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct + * acpi_device. 
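+ * Otherwise return NULL to indicate that this descriptor is not backed
+ * by an ACPI.NFIT device.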
+ */ + if (!nd_desc->provider_name + || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) + return NULL; + + return to_acpi_device(acpi_desc->dev); +} + +static int xlat_status(void *buf, unsigned int cmd) +{ + struct nd_cmd_clear_error *clear_err; + struct nd_cmd_ars_status *ars_status; + struct nd_cmd_ars_start *ars_start; + struct nd_cmd_ars_cap *ars_cap; + u16 flags; + + switch (cmd) { + case ND_CMD_ARS_CAP: + ars_cap = buf; + if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE) + return -ENOTTY; + + /* Command failed */ + if (ars_cap->status & 0xffff) + return -EIO; + + /* No supported scan types for this range */ + flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; + if ((ars_cap->status >> 16 & flags) == 0) + return -ENOTTY; + break; + case ND_CMD_ARS_START: + ars_start = buf; + /* ARS is in progress */ + if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY) + return -EBUSY; + + /* Command failed */ + if (ars_start->status & 0xffff) + return -EIO; + break; + case ND_CMD_ARS_STATUS: + ars_status = buf; + /* Command failed */ + if (ars_status->status & 0xffff) + return -EIO; + /* Check extended status (Upper two bytes) */ + if (ars_status->status == NFIT_ARS_STATUS_DONE) + return 0; + + /* ARS is in progress */ + if (ars_status->status == NFIT_ARS_STATUS_BUSY) + return -EBUSY; + + /* No ARS performed for the current boot */ + if (ars_status->status == NFIT_ARS_STATUS_NONE) + return -EAGAIN; + + /* + * ARS interrupted, either we overflowed or some other + * agent wants the scan to stop. If we didn't overflow + * then just continue with the returned results. + */ + if (ars_status->status == NFIT_ARS_STATUS_INTR) { + if (ars_status->flags & NFIT_ARS_F_OVERFLOW) + return -ENOSPC; + return 0; + } + + /* Unknown status */ + if (ars_status->status >> 16) + return -EIO; + break; + case ND_CMD_CLEAR_ERROR: + clear_err = buf; + if (clear_err->status & 0xffff) + return -EIO; + if (!clear_err->cleared) + return -EIO; + if (clear_err->length > clear_err->cleared) + return clear_err->cleared; + break; + default: + break; + } + + return 0; +} + +static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd, void *buf, + unsigned int buf_len, int *cmd_rc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + union acpi_object in_obj, in_buf, *out_obj; + const struct nd_cmd_desc *desc = NULL; + struct device *dev = acpi_desc->dev; + struct nd_cmd_pkg *call_pkg = NULL; + const char *cmd_name, *dimm_name; + unsigned long cmd_mask, dsm_mask; + acpi_handle handle; + unsigned int func; + const u8 *uuid; + u32 offset; + int rc, i; + + func = cmd; + if (cmd == ND_CMD_CALL) { + call_pkg = buf; + func = call_pkg->nd_command; + } + + if (nvdimm) { + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_device *adev = nfit_mem->adev; + + if (!adev) + return -ENOTTY; + if (call_pkg && nfit_mem->family != call_pkg->nd_family) + return -ENOTTY; + + dimm_name = nvdimm_name(nvdimm); + cmd_name = nvdimm_cmd_name(cmd); + cmd_mask = nvdimm_cmd_mask(nvdimm); + dsm_mask = nfit_mem->dsm_mask; + desc = nd_cmd_dimm_desc(cmd); + uuid = to_nfit_uuid(nfit_mem->family); + handle = adev->handle; + } else { + struct acpi_device *adev = to_acpi_dev(acpi_desc); + + cmd_name = nvdimm_bus_cmd_name(cmd); + cmd_mask = nd_desc->cmd_mask; + dsm_mask = cmd_mask; + desc = nd_cmd_bus_desc(cmd); + uuid = to_nfit_uuid(NFIT_DEV_BUS); + handle = adev->handle; + dimm_name = "bus"; + } + + if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) + return -ENOTTY; + + if 
(!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) + return -ENOTTY; + + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_BUFFER; + in_buf.buffer.pointer = buf; + in_buf.buffer.length = 0; + + /* libnvdimm has already validated the input envelope */ + for (i = 0; i < desc->in_num; i++) + in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, + i, buf); + + if (call_pkg) { + /* skip over package wrapper */ + in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; + in_buf.buffer.length = call_pkg->nd_size_in; + } + + if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { + dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", + __func__, dimm_name, cmd, func, + in_buf.buffer.length); + print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, + in_buf.buffer.pointer, + min_t(u32, 256, in_buf.buffer.length), true); + } + + out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj); + if (!out_obj) { + dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, + cmd_name); + return -EINVAL; + } + + if (call_pkg) { + call_pkg->nd_fw_size = out_obj->buffer.length; + memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, + out_obj->buffer.pointer, + min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); + + ACPI_FREE(out_obj); + /* + * Need to support FW function w/o known size in advance. + * Caller can determine required size based upon nd_fw_size. + * If we return an error (like elsewhere) then caller wouldn't + * be able to rely upon data returned to make calculation. + */ + return 0; + } + + if (out_obj->package.type != ACPI_TYPE_BUFFER) { + dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", + __func__, dimm_name, cmd_name, out_obj->type); + rc = -EINVAL; + goto out; + } + + if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { + dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, + dimm_name, cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, + 4, out_obj->buffer.pointer, min_t(u32, 128, + out_obj->buffer.length), true); + } + + for (i = 0, offset = 0; i < desc->out_num; i++) { + u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, + (u32 *) out_obj->buffer.pointer); + + if (offset + out_size > out_obj->buffer.length) { + dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", + __func__, dimm_name, cmd_name, i); + break; + } + + if (in_buf.buffer.length + offset + out_size > buf_len) { + dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", + __func__, dimm_name, cmd_name, i); + rc = -ENXIO; + goto out; + } + memcpy(buf + in_buf.buffer.length + offset, + out_obj->buffer.pointer + offset, out_size); + offset += out_size; + } + if (offset + in_buf.buffer.length < buf_len) { + if (i >= 1) { + /* + * status valid, return the number of bytes left + * unfilled in the output buffer + */ + rc = buf_len - offset - in_buf.buffer.length; + if (cmd_rc) + *cmd_rc = xlat_status(buf, cmd); + } else { + dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", + __func__, dimm_name, cmd_name, buf_len, + offset); + rc = -ENXIO; + } + } else { + rc = 0; + if (cmd_rc) + *cmd_rc = xlat_status(buf, cmd); + } + + out: + ACPI_FREE(out_obj); + + return rc; +} + +static const char *spa_type_name(u16 type) +{ + static const char *to_name[] = { + [NFIT_SPA_VOLATILE] = "volatile", + [NFIT_SPA_PM] = "pmem", + [NFIT_SPA_DCR] = "dimm-control-region", + [NFIT_SPA_BDW] = "block-data-window", + [NFIT_SPA_VDISK] = "volatile-disk", + [NFIT_SPA_VCD] = "volatile-cd", 
+ [NFIT_SPA_PDISK] = "persistent-disk", + [NFIT_SPA_PCD] = "persistent-cd", + + }; + + if (type > NFIT_SPA_PCD) + return "unknown"; + + return to_name[type]; +} + +static int nfit_spa_type(struct acpi_nfit_system_address *spa) +{ + int i; + + for (i = 0; i < NFIT_UUID_MAX; i++) + if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) + return i; + return -1; +} + +static bool add_spa(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, + struct acpi_nfit_system_address *spa) +{ + struct device *dev = acpi_desc->dev; + struct nfit_spa *nfit_spa; + + if (spa->header.length != sizeof(*spa)) + return false; + + list_for_each_entry(nfit_spa, &prev->spas, list) { + if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { + list_move_tail(&nfit_spa->list, &acpi_desc->spas); + return true; + } + } + + nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), + GFP_KERNEL); + if (!nfit_spa) + return false; + INIT_LIST_HEAD(&nfit_spa->list); + memcpy(nfit_spa->spa, spa, sizeof(*spa)); + list_add_tail(&nfit_spa->list, &acpi_desc->spas); + dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, + spa->range_index, + spa_type_name(nfit_spa_type(spa))); + return true; +} + +static bool add_memdev(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, + struct acpi_nfit_memory_map *memdev) +{ + struct device *dev = acpi_desc->dev; + struct nfit_memdev *nfit_memdev; + + if (memdev->header.length != sizeof(*memdev)) + return false; + + list_for_each_entry(nfit_memdev, &prev->memdevs, list) + if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { + list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); + return true; + } + + nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), + GFP_KERNEL); + if (!nfit_memdev) + return false; + INIT_LIST_HEAD(&nfit_memdev->list); + memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); + list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); + dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", + __func__, memdev->device_handle, memdev->range_index, + memdev->region_index); + return true; +} + +/* + * An implementation may provide a truncated control region if no block windows + * are defined. 
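+ * sizeof_dcr() below returns the size that is valid for the given table:
+ * the full structure when block windows are present, the truncated length
+ * otherwise, or 0 if the table is too short to be parsed.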
+ */ +static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) +{ + if (dcr->header.length < offsetof(struct acpi_nfit_control_region, + window_size)) + return 0; + if (dcr->windows) + return sizeof(*dcr); + return offsetof(struct acpi_nfit_control_region, window_size); +} + +static bool add_dcr(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, + struct acpi_nfit_control_region *dcr) +{ + struct device *dev = acpi_desc->dev; + struct nfit_dcr *nfit_dcr; + + if (!sizeof_dcr(dcr)) + return false; + + list_for_each_entry(nfit_dcr, &prev->dcrs, list) + if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { + list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); + return true; + } + + nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), + GFP_KERNEL); + if (!nfit_dcr) + return false; + INIT_LIST_HEAD(&nfit_dcr->list); + memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); + list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); + dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, + dcr->region_index, dcr->windows); + return true; +} + +static bool add_bdw(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, + struct acpi_nfit_data_region *bdw) +{ + struct device *dev = acpi_desc->dev; + struct nfit_bdw *nfit_bdw; + + if (bdw->header.length != sizeof(*bdw)) + return false; + list_for_each_entry(nfit_bdw, &prev->bdws, list) + if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { + list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); + return true; + } + + nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), + GFP_KERNEL); + if (!nfit_bdw) + return false; + INIT_LIST_HEAD(&nfit_bdw->list); + memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); + list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); + dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, + bdw->region_index, bdw->windows); + return true; +} + +static size_t sizeof_idt(struct acpi_nfit_interleave *idt) +{ + if (idt->header.length < sizeof(*idt)) + return 0; + return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); +} + +static bool add_idt(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, + struct acpi_nfit_interleave *idt) +{ + struct device *dev = acpi_desc->dev; + struct nfit_idt *nfit_idt; + + if (!sizeof_idt(idt)) + return false; + + list_for_each_entry(nfit_idt, &prev->idts, list) { + if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) + continue; + + if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { + list_move_tail(&nfit_idt->list, &acpi_desc->idts); + return true; + } + } + + nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), + GFP_KERNEL); + if (!nfit_idt) + return false; + INIT_LIST_HEAD(&nfit_idt->list); + memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); + list_add_tail(&nfit_idt->list, &acpi_desc->idts); + dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, + idt->interleave_index, idt->line_count); + return true; +} + +static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) +{ + if (flush->header.length < sizeof(*flush)) + return 0; + return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); +} + +static bool add_flush(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, + struct acpi_nfit_flush_address *flush) +{ + struct device *dev = acpi_desc->dev; + struct nfit_flush *nfit_flush; + + if (!sizeof_flush(flush)) + return false; + + list_for_each_entry(nfit_flush, &prev->flushes, list) { + if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) + continue; + + if (memcmp(nfit_flush->flush, flush, + 
sizeof_flush(flush)) == 0) { + list_move_tail(&nfit_flush->list, &acpi_desc->flushes); + return true; + } + } + + nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) + + sizeof_flush(flush), GFP_KERNEL); + if (!nfit_flush) + return false; + INIT_LIST_HEAD(&nfit_flush->list); + memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); + list_add_tail(&nfit_flush->list, &acpi_desc->flushes); + dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, + flush->device_handle, flush->hint_count); + return true; +} + +static void *add_table(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev, void *table, const void *end) +{ + struct device *dev = acpi_desc->dev; + struct acpi_nfit_header *hdr; + void *err = ERR_PTR(-ENOMEM); + + if (table >= end) + return NULL; + + hdr = table; + if (!hdr->length) { + dev_warn(dev, "found a zero length table '%d' parsing nfit\n", + hdr->type); + return NULL; + } + + switch (hdr->type) { + case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: + if (!add_spa(acpi_desc, prev, table)) + return err; + break; + case ACPI_NFIT_TYPE_MEMORY_MAP: + if (!add_memdev(acpi_desc, prev, table)) + return err; + break; + case ACPI_NFIT_TYPE_CONTROL_REGION: + if (!add_dcr(acpi_desc, prev, table)) + return err; + break; + case ACPI_NFIT_TYPE_DATA_REGION: + if (!add_bdw(acpi_desc, prev, table)) + return err; + break; + case ACPI_NFIT_TYPE_INTERLEAVE: + if (!add_idt(acpi_desc, prev, table)) + return err; + break; + case ACPI_NFIT_TYPE_FLUSH_ADDRESS: + if (!add_flush(acpi_desc, prev, table)) + return err; + break; + case ACPI_NFIT_TYPE_SMBIOS: + dev_dbg(dev, "%s: smbios\n", __func__); + break; + default: + dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); + break; + } + + return table + hdr->length; +} + +static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, + struct nfit_mem *nfit_mem) +{ + u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; + u16 dcr = nfit_mem->dcr->region_index; + struct nfit_spa *nfit_spa; + + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + u16 range_index = nfit_spa->spa->range_index; + int type = nfit_spa_type(nfit_spa->spa); + struct nfit_memdev *nfit_memdev; + + if (type != NFIT_SPA_BDW) + continue; + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + if (nfit_memdev->memdev->range_index != range_index) + continue; + if (nfit_memdev->memdev->device_handle != device_handle) + continue; + if (nfit_memdev->memdev->region_index != dcr) + continue; + + nfit_mem->spa_bdw = nfit_spa->spa; + return; + } + } + + dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", + nfit_mem->spa_dcr->range_index); + nfit_mem->bdw = NULL; +} + +static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, + struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) +{ + u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; + struct nfit_memdev *nfit_memdev; + struct nfit_bdw *nfit_bdw; + struct nfit_idt *nfit_idt; + u16 idt_idx, range_index; + + list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { + if (nfit_bdw->bdw->region_index != dcr) + continue; + nfit_mem->bdw = nfit_bdw->bdw; + break; + } + + if (!nfit_mem->bdw) + return; + + nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); + + if (!nfit_mem->spa_bdw) + return; + + range_index = nfit_mem->spa_bdw->range_index; + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + if (nfit_memdev->memdev->range_index != range_index || + nfit_memdev->memdev->region_index != dcr) + continue; + nfit_mem->memdev_bdw = nfit_memdev->memdev; + idt_idx = 
nfit_memdev->memdev->interleave_index; + list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { + if (nfit_idt->idt->interleave_index != idt_idx) + continue; + nfit_mem->idt_bdw = nfit_idt->idt; + break; + } + break; + } +} + +static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_system_address *spa) +{ + struct nfit_mem *nfit_mem, *found; + struct nfit_memdev *nfit_memdev; + int type = nfit_spa_type(spa); + + switch (type) { + case NFIT_SPA_DCR: + case NFIT_SPA_PM: + break; + default: + return 0; + } + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct nfit_flush *nfit_flush; + struct nfit_dcr *nfit_dcr; + u32 device_handle; + u16 dcr; + + if (nfit_memdev->memdev->range_index != spa->range_index) + continue; + found = NULL; + dcr = nfit_memdev->memdev->region_index; + device_handle = nfit_memdev->memdev->device_handle; + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) + if (__to_nfit_memdev(nfit_mem)->device_handle + == device_handle) { + found = nfit_mem; + break; + } + + if (found) + nfit_mem = found; + else { + nfit_mem = devm_kzalloc(acpi_desc->dev, + sizeof(*nfit_mem), GFP_KERNEL); + if (!nfit_mem) + return -ENOMEM; + INIT_LIST_HEAD(&nfit_mem->list); + nfit_mem->acpi_desc = acpi_desc; + list_add(&nfit_mem->list, &acpi_desc->dimms); + } + + list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { + if (nfit_dcr->dcr->region_index != dcr) + continue; + /* + * Record the control region for the dimm. For + * the ACPI 6.1 case, where there are separate + * control regions for the pmem vs blk + * interfaces, be sure to record the extended + * blk details. + */ + if (!nfit_mem->dcr) + nfit_mem->dcr = nfit_dcr->dcr; + else if (nfit_mem->dcr->windows == 0 + && nfit_dcr->dcr->windows) + nfit_mem->dcr = nfit_dcr->dcr; + break; + } + + list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { + struct acpi_nfit_flush_address *flush; + u16 i; + + if (nfit_flush->flush->device_handle != device_handle) + continue; + nfit_mem->nfit_flush = nfit_flush; + flush = nfit_flush->flush; + nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, + flush->hint_count + * sizeof(struct resource), GFP_KERNEL); + if (!nfit_mem->flush_wpq) + return -ENOMEM; + for (i = 0; i < flush->hint_count; i++) { + struct resource *res = &nfit_mem->flush_wpq[i]; + + res->start = flush->hint_address[i]; + res->end = res->start + 8 - 1; + } + break; + } + + if (dcr && !nfit_mem->dcr) { + dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", + spa->range_index, dcr); + return -ENODEV; + } + + if (type == NFIT_SPA_DCR) { + struct nfit_idt *nfit_idt; + u16 idt_idx; + + /* multiple dimms may share a SPA when interleaved */ + nfit_mem->spa_dcr = spa; + nfit_mem->memdev_dcr = nfit_memdev->memdev; + idt_idx = nfit_memdev->memdev->interleave_index; + list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { + if (nfit_idt->idt->interleave_index != idt_idx) + continue; + nfit_mem->idt_dcr = nfit_idt->idt; + break; + } + nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); + } else { + /* + * A single dimm may belong to multiple SPA-PM + * ranges, record at least one in addition to + * any SPA-DCR range. 
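+ * (A later SPA-PM range that maps this dimm simply overwrites
+ * memdev_pmem; any one association is sufficient.)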
+ */ + nfit_mem->memdev_pmem = nfit_memdev->memdev; + } + } + + return 0; +} + +static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) +{ + struct nfit_mem *a = container_of(_a, typeof(*a), list); + struct nfit_mem *b = container_of(_b, typeof(*b), list); + u32 handleA, handleB; + + handleA = __to_nfit_memdev(a)->device_handle; + handleB = __to_nfit_memdev(b)->device_handle; + if (handleA < handleB) + return -1; + else if (handleA > handleB) + return 1; + return 0; +} + +static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) +{ + struct nfit_spa *nfit_spa; + + /* + * For each SPA-DCR or SPA-PMEM address range find its + * corresponding MEMDEV(s). From each MEMDEV find the + * corresponding DCR. Then, if we're operating on a SPA-DCR, + * try to find a SPA-BDW and a corresponding BDW that references + * the DCR. Throw it all into an nfit_mem object. Note, that + * BDWs are optional. + */ + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + int rc; + + rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); + if (rc) + return rc; + } + + list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); + + return 0; +} + +static ssize_t revision_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); +} +static DEVICE_ATTR_RO(revision); + +/* + * This shows the number of full Address Range Scrubs that have been + * completed since driver load time. Userspace can wait on this using + * select/poll etc. A '+' at the end indicates an ARS is in progress + */ +static ssize_t scrub_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus_descriptor *nd_desc; + ssize_t rc = -ENXIO; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, + (work_busy(&acpi_desc->work)) ? 
"+\n" : "\n"); + } + device_unlock(dev); + return rc; +} + +static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); + +static ssize_t scrub_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus_descriptor *nd_desc; + ssize_t rc; + long val; + + rc = kstrtol(buf, 0, &val); + if (rc) + return rc; + if (val != 1) + return -EINVAL; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + rc = acpi_nfit_ars_rescan(acpi_desc); + } + device_unlock(dev); + if (rc) + return rc; + return size; +} +static DEVICE_ATTR_RW(scrub); + +static bool ars_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START + | 1 << ND_CMD_ARS_STATUS; + + return (nd_desc->cmd_mask & mask) == mask; +} + +static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + + if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) + return 0; + return a->mode; +} + +static struct attribute *acpi_nfit_attributes[] = { + &dev_attr_revision.attr, + &dev_attr_scrub.attr, + NULL, +}; + +static struct attribute_group acpi_nfit_attribute_group = { + .name = "nfit", + .attrs = acpi_nfit_attributes, + .is_visible = nfit_visible, +}; + +static const struct attribute_group *acpi_nfit_attribute_groups[] = { + &nvdimm_bus_attribute_group, + &acpi_nfit_attribute_group, + NULL, +}; + +static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + + return __to_nfit_memdev(nfit_mem); +} + +static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + + return nfit_mem->dcr; +} + +static ssize_t handle_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); + + return sprintf(buf, "%#x\n", memdev->device_handle); +} +static DEVICE_ATTR_RO(handle); + +static ssize_t phys_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); + + return sprintf(buf, "%#x\n", memdev->physical_id); +} +static DEVICE_ATTR_RO(phys_id); + +static ssize_t vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); +} +static DEVICE_ATTR_RO(vendor); + +static ssize_t rev_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); +} +static DEVICE_ATTR_RO(rev_id); + +static ssize_t device_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); +} +static DEVICE_ATTR_RO(device); + +static ssize_t subsystem_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return 
sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); +} +static DEVICE_ATTR_RO(subsystem_vendor); + +static ssize_t subsystem_rev_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%04x\n", + be16_to_cpu(dcr->subsystem_revision_id)); +} +static DEVICE_ATTR_RO(subsystem_rev_id); + +static ssize_t subsystem_device_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); +} +static DEVICE_ATTR_RO(subsystem_device); + +static int num_nvdimm_formats(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + int formats = 0; + + if (nfit_mem->memdev_pmem) + formats++; + if (nfit_mem->memdev_bdw) + formats++; + return formats; +} + +static ssize_t format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code)); +} +static DEVICE_ATTR_RO(format); + +static ssize_t format1_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 handle; + ssize_t rc = -ENXIO; + struct nfit_mem *nfit_mem; + struct nfit_memdev *nfit_memdev; + struct acpi_nfit_desc *acpi_desc; + struct nvdimm *nvdimm = to_nvdimm(dev); + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + nfit_mem = nvdimm_provider_data(nvdimm); + acpi_desc = nfit_mem->acpi_desc; + handle = to_nfit_memdev(dev)->device_handle; + + /* assumes DIMMs have at most 2 published interface codes */ + mutex_lock(&acpi_desc->init_mutex); + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; + struct nfit_dcr *nfit_dcr; + + if (memdev->device_handle != handle) + continue; + + list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { + if (nfit_dcr->dcr->region_index != memdev->region_index) + continue; + if (nfit_dcr->dcr->code == dcr->code) + continue; + rc = sprintf(buf, "%#x\n", + be16_to_cpu(nfit_dcr->dcr->code)); + break; + } + if (rc != ENXIO) + break; + } + mutex_unlock(&acpi_desc->init_mutex); + return rc; +} +static DEVICE_ATTR_RO(format1); + +static ssize_t formats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + + return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); +} +static DEVICE_ATTR_RO(formats); + +static ssize_t serial_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); +} +static DEVICE_ATTR_RO(serial); + +static ssize_t family_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + + if (nfit_mem->family < 0) + return -ENXIO; + return sprintf(buf, "%d\n", nfit_mem->family); +} +static DEVICE_ATTR_RO(family); + +static ssize_t dsm_mask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + + if (nfit_mem->family < 0) + return -ENXIO; + return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); +} +static DEVICE_ATTR_RO(dsm_mask); + +static ssize_t flags_show(struct device *dev, 
+ struct device_attribute *attr, char *buf) +{ + u16 flags = to_nfit_memdev(dev)->flags; + + return sprintf(buf, "%s%s%s%s%s\n", + flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", + flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", + flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", + flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", + flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : ""); +} +static DEVICE_ATTR_RO(flags); + +static ssize_t id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); + + if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) + return sprintf(buf, "%04x-%02x-%04x-%08x\n", + be16_to_cpu(dcr->vendor_id), + dcr->manufacturing_location, + be16_to_cpu(dcr->manufacturing_date), + be32_to_cpu(dcr->serial_number)); + else + return sprintf(buf, "%04x-%08x\n", + be16_to_cpu(dcr->vendor_id), + be32_to_cpu(dcr->serial_number)); +} +static DEVICE_ATTR_RO(id); + +static struct attribute *acpi_nfit_dimm_attributes[] = { + &dev_attr_handle.attr, + &dev_attr_phys_id.attr, + &dev_attr_vendor.attr, + &dev_attr_device.attr, + &dev_attr_rev_id.attr, + &dev_attr_subsystem_vendor.attr, + &dev_attr_subsystem_device.attr, + &dev_attr_subsystem_rev_id.attr, + &dev_attr_format.attr, + &dev_attr_formats.attr, + &dev_attr_format1.attr, + &dev_attr_serial.attr, + &dev_attr_flags.attr, + &dev_attr_id.attr, + &dev_attr_family.attr, + &dev_attr_dsm_mask.attr, + NULL, +}; + +static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvdimm *nvdimm = to_nvdimm(dev); + + if (!to_nfit_dcr(dev)) + return 0; + if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) + return 0; + return a->mode; +} + +static struct attribute_group acpi_nfit_dimm_attribute_group = { + .name = "nfit", + .attrs = acpi_nfit_dimm_attributes, + .is_visible = acpi_nfit_dimm_attr_visible, +}; + +static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { + &nvdimm_attribute_group, + &nd_device_attribute_group, + &acpi_nfit_dimm_attribute_group, + NULL, +}; + +static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, + u32 device_handle) +{ + struct nfit_mem *nfit_mem; + + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) + if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) + return nfit_mem->nvdimm; + + return NULL; +} + +static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, + struct nfit_mem *nfit_mem, u32 device_handle) +{ + struct acpi_device *adev, *adev_dimm; + struct device *dev = acpi_desc->dev; + unsigned long dsm_mask; + const u8 *uuid; + int i; + + /* nfit test assumes 1:1 relationship between commands and dsms */ + nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; + nfit_mem->family = NVDIMM_FAMILY_INTEL; + adev = to_acpi_dev(acpi_desc); + if (!adev) + return 0; + + adev_dimm = acpi_find_child_device(adev, device_handle, false); + nfit_mem->adev = adev_dimm; + if (!adev_dimm) { + dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", + device_handle); + return force_enable_dimms ? 0 : -ENODEV; + } + + /* + * Until standardization materializes we need to consider 4 + * different command sets. Note, that checking for function0 (bit0) + * tells us if any commands are reachable through this uuid. 
+ */ + for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) + break; + + /* limit the supported commands to those that are publicly documented */ + nfit_mem->family = i; + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { + dsm_mask = 0x3fe; + if (disable_vendor_specific) + dsm_mask &= ~(1 << ND_CMD_VENDOR); + } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { + dsm_mask = 0x1c3c76; + } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { + dsm_mask = 0x1fe; + if (disable_vendor_specific) + dsm_mask &= ~(1 << 8); + } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { + dsm_mask = 0xffffffff; + } else { + dev_err(dev, "unknown dimm command family\n"); + nfit_mem->family = -1; + return force_enable_dimms ? 0 : -ENODEV; + } + + uuid = to_nfit_uuid(nfit_mem->family); + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) + if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) + set_bit(i, &nfit_mem->dsm_mask); + + return 0; +} + +static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) +{ + struct nfit_mem *nfit_mem; + int dimm_count = 0; + + list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { + struct acpi_nfit_flush_address *flush; + unsigned long flags = 0, cmd_mask; + struct nvdimm *nvdimm; + u32 device_handle; + u16 mem_flags; + int rc; + + device_handle = __to_nfit_memdev(nfit_mem)->device_handle; + nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); + if (nvdimm) { + dimm_count++; + continue; + } + + if (nfit_mem->bdw && nfit_mem->memdev_pmem) + flags |= NDD_ALIASING; + + mem_flags = __to_nfit_memdev(nfit_mem)->flags; + if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) + flags |= NDD_UNARMED; + + rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); + if (rc) + continue; + + /* + * TODO: provide translation for non-NVDIMM_FAMILY_INTEL + * devices (i.e. from nd_cmd to acpi_dsm) to standardize the + * userspace interface. + */ + cmd_mask = 1UL << ND_CMD_CALL; + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) + cmd_mask |= nfit_mem->dsm_mask; + + flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush + : NULL; + nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, + acpi_nfit_dimm_attribute_groups, + flags, cmd_mask, flush ? flush->hint_count : 0, + nfit_mem->flush_wpq); + if (!nvdimm) + return -ENOMEM; + + nfit_mem->nvdimm = nvdimm; + dimm_count++; + + if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) + continue; + + dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", + nvdimm_name(nvdimm), + mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", + mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", + mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", + mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? 
" not_armed" : ""); + + } + + return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); +} + +static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) +{ + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); + struct acpi_device *adev; + int i; + + nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; + adev = to_acpi_dev(acpi_desc); + if (!adev) + return; + + for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) + if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) + set_bit(i, &nd_desc->cmd_mask); +} + +static ssize_t range_index_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_region *nd_region = to_nd_region(dev); + struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); + + return sprintf(buf, "%d\n", nfit_spa->spa->range_index); +} +static DEVICE_ATTR_RO(range_index); + +static struct attribute *acpi_nfit_region_attributes[] = { + &dev_attr_range_index.attr, + NULL, +}; + +static struct attribute_group acpi_nfit_region_attribute_group = { + .name = "nfit", + .attrs = acpi_nfit_region_attributes, +}; + +static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { + &nd_region_attribute_group, + &nd_mapping_attribute_group, + &nd_device_attribute_group, + &nd_numa_attribute_group, + &acpi_nfit_region_attribute_group, + NULL, +}; + +/* enough info to uniquely specify an interleave set */ +struct nfit_set_info { + struct nfit_set_info_map { + u64 region_offset; + u32 serial_number; + u32 pad; + } mapping[0]; +}; + +static size_t sizeof_nfit_set_info(int num_mappings) +{ + return sizeof(struct nfit_set_info) + + num_mappings * sizeof(struct nfit_set_info_map); +} + +static int cmp_map(const void *m0, const void *m1) +{ + const struct nfit_set_info_map *map0 = m0; + const struct nfit_set_info_map *map1 = m1; + + return memcmp(&map0->region_offset, &map1->region_offset, + sizeof(u64)); +} + +/* Retrieve the nth entry referencing this spa */ +static struct acpi_nfit_memory_map *memdev_from_spa( + struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) +{ + struct nfit_memdev *nfit_memdev; + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) + if (nfit_memdev->memdev->range_index == range_index) + if (n-- == 0) + return nfit_memdev->memdev; + return NULL; +} + +static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, + struct nd_region_desc *ndr_desc, + struct acpi_nfit_system_address *spa) +{ + int i, spa_type = nfit_spa_type(spa); + struct device *dev = acpi_desc->dev; + struct nd_interleave_set *nd_set; + u16 nr = ndr_desc->num_mappings; + struct nfit_set_info *info; + + if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) + /* pass */; + else + return 0; + + nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); + if (!nd_set) + return -ENOMEM; + + info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); + if (!info) + return -ENOMEM; + for (i = 0; i < nr; i++) { + struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; + struct nfit_set_info_map *map = &info->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, + spa->range_index, i); + + if (!memdev || !nfit_mem->dcr) { + dev_err(dev, "%s: failed to find DCR\n", __func__); + return -ENODEV; + } + + map->region_offset = memdev->region_offset; + map->serial_number = nfit_mem->dcr->serial_number; + } + + sort(&info->mapping[0], 
nr, sizeof(struct nfit_set_info_map), + cmp_map, NULL); + nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + ndr_desc->nd_set = nd_set; + devm_kfree(dev, info); + + return 0; +} + +static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) +{ + struct acpi_nfit_interleave *idt = mmio->idt; + u32 sub_line_offset, line_index, line_offset; + u64 line_no, table_skip_count, table_offset; + + line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); + table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); + line_offset = idt->line_offset[line_index] + * mmio->line_size; + table_offset = table_skip_count * mmio->table_size; + + return mmio->base_offset + line_offset + table_offset + sub_line_offset; +} + +static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) +{ + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; + u64 offset = nfit_blk->stat_offset + mmio->size * bw; + + if (mmio->num_lines) + offset = to_interleave_offset(offset, mmio); + + return readl(mmio->addr.base + offset); +} + +static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, + resource_size_t dpa, unsigned int len, unsigned int write) +{ + u64 cmd, offset; + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; + + enum { + BCW_OFFSET_MASK = (1ULL << 48)-1, + BCW_LEN_SHIFT = 48, + BCW_LEN_MASK = (1ULL << 8) - 1, + BCW_CMD_SHIFT = 56, + }; + + cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; + len = len >> L1_CACHE_SHIFT; + cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; + cmd |= ((u64) write) << BCW_CMD_SHIFT; + + offset = nfit_blk->cmd_offset + mmio->size * bw; + if (mmio->num_lines) + offset = to_interleave_offset(offset, mmio); + + writeq(cmd, mmio->addr.base + offset); + nvdimm_flush(nfit_blk->nd_region); + + if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) + readq(mmio->addr.base + offset); +} + +static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, + resource_size_t dpa, void *iobuf, size_t len, int rw, + unsigned int lane) +{ + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; + unsigned int copied = 0; + u64 base_offset; + int rc; + + base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES + + lane * mmio->size; + write_blk_ctl(nfit_blk, lane, dpa, len, rw); + while (len) { + unsigned int c; + u64 offset; + + if (mmio->num_lines) { + u32 line_offset; + + offset = to_interleave_offset(base_offset + copied, + mmio); + div_u64_rem(offset, mmio->line_size, &line_offset); + c = min_t(size_t, len, mmio->line_size - line_offset); + } else { + offset = base_offset + nfit_blk->bdw_offset; + c = len; + } + + if (rw) + memcpy_to_pmem(mmio->addr.aperture + offset, + iobuf + copied, c); + else { + if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) + mmio_flush_range((void __force *) + mmio->addr.aperture + offset, c); + + memcpy_from_pmem(iobuf + copied, + mmio->addr.aperture + offset, c); + } + + copied += c; + len -= c; + } + + if (rw) + nvdimm_flush(nfit_blk->nd_region); + + rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; + return rc; +} + +static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, + resource_size_t dpa, void *iobuf, u64 len, int rw) +{ + struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; + struct nd_region *nd_region = nfit_blk->nd_region; + unsigned int lane, copied = 0; + int rc = 0; + + lane = nd_region_acquire_lane(nd_region); + while (len) { + u64 c = min(len, mmio->size); + + rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, + iobuf + copied, c, rw, lane); + if (rc) + break; + + copied += c; + len -= c; + } + nd_region_release_lane(nd_region, lane); + + return rc; +} + +static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, + struct acpi_nfit_interleave *idt, u16 interleave_ways) +{ + if (idt) { + mmio->num_lines = idt->line_count; + mmio->line_size = idt->line_size; + if (interleave_ways == 0) + return -ENXIO; + mmio->table_size = mmio->num_lines * interleave_ways + * mmio->line_size; + } + + return 0; +} + +static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) +{ + struct nd_cmd_dimm_flags flags; + int rc; + + memset(&flags, 0, sizeof(flags)); + rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, + sizeof(flags), NULL); + + if (rc >= 0 && flags.status == 0) + nfit_blk->dimm_flags = flags.flags; + else if (rc == -ENOTTY) { + /* fall back to a conservative default */ + nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; + rc = 0; + } else + rc = -ENXIO; + + return rc; +} + +static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, + struct device *dev) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct nd_blk_region *ndbr = to_nd_blk_region(dev); + struct nfit_blk_mmio *mmio; + struct nfit_blk *nfit_blk; + struct nfit_mem *nfit_mem; + struct nvdimm *nvdimm; + int rc; + + nvdimm = nd_blk_region_to_dimm(ndbr); + nfit_mem = nvdimm_provider_data(nvdimm); + if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { + dev_dbg(dev, "%s: missing%s%s%s\n", __func__, + nfit_mem ? "" : " nfit_mem", + (nfit_mem && nfit_mem->dcr) ? "" : " dcr", + (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); + return -ENXIO; + } + + nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); + if (!nfit_blk) + return -ENOMEM; + nd_blk_region_set_provider_data(ndbr, nfit_blk); + nfit_blk->nd_region = to_nd_region(dev); + + /* map block aperture memory */ + nfit_blk->bdw_offset = nfit_mem->bdw->offset; + mmio = &nfit_blk->mmio[BDW]; + mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, + nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); + if (!mmio->addr.base) { + dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, + nvdimm_name(nvdimm)); + return -ENOMEM; + } + mmio->size = nfit_mem->bdw->size; + mmio->base_offset = nfit_mem->memdev_bdw->region_offset; + mmio->idt = nfit_mem->idt_bdw; + mmio->spa = nfit_mem->spa_bdw; + rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, + nfit_mem->memdev_bdw->interleave_ways); + if (rc) { + dev_dbg(dev, "%s: %s failed to init bdw interleave\n", + __func__, nvdimm_name(nvdimm)); + return rc; + } + + /* map block control memory */ + nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; + nfit_blk->stat_offset = nfit_mem->dcr->status_offset; + mmio = &nfit_blk->mmio[DCR]; + mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, + nfit_mem->spa_dcr->length); + if (!mmio->addr.base) { + dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, + nvdimm_name(nvdimm)); + return -ENOMEM; + } + mmio->size = nfit_mem->dcr->window_size; + mmio->base_offset = nfit_mem->memdev_dcr->region_offset; + mmio->idt = nfit_mem->idt_dcr; + mmio->spa = nfit_mem->spa_dcr; + rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, + nfit_mem->memdev_dcr->interleave_ways); + if (rc) { + dev_dbg(dev, "%s: %s failed to init dcr interleave\n", + __func__, nvdimm_name(nvdimm)); + return rc; + } + + rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); + if (rc < 0) { + dev_dbg(dev, "%s: %s failed get DIMM flags\n", + __func__, nvdimm_name(nvdimm)); + return rc; + } + + if (nvdimm_has_flush(nfit_blk->nd_region) < 0) + dev_warn(dev, "unable to guarantee persistence of writes\n"); + + if (mmio->line_size == 0) + return 0; + + if ((u32) nfit_blk->cmd_offset % mmio->line_size + + 8 > mmio->line_size) { + dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); + return -ENXIO; + } else if ((u32) nfit_blk->stat_offset % mmio->line_size + + 8 > mmio->line_size) { + dev_dbg(dev, "stat_offset crosses interleave boundary\n"); + return -ENXIO; + } + + return 0; +} + +static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, + struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) +{ + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + struct acpi_nfit_system_address *spa = nfit_spa->spa; + int cmd_rc, rc; + + cmd->address = spa->address; + cmd->length = spa->length; + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, + sizeof(*cmd), &cmd_rc); + if (rc < 0) + return rc; + return cmd_rc; +} + +static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) +{ + int rc; + int cmd_rc; + struct nd_cmd_ars_start ars_start; + struct acpi_nfit_system_address *spa = nfit_spa->spa; + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + memset(&ars_start, 0, sizeof(ars_start)); + ars_start.address = spa->address; + ars_start.length = spa->length; + if (nfit_spa_type(spa) == NFIT_SPA_PM) + ars_start.type = ND_ARS_PERSISTENT; + else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) + ars_start.type = ND_ARS_VOLATILE; + else + return -ENOTTY; + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, + 
sizeof(ars_start), &cmd_rc); + + if (rc < 0) + return rc; + return cmd_rc; +} + +static int ars_continue(struct acpi_nfit_desc *acpi_desc) +{ + int rc, cmd_rc; + struct nd_cmd_ars_start ars_start; + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; + + memset(&ars_start, 0, sizeof(ars_start)); + ars_start.address = ars_status->restart_address; + ars_start.length = ars_status->restart_length; + ars_start.type = ars_status->type; + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, + sizeof(ars_start), &cmd_rc); + if (rc < 0) + return rc; + return cmd_rc; +} + +static int ars_get_status(struct acpi_nfit_desc *acpi_desc) +{ + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; + int rc, cmd_rc; + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, + acpi_desc->ars_status_size, &cmd_rc); + if (rc < 0) + return rc; + return cmd_rc; +} + +static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus, + struct nd_cmd_ars_status *ars_status) +{ + int rc; + u32 i; + + for (i = 0; i < ars_status->num_records; i++) { + rc = nvdimm_bus_add_poison(nvdimm_bus, + ars_status->records[i].err_address, + ars_status->records[i].length); + if (rc) + return rc; + } + + return 0; +} + +static void acpi_nfit_remove_resource(void *data) +{ + struct resource *res = data; + + remove_resource(res); +} + +static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, + struct nd_region_desc *ndr_desc) +{ + struct resource *res, *nd_res = ndr_desc->res; + int is_pmem, ret; + + /* No operation if the region is already registered as PMEM */ + is_pmem = region_intersects(nd_res->start, resource_size(nd_res), + IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); + if (is_pmem == REGION_INTERSECTS) + return 0; + + res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + res->name = "Persistent Memory"; + res->start = nd_res->start; + res->end = nd_res->end; + res->flags = IORESOURCE_MEM; + res->desc = IORES_DESC_PERSISTENT_MEMORY; + + ret = insert_resource(&iomem_resource, res); + if (ret) + return ret; + + ret = devm_add_action_or_reset(acpi_desc->dev, + acpi_nfit_remove_resource, + res); + if (ret) + return ret; + + return 0; +} + +static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, + struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, + struct acpi_nfit_memory_map *memdev, + struct nfit_spa *nfit_spa) +{ + struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, + memdev->device_handle); + struct acpi_nfit_system_address *spa = nfit_spa->spa; + struct nd_blk_region_desc *ndbr_desc; + struct nfit_mem *nfit_mem; + int blk_valid = 0; + + if (!nvdimm) { + dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", + spa->range_index, memdev->device_handle); + return -ENODEV; + } + + nd_mapping->nvdimm = nvdimm; + switch (nfit_spa_type(spa)) { + case NFIT_SPA_PM: + case NFIT_SPA_VOLATILE: + nd_mapping->start = memdev->address; + nd_mapping->size = memdev->region_size; + break; + case NFIT_SPA_DCR: + nfit_mem = nvdimm_provider_data(nvdimm); + if (!nfit_mem || !nfit_mem->bdw) { + dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", + spa->range_index, nvdimm_name(nvdimm)); + } else { + nd_mapping->size = nfit_mem->bdw->capacity; + nd_mapping->start = nfit_mem->bdw->start_address; + ndr_desc->num_lanes = nfit_mem->bdw->windows; + blk_valid = 1; + } + + ndr_desc->nd_mapping = 
nd_mapping; + ndr_desc->num_mappings = blk_valid; + ndbr_desc = to_blk_region_desc(ndr_desc); + ndbr_desc->enable = acpi_nfit_blk_region_enable; + ndbr_desc->do_io = acpi_desc->blk_do_io; + nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, + ndr_desc); + if (!nfit_spa->nd_region) + return -ENOMEM; + break; + } + + return 0; +} + +static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) +{ + return (nfit_spa_type(spa) == NFIT_SPA_VDISK || + nfit_spa_type(spa) == NFIT_SPA_VCD || + nfit_spa_type(spa) == NFIT_SPA_PDISK || + nfit_spa_type(spa) == NFIT_SPA_PCD); +} + +static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa) +{ + static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; + struct acpi_nfit_system_address *spa = nfit_spa->spa; + struct nd_blk_region_desc ndbr_desc; + struct nd_region_desc *ndr_desc; + struct nfit_memdev *nfit_memdev; + struct nvdimm_bus *nvdimm_bus; + struct resource res; + int count = 0, rc; + + if (nfit_spa->nd_region) + return 0; + + if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { + dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", + __func__); + return 0; + } + + memset(&res, 0, sizeof(res)); + memset(&nd_mappings, 0, sizeof(nd_mappings)); + memset(&ndbr_desc, 0, sizeof(ndbr_desc)); + res.start = spa->address; + res.end = res.start + spa->length - 1; + ndr_desc = &ndbr_desc.ndr_desc; + ndr_desc->res = &res; + ndr_desc->provider_data = nfit_spa; + ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; + if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) + ndr_desc->numa_node = acpi_map_pxm_to_online_node( + spa->proximity_domain); + else + ndr_desc->numa_node = NUMA_NO_NODE; + + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; + struct nd_mapping *nd_mapping; + + if (memdev->range_index != spa->range_index) + continue; + if (count >= ND_MAX_MAPPINGS) { + dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", + spa->range_index, ND_MAX_MAPPINGS); + return -ENXIO; + } + nd_mapping = &nd_mappings[count++]; + rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, + memdev, nfit_spa); + if (rc) + goto out; + } + + ndr_desc->nd_mapping = nd_mappings; + ndr_desc->num_mappings = count; + rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); + if (rc) + goto out; + + nvdimm_bus = acpi_desc->nvdimm_bus; + if (nfit_spa_type(spa) == NFIT_SPA_PM) { + rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); + if (rc) { + dev_warn(acpi_desc->dev, + "failed to insert pmem resource to iomem: %d\n", + rc); + goto out; + } + + nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, + ndr_desc); + if (!nfit_spa->nd_region) + rc = -ENOMEM; + } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { + nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, + ndr_desc); + if (!nfit_spa->nd_region) + rc = -ENOMEM; + } else if (nfit_spa_is_virtual(spa)) { + nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, + ndr_desc); + if (!nfit_spa->nd_region) + rc = -ENOMEM; + } + + out: + if (rc) + dev_err(acpi_desc->dev, "failed to register spa range %d\n", + nfit_spa->spa->range_index); + return rc; +} + +static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, + u32 max_ars) +{ + struct device *dev = acpi_desc->dev; + struct nd_cmd_ars_status *ars_status; + + if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { + memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); + 
return 0; + } + + if (acpi_desc->ars_status) + devm_kfree(dev, acpi_desc->ars_status); + acpi_desc->ars_status = NULL; + ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL); + if (!ars_status) + return -ENOMEM; + acpi_desc->ars_status = ars_status; + acpi_desc->ars_status_size = max_ars; + return 0; +} + +static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa) +{ + struct acpi_nfit_system_address *spa = nfit_spa->spa; + int rc; + + if (!nfit_spa->max_ars) { + struct nd_cmd_ars_cap ars_cap; + + memset(&ars_cap, 0, sizeof(ars_cap)); + rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); + if (rc < 0) + return rc; + nfit_spa->max_ars = ars_cap.max_ars_out; + nfit_spa->clear_err_unit = ars_cap.clear_err_unit; + /* check that the supported scrub types match the spa type */ + if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE && + ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0) + return -ENOTTY; + else if (nfit_spa_type(spa) == NFIT_SPA_PM && + ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0) + return -ENOTTY; + } + + if (ars_status_alloc(acpi_desc, nfit_spa->max_ars)) + return -ENOMEM; + + rc = ars_get_status(acpi_desc); + if (rc < 0 && rc != -ENOSPC) + return rc; + + if (ars_status_process_records(acpi_desc->nvdimm_bus, + acpi_desc->ars_status)) + return -ENOMEM; + + return 0; +} + +static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa) +{ + struct acpi_nfit_system_address *spa = nfit_spa->spa; + unsigned int overflow_retry = scrub_overflow_abort; + u64 init_ars_start = 0, init_ars_len = 0; + struct device *dev = acpi_desc->dev; + unsigned int tmo = scrub_timeout; + int rc; + + if (!nfit_spa->ars_required || !nfit_spa->nd_region) + return; + + rc = ars_start(acpi_desc, nfit_spa); + /* + * If we timed out the initial scan we'll still be busy here, + * and will wait another timeout before giving up permanently. + */ + if (rc < 0 && rc != -EBUSY) + return; + + do { + u64 ars_start, ars_len; + + if (acpi_desc->cancel) + break; + rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); + if (rc == -ENOTTY) + break; + if (rc == -EBUSY && !tmo) { + dev_warn(dev, "range %d ars timeout, aborting\n", + spa->range_index); + break; + } + + if (rc == -EBUSY) { + /* + * Note, entries may be appended to the list + * while the lock is dropped, but the workqueue + * being active prevents entries being deleted / + * freed. + */ + mutex_unlock(&acpi_desc->init_mutex); + ssleep(1); + tmo--; + mutex_lock(&acpi_desc->init_mutex); + continue; + } + + /* we got some results, but there are more pending... 
*/ + if (rc == -ENOSPC && overflow_retry--) { + if (!init_ars_len) { + init_ars_len = acpi_desc->ars_status->length; + init_ars_start = acpi_desc->ars_status->address; + } + rc = ars_continue(acpi_desc); + } + + if (rc < 0) { + dev_warn(dev, "range %d ars continuation failed\n", + spa->range_index); + break; + } + + if (init_ars_len) { + ars_start = init_ars_start; + ars_len = init_ars_len; + } else { + ars_start = acpi_desc->ars_status->address; + ars_len = acpi_desc->ars_status->length; + } + dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", + spa->range_index, ars_start, ars_len); + /* notify the region about new poison entries */ + nvdimm_region_notify(nfit_spa->nd_region, + NVDIMM_REVALIDATE_POISON); + break; + } while (1); +} + +static void acpi_nfit_scrub(struct work_struct *work) +{ + struct device *dev; + u64 init_scrub_length = 0; + struct nfit_spa *nfit_spa; + u64 init_scrub_address = 0; + bool init_ars_done = false; + struct acpi_nfit_desc *acpi_desc; + unsigned int tmo = scrub_timeout; + unsigned int overflow_retry = scrub_overflow_abort; + + acpi_desc = container_of(work, typeof(*acpi_desc), work); + dev = acpi_desc->dev; + + /* + * We scrub in 2 phases. The first phase waits for any platform + * firmware initiated scrubs to complete and then we go search for the + * affected spa regions to mark them scanned. In the second phase we + * initiate a directed scrub for every range that was not scrubbed in + * phase 1. If we're called for a 'rescan', we harmlessly pass through + * the first phase, but really only care about running phase 2, where + * regions can be notified of new poison. + */ + + /* process platform firmware initiated scrubs */ + retry: + mutex_lock(&acpi_desc->init_mutex); + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + struct nd_cmd_ars_status *ars_status; + struct acpi_nfit_system_address *spa; + u64 ars_start, ars_len; + int rc; + + if (acpi_desc->cancel) + break; + + if (nfit_spa->nd_region) + continue; + + if (init_ars_done) { + /* + * No need to re-query, we're now just + * reconciling all the ranges covered by the + * initial scrub + */ + rc = 0; + } else + rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); + + if (rc == -ENOTTY) { + /* no ars capability, just register spa and move on */ + acpi_nfit_register_region(acpi_desc, nfit_spa); + continue; + } + + if (rc == -EBUSY && !tmo) { + /* fallthrough to directed scrub in phase 2 */ + dev_warn(dev, "timeout awaiting ars results, continuing...\n"); + break; + } else if (rc == -EBUSY) { + mutex_unlock(&acpi_desc->init_mutex); + ssleep(1); + tmo--; + goto retry; + } + + /* we got some results, but there are more pending... */ + if (rc == -ENOSPC && overflow_retry--) { + ars_status = acpi_desc->ars_status; + /* + * Record the original scrub range, so that we + * can recall all the ranges impacted by the + * initial scrub. + */ + if (!init_scrub_length) { + init_scrub_length = ars_status->length; + init_scrub_address = ars_status->address; + } + rc = ars_continue(acpi_desc); + if (rc == 0) { + mutex_unlock(&acpi_desc->init_mutex); + goto retry; + } + } + + if (rc < 0) { + /* + * Initial scrub failed, we'll give it one more + * try below... 
+ */ + break; + } + + /* We got some final results, record completed ranges */ + ars_status = acpi_desc->ars_status; + if (init_scrub_length) { + ars_start = init_scrub_address; + ars_len = ars_start + init_scrub_length; + } else { + ars_start = ars_status->address; + ars_len = ars_status->length; + } + spa = nfit_spa->spa; + + if (!init_ars_done) { + init_ars_done = true; + dev_dbg(dev, "init scrub %#llx + %#llx complete\n", + ars_start, ars_len); + } + if (ars_start <= spa->address && ars_start + ars_len + >= spa->address + spa->length) + acpi_nfit_register_region(acpi_desc, nfit_spa); + } + + /* + * For all the ranges not covered by an initial scrub we still + * want to see if there are errors, but it's ok to discover them + * asynchronously. + */ + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + /* + * Flag all the ranges that still need scrubbing, but + * register them now to make data available. + */ + if (!nfit_spa->nd_region) { + nfit_spa->ars_required = 1; + acpi_nfit_register_region(acpi_desc, nfit_spa); + } + } + + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) + acpi_nfit_async_scrub(acpi_desc, nfit_spa); + acpi_desc->scrub_count++; + if (acpi_desc->scrub_count_state) + sysfs_notify_dirent(acpi_desc->scrub_count_state); + mutex_unlock(&acpi_desc->init_mutex); +} + +static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) +{ + struct nfit_spa *nfit_spa; + int rc; + + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) + if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { + /* BLK regions don't need to wait for ars results */ + rc = acpi_nfit_register_region(acpi_desc, nfit_spa); + if (rc) + return rc; + } + + queue_work(nfit_wq, &acpi_desc->work); + return 0; +} + +static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, + struct nfit_table_prev *prev) +{ + struct device *dev = acpi_desc->dev; + + if (!list_empty(&prev->spas) || + !list_empty(&prev->memdevs) || + !list_empty(&prev->dcrs) || + !list_empty(&prev->bdws) || + !list_empty(&prev->idts) || + !list_empty(&prev->flushes)) { + dev_err(dev, "new nfit deletes entries (unsupported)\n"); + return -ENXIO; + } + return 0; +} + +static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) +{ + struct device *dev = acpi_desc->dev; + struct kernfs_node *nfit; + struct device *bus_dev; + + if (!ars_supported(acpi_desc->nvdimm_bus)) + return 0; + + bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); + nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); + if (!nfit) { + dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); + return -ENODEV; + } + acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); + sysfs_put(nfit); + if (!acpi_desc->scrub_count_state) { + dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); + return -ENODEV; + } + + return 0; +} + +static void acpi_nfit_destruct(void *data) +{ + struct acpi_nfit_desc *acpi_desc = data; + struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); + + acpi_desc->cancel = 1; + /* + * Bounce the nvdimm bus lock to make sure any in-flight + * acpi_nfit_ars_rescan() submissions have had a chance to + * either submit or see ->cancel set. 
+ */ + device_lock(bus_dev); + device_unlock(bus_dev); + + flush_workqueue(nfit_wq); + if (acpi_desc->scrub_count_state) + sysfs_put(acpi_desc->scrub_count_state); + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); + acpi_desc->nvdimm_bus = NULL; +} + +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) +{ + struct device *dev = acpi_desc->dev; + struct nfit_table_prev prev; + const void *end; + int rc; + + if (!acpi_desc->nvdimm_bus) { + acpi_nfit_init_dsms(acpi_desc); + + acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, + &acpi_desc->nd_desc); + if (!acpi_desc->nvdimm_bus) + return -ENOMEM; + + rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, + acpi_desc); + if (rc) + return rc; + + rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); + if (rc) + return rc; + } + + mutex_lock(&acpi_desc->init_mutex); + + INIT_LIST_HEAD(&prev.spas); + INIT_LIST_HEAD(&prev.memdevs); + INIT_LIST_HEAD(&prev.dcrs); + INIT_LIST_HEAD(&prev.bdws); + INIT_LIST_HEAD(&prev.idts); + INIT_LIST_HEAD(&prev.flushes); + + list_cut_position(&prev.spas, &acpi_desc->spas, + acpi_desc->spas.prev); + list_cut_position(&prev.memdevs, &acpi_desc->memdevs, + acpi_desc->memdevs.prev); + list_cut_position(&prev.dcrs, &acpi_desc->dcrs, + acpi_desc->dcrs.prev); + list_cut_position(&prev.bdws, &acpi_desc->bdws, + acpi_desc->bdws.prev); + list_cut_position(&prev.idts, &acpi_desc->idts, + acpi_desc->idts.prev); + list_cut_position(&prev.flushes, &acpi_desc->flushes, + acpi_desc->flushes.prev); + + end = data + sz; + while (!IS_ERR_OR_NULL(data)) + data = add_table(acpi_desc, &prev, data, end); + + if (IS_ERR(data)) { + dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, + PTR_ERR(data)); + rc = PTR_ERR(data); + goto out_unlock; + } + + rc = acpi_nfit_check_deletions(acpi_desc, &prev); + if (rc) + goto out_unlock; + + rc = nfit_mem_init(acpi_desc); + if (rc) + goto out_unlock; + + rc = acpi_nfit_register_dimms(acpi_desc); + if (rc) + goto out_unlock; + + rc = acpi_nfit_register_regions(acpi_desc); + + out_unlock: + mutex_unlock(&acpi_desc->init_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(acpi_nfit_init); + +struct acpi_nfit_flush_work { + struct work_struct work; + struct completion cmp; +}; + +static void flush_probe(struct work_struct *work) +{ + struct acpi_nfit_flush_work *flush; + + flush = container_of(work, typeof(*flush), work); + complete(&flush->cmp); +} + +static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + struct device *dev = acpi_desc->dev; + struct acpi_nfit_flush_work flush; + + /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ + device_lock(dev); + device_unlock(dev); + + /* + * Scrub work could take 10s of seconds, userspace may give up so we + * need to be interruptible while waiting. + */ + INIT_WORK_ONSTACK(&flush.work, flush_probe); + COMPLETION_INITIALIZER_ONSTACK(flush.cmp); + queue_work(nfit_wq, &flush.work); + return wait_for_completion_interruptible(&flush.cmp); +} + +static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); + + if (nvdimm) + return 0; + if (cmd != ND_CMD_ARS_START) + return 0; + + /* + * The kernel and userspace may race to initiate a scrub, but + * the scrub thread is prepared to lose that initial race. 
It + * just needs guarantees that any ars it initiates are not + * interrupted by any intervening start reqeusts from userspace. + */ + if (work_busy(&acpi_desc->work)) + return -EBUSY; + + return 0; +} + +static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) +{ + struct device *dev = acpi_desc->dev; + struct nfit_spa *nfit_spa; + + if (work_busy(&acpi_desc->work)) + return -EBUSY; + + if (acpi_desc->cancel) + return 0; + + mutex_lock(&acpi_desc->init_mutex); + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + struct acpi_nfit_system_address *spa = nfit_spa->spa; + + if (nfit_spa_type(spa) != NFIT_SPA_PM) + continue; + + nfit_spa->ars_required = 1; + } + queue_work(nfit_wq, &acpi_desc->work); + dev_dbg(dev, "%s: ars_scan triggered\n", __func__); + mutex_unlock(&acpi_desc->init_mutex); + + return 0; +} + +void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) +{ + struct nvdimm_bus_descriptor *nd_desc; + + dev_set_drvdata(dev, acpi_desc); + acpi_desc->dev = dev; + acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; + nd_desc = &acpi_desc->nd_desc; + nd_desc->provider_name = "ACPI.NFIT"; + nd_desc->module = THIS_MODULE; + nd_desc->ndctl = acpi_nfit_ctl; + nd_desc->flush_probe = acpi_nfit_flush_probe; + nd_desc->clear_to_send = acpi_nfit_clear_to_send; + nd_desc->attr_groups = acpi_nfit_attribute_groups; + + INIT_LIST_HEAD(&acpi_desc->spas); + INIT_LIST_HEAD(&acpi_desc->dcrs); + INIT_LIST_HEAD(&acpi_desc->bdws); + INIT_LIST_HEAD(&acpi_desc->idts); + INIT_LIST_HEAD(&acpi_desc->flushes); + INIT_LIST_HEAD(&acpi_desc->memdevs); + INIT_LIST_HEAD(&acpi_desc->dimms); + mutex_init(&acpi_desc->init_mutex); + INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); +} +EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); + +static int acpi_nfit_add(struct acpi_device *adev) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_nfit_desc *acpi_desc; + struct device *dev = &adev->dev; + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + acpi_size sz; + int rc = 0; + + status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); + if (ACPI_FAILURE(status)) { + /* This is ok, we could have an nvdimm hotplugged later */ + dev_dbg(dev, "failed to find NFIT at startup\n"); + return 0; + } + + acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); + if (!acpi_desc) + return -ENOMEM; + acpi_nfit_desc_init(acpi_desc, &adev->dev); + + /* Save the acpi header for exporting the revision via sysfs */ + acpi_desc->acpi_header = *tbl; + + /* Evaluate _FIT and override with that if present */ + status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); + if (ACPI_SUCCESS(status) && buf.length > 0) { + union acpi_object *obj = buf.pointer; + + if (obj->type == ACPI_TYPE_BUFFER) + rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, + obj->buffer.length); + else + dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", + __func__, (int) obj->type); + kfree(buf.pointer); + } else + /* skip over the lead-in header table */ + rc = acpi_nfit_init(acpi_desc, (void *) tbl + + sizeof(struct acpi_table_nfit), + sz - sizeof(struct acpi_table_nfit)); + return rc; +} + +static int acpi_nfit_remove(struct acpi_device *adev) +{ + /* see acpi_nfit_destruct */ + return 0; +} + +static void acpi_nfit_notify(struct acpi_device *adev, u32 event) +{ + struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + struct device *dev = &adev->dev; + union acpi_object *obj; + acpi_status status; + int ret; + + 
dev_dbg(dev, "%s: event: %d\n", __func__, event); + + device_lock(dev); + if (!dev->driver) { + /* dev->driver may be null if we're being removed */ + dev_dbg(dev, "%s: no driver found for dev\n", __func__); + goto out_unlock; + } + + if (!acpi_desc) { + acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); + if (!acpi_desc) + goto out_unlock; + acpi_nfit_desc_init(acpi_desc, &adev->dev); + } else { + /* + * Finish previous registration before considering new + * regions. + */ + flush_workqueue(nfit_wq); + } + + /* Evaluate _FIT */ + status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to evaluate _FIT\n"); + goto out_unlock; + } + + obj = buf.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, + obj->buffer.length); + if (ret) + dev_err(dev, "failed to merge updated NFIT\n"); + } else + dev_err(dev, "Invalid _FIT\n"); + kfree(buf.pointer); + + out_unlock: + device_unlock(dev); +} + +static const struct acpi_device_id acpi_nfit_ids[] = { + { "ACPI0012", 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); + +static struct acpi_driver acpi_nfit_driver = { + .name = KBUILD_MODNAME, + .ids = acpi_nfit_ids, + .ops = { + .add = acpi_nfit_add, + .remove = acpi_nfit_remove, + .notify = acpi_nfit_notify, + }, +}; + +static __init int nfit_init(void) +{ + BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); + BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); + BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); + BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); + BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); + BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); + BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); + + acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); + acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); + acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); + acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); + acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); + acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); + acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); + acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); + acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); + acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); + acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); + acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); + acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); + + nfit_wq = create_singlethread_workqueue("nfit"); + if (!nfit_wq) + return -ENOMEM; + + return acpi_bus_register_driver(&acpi_nfit_driver); +} + +static __exit void nfit_exit(void) +{ + acpi_bus_unregister_driver(&acpi_nfit_driver); + destroy_workqueue(nfit_wq); +} + +module_init(nfit_init); +module_exit(nfit_exit); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h new file mode 100644 index 000000000000..6ecf337c97aa --- /dev/null +++ b/drivers/acpi/nfit/nfit.h @@ -0,0 +1,207 @@ +/* + * NVDIMM Firmware Interface Table - NFIT + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __NFIT_H__ +#define __NFIT_H__ +#include +#include +#include +#include +#include +#include + +/* ACPI 6.1 */ +#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba" + +/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */ +#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" + +/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */ +#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" +#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e" + +/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ +#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" + +#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ + | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ + | ACPI_NFIT_MEM_NOT_ARMED) + +enum nfit_uuids { + /* for simplicity alias the uuid index with the family id */ + NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL, + NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, + NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, + NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, + NFIT_SPA_VOLATILE, + NFIT_SPA_PM, + NFIT_SPA_DCR, + NFIT_SPA_BDW, + NFIT_SPA_VDISK, + NFIT_SPA_VCD, + NFIT_SPA_PDISK, + NFIT_SPA_PCD, + NFIT_DEV_BUS, + NFIT_UUID_MAX, +}; + +/* + * Region format interface codes are stored as an array of bytes in the + * NFIT DIMM Control Region structure + */ +#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */ +#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */ +#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */ + +enum { + NFIT_BLK_READ_FLUSH = 1, + NFIT_BLK_DCR_LATCH = 2, + NFIT_ARS_STATUS_DONE = 0, + NFIT_ARS_STATUS_BUSY = 1 << 16, + NFIT_ARS_STATUS_NONE = 2 << 16, + NFIT_ARS_STATUS_INTR = 3 << 16, + NFIT_ARS_START_BUSY = 6, + NFIT_ARS_CAP_NONE = 1, + NFIT_ARS_F_OVERFLOW = 1, + NFIT_ARS_TIMEOUT = 90, +}; + +struct nfit_spa { + struct list_head list; + struct nd_region *nd_region; + unsigned int ars_required:1; + u32 clear_err_unit; + u32 max_ars; + struct acpi_nfit_system_address spa[0]; +}; + +struct nfit_dcr { + struct list_head list; + struct acpi_nfit_control_region dcr[0]; +}; + +struct nfit_bdw { + struct list_head list; + struct acpi_nfit_data_region bdw[0]; +}; + +struct nfit_idt { + struct list_head list; + struct acpi_nfit_interleave idt[0]; +}; + +struct nfit_flush { + struct list_head list; + struct acpi_nfit_flush_address flush[0]; +}; + +struct nfit_memdev { + struct list_head list; + struct acpi_nfit_memory_map memdev[0]; +}; + +/* assembled tables for a given dimm/memory-device */ +struct nfit_mem { + struct nvdimm *nvdimm; + struct acpi_nfit_memory_map *memdev_dcr; + struct acpi_nfit_memory_map *memdev_pmem; + struct acpi_nfit_memory_map *memdev_bdw; + struct acpi_nfit_control_region *dcr; + struct acpi_nfit_data_region *bdw; + struct acpi_nfit_system_address *spa_dcr; + struct acpi_nfit_system_address *spa_bdw; + struct acpi_nfit_interleave *idt_dcr; + struct acpi_nfit_interleave *idt_bdw; + struct nfit_flush *nfit_flush; + struct 
list_head list; + struct acpi_device *adev; + struct acpi_nfit_desc *acpi_desc; + struct resource *flush_wpq; + unsigned long dsm_mask; + int family; +}; + +struct acpi_nfit_desc { + struct nvdimm_bus_descriptor nd_desc; + struct acpi_table_header acpi_header; + struct mutex init_mutex; + struct list_head memdevs; + struct list_head flushes; + struct list_head dimms; + struct list_head spas; + struct list_head dcrs; + struct list_head bdws; + struct list_head idts; + struct nvdimm_bus *nvdimm_bus; + struct device *dev; + struct nd_cmd_ars_status *ars_status; + size_t ars_status_size; + struct work_struct work; + struct kernfs_node *scrub_count_state; + unsigned int scrub_count; + unsigned int cancel:1; + unsigned long dimm_cmd_force_en; + unsigned long bus_cmd_force_en; + int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, + void *iobuf, u64 len, int rw); +}; + +enum nd_blk_mmio_selector { + BDW, + DCR, +}; + +struct nd_blk_addr { + union { + void __iomem *base; + void *aperture; + }; +}; + +struct nfit_blk { + struct nfit_blk_mmio { + struct nd_blk_addr addr; + u64 size; + u64 base_offset; + u32 line_size; + u32 num_lines; + u32 table_size; + struct acpi_nfit_interleave *idt; + struct acpi_nfit_system_address *spa; + } mmio[2]; + struct nd_region *nd_region; + u64 bdw_offset; /* post interleave offset */ + u64 stat_offset; + u64 cmd_offset; + u32 dimm_flags; +}; + +static inline struct acpi_nfit_memory_map *__to_nfit_memdev( + struct nfit_mem *nfit_mem) +{ + if (nfit_mem->memdev_dcr) + return nfit_mem->memdev_dcr; + return nfit_mem->memdev_pmem; +} + +static inline struct acpi_nfit_desc *to_acpi_desc( + struct nvdimm_bus_descriptor *nd_desc) +{ + return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); +} + +const u8 *to_nfit_uuid(enum nfit_uuids id); +int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); +void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); +#endif /* __NFIT_H__ */ -- cgit v1.2.3 From 6839a6d96f4ea0254266d60208c1fbbd53ade546 Mon Sep 17 00:00:00 2001 From: Vishal Verma Date: Sat, 23 Jul 2016 21:51:21 -0700 Subject: nfit: do an ARS scrub on hitting a latent media error When a latent (unknown to 'badblocks') error is encountered, it will trigger a machine check exception. On a system with machine check recovery, this will only SIGBUS the process(es) which had the bad page mapped (as opposed to a kernel panic on platforms without machine check recovery features). In the former case, we want to trigger a full rescan of that nvdimm bus. This will allow any additional, new errors to be captured in the block devices' badblocks lists, and offending operations on them can be trapped early, avoiding machine checks. This is done by registering a callback function with the x86_mce_decoder_chain and calling the new ars_rescan functionality with the address in the mce notificatiion. Cc: Rafael J. 
Wysocki Cc: Tony Luck Signed-off-by: Vishal Verma Signed-off-by: Dan Williams --- drivers/acpi/nfit/Makefile | 1 + drivers/acpi/nfit/core.c | 26 +++++++++++--- drivers/acpi/nfit/mce.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/acpi/nfit/nfit.h | 20 +++++++++++ 4 files changed, 132 insertions(+), 4 deletions(-) create mode 100644 drivers/acpi/nfit/mce.c (limited to 'drivers/acpi') diff --git a/drivers/acpi/nfit/Makefile b/drivers/acpi/nfit/Makefile index eb95c5aff83b..a407e769f103 100644 --- a/drivers/acpi/nfit/Makefile +++ b/drivers/acpi/nfit/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_ACPI_NFIT) := nfit.o nfit-y := core.o +nfit-$(CONFIG_X86_MCE) += mce.o diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 19d0dfdf9633..69b35b7f97a1 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -51,6 +51,9 @@ module_param(disable_vendor_specific, bool, S_IRUGO); MODULE_PARM_DESC(disable_vendor_specific, "Limit commands to the publicly specified set\n"); +LIST_HEAD(acpi_descs); +DEFINE_MUTEX(acpi_desc_lock); + static struct workqueue_struct *nfit_wq; struct nfit_table_prev { @@ -361,7 +364,7 @@ static const char *spa_type_name(u16 type) return to_name[type]; } -static int nfit_spa_type(struct acpi_nfit_system_address *spa) +int nfit_spa_type(struct acpi_nfit_system_address *spa) { int i; @@ -898,8 +901,6 @@ static ssize_t scrub_show(struct device *dev, return rc; } -static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); - static ssize_t scrub_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { @@ -2400,6 +2401,11 @@ static void acpi_nfit_destruct(void *data) struct acpi_nfit_desc *acpi_desc = data; struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); + /* + * Destruct under acpi_desc_lock so that nfit_handle_mce does not + * race teardown + */ + mutex_lock(&acpi_desc_lock); acpi_desc->cancel = 1; /* * Bounce the nvdimm bus lock to make sure any in-flight @@ -2414,6 +2420,8 @@ static void acpi_nfit_destruct(void *data) sysfs_put(acpi_desc->scrub_count_state); nvdimm_bus_unregister(acpi_desc->nvdimm_bus); acpi_desc->nvdimm_bus = NULL; + list_del(&acpi_desc->list); + mutex_unlock(&acpi_desc_lock); } int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) @@ -2439,6 +2447,11 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); if (rc) return rc; + + /* register this acpi_desc for mce notifications */ + mutex_lock(&acpi_desc_lock); + list_add_tail(&acpi_desc->list, &acpi_descs); + mutex_unlock(&acpi_desc_lock); } mutex_lock(&acpi_desc->init_mutex); @@ -2549,7 +2562,7 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } -static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) { struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; @@ -2598,6 +2611,7 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) INIT_LIST_HEAD(&acpi_desc->flushes); INIT_LIST_HEAD(&acpi_desc->memdevs); INIT_LIST_HEAD(&acpi_desc->dimms); + INIT_LIST_HEAD(&acpi_desc->list); mutex_init(&acpi_desc->init_mutex); INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); } @@ -2750,13 +2764,17 @@ static __init int nfit_init(void) if (!nfit_wq) return -ENOMEM; + nfit_mce_register(); + return acpi_bus_register_driver(&acpi_nfit_driver); } static __exit void nfit_exit(void) { + nfit_mce_unregister(); 
acpi_bus_unregister_driver(&acpi_nfit_driver); destroy_workqueue(nfit_wq); + WARN_ON(!list_empty(&acpi_descs)); } module_init(nfit_init); diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c new file mode 100644 index 000000000000..4c745bf389fe --- /dev/null +++ b/drivers/acpi/nfit/mce.c @@ -0,0 +1,89 @@ +/* + * NFIT - Machine Check Handler + * + * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include +#include +#include +#include "nfit.h" + +static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *mce = (struct mce *)data; + struct acpi_nfit_desc *acpi_desc; + struct nfit_spa *nfit_spa; + + /* We only care about memory errors */ + if (!(mce->status & MCACOD)) + return NOTIFY_DONE; + + /* + * mce->addr contains the physical addr accessed that caused the + * machine check. We need to walk through the list of NFITs, and see + * if any of them matches that address, and only then start a scrub. + */ + mutex_lock(&acpi_desc_lock); + list_for_each_entry(acpi_desc, &acpi_descs, list) { + struct device *dev = acpi_desc->dev; + int found_match = 0; + + mutex_lock(&acpi_desc->init_mutex); + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + struct acpi_nfit_system_address *spa = nfit_spa->spa; + + if (nfit_spa_type(spa) == NFIT_SPA_PM) + continue; + /* find the spa that covers the mce addr */ + if (spa->address > mce->addr) + continue; + if ((spa->address + spa->length - 1) < mce->addr) + continue; + found_match = 1; + dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n", + __func__, spa->range_index, spa->address, + spa->length); + /* + * We can break at the first match because we're going + * to rescan all the SPA ranges. There shouldn't be any + * aliasing anyway. 
+ */ + break; + } + mutex_unlock(&acpi_desc->init_mutex); + + /* + * We can ignore an -EBUSY here because if an ARS is already + * in progress, just let that be the last authoritative one + */ + if (found_match) + acpi_nfit_ars_rescan(acpi_desc); + } + + mutex_unlock(&acpi_desc_lock); + return NOTIFY_DONE; +} + +static struct notifier_block nfit_mce_dec = { + .notifier_call = nfit_handle_mce, +}; + +void nfit_mce_register(void) +{ + mce_register_decode_chain(&nfit_mce_dec); +} + +void nfit_mce_unregister(void) +{ + mce_unregister_decode_chain(&nfit_mce_dec); +} diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 6ecf337c97aa..ba6074a06958 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -16,6 +16,7 @@ #define __NFIT_H__ #include #include +#include #include #include #include @@ -148,6 +149,7 @@ struct acpi_nfit_desc { struct nd_cmd_ars_status *ars_status; size_t ars_status_size; struct work_struct work; + struct list_head list; struct kernfs_node *scrub_count_state; unsigned int scrub_count; unsigned int cancel:1; @@ -187,6 +189,24 @@ struct nfit_blk { u32 dimm_flags; }; +extern struct list_head acpi_descs; +extern struct mutex acpi_desc_lock; +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); + +#ifdef CONFIG_X86_MCE +void nfit_mce_register(void); +void nfit_mce_unregister(void); +#else +static inline void nfit_mce_register(void) +{ +} +static inline void nfit_mce_unregister(void) +{ +} +#endif + +int nfit_spa_type(struct acpi_nfit_system_address *spa); + static inline struct acpi_nfit_memory_map *__to_nfit_memdev( struct nfit_mem *nfit_mem) { -- cgit v1.2.3
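
A note on the BLK interleave math above: to_interleave_offset() is the one helper in the block-window I/O path that is easy to reason about in isolation. The following is a minimal, standalone C sketch (userspace, not kernel code) that mirrors that calculation; the ilv_mmio struct and the sample interleave values are hypothetical stand-ins for the fields the driver fills in from the NFIT interleave (IDT) and data-window tables. It shows how a DIMM-relative aperture offset is split into whole interleave tables to skip, the IDT-selected line inside the current table, and the sub-line remainder.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the few nfit_blk_mmio fields the translation needs */
struct ilv_mmio {
	uint64_t base_offset;		/* region offset of this DIMM within the SPA */
	uint64_t table_size;		/* line_size * num_lines * interleave_ways */
	uint32_t line_size;		/* bytes this DIMM owns per interleave line */
	uint32_t num_lines;		/* lines described by the interleave table */
	const uint32_t *line_offset;	/* IDT line offsets, in units of line_size */
};

static uint64_t to_interleave_offset(uint64_t offset, const struct ilv_mmio *mmio)
{
	uint64_t line_no = offset / mmio->line_size;
	uint32_t sub_line_offset = offset % mmio->line_size;
	uint64_t table_skip_count = line_no / mmio->num_lines;
	uint32_t line_index = line_no % mmio->num_lines;
	uint64_t line_offset = (uint64_t)mmio->line_offset[line_index] * mmio->line_size;
	uint64_t table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

int main(void)
{
	/* assumed example: 2-way interleave, 256-byte lines; this DIMM owns
	 * lines 0 and 2 of every 4-line (1024-byte) interleave table */
	const uint32_t idt[] = { 0, 2 };
	struct ilv_mmio mmio = {
		.base_offset = 0,
		.line_size = 256,
		.num_lines = 2,
		.table_size = 256ULL * 2 * 2,
		.line_offset = idt,
	};
	uint64_t dimm_off;

	for (dimm_off = 0; dimm_off < 1024; dimm_off += 256)
		printf("dimm offset %4llu -> spa offset %4llu\n",
		       (unsigned long long)dimm_off,
		       (unsigned long long)to_interleave_offset(dimm_off, &mmio));
	return 0;
}

The kernel helper performs the same arithmetic with div_u64_rem() so the 64-bit divisions stay cheap and portable on 32-bit builds; plain / and % are fine in a userspace sketch.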