author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-08 12:05:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-08 12:05:31 -0700
commit     23fc02e36e4f657af242e59175c891b27c704935 (patch)
tree       43e7d81637b4f3a6ec2d8e5d754f33dc6171f729 /drivers/s390
parent     4e3a16ee9148e966678bbc713579235422271a63 (diff)
parent     bfa50e1427e4608ce6941d3d0855445fcaa7dbb7 (diff)
download   linux-23fc02e36e4f657af242e59175c891b27c704935.tar.gz / .tar.bz2 / .zip
Merge tag 's390-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:
- Add support for multi-function devices in the PCI code.
- Enable PF-VF linking for architectures using the pdev->no_vf_scan
  flag (currently just s390).
- Add support for re-IPL from NVMe devices.
- Get rid of the critical section cleanup in entry.S.
- Refactor the PNSO CHSC (perform network subchannel operation) code in
  cio and qeth (a caller sketch follows this list).
- Fixes and improvements for QDIO interrupt and error handling, plus
  further refactoring.
- Align ioremap() with generic code.
- Accept requests without the prefetch bit set in vfio-ccw.
- Enable path handling via two new regions in vfio-ccw.
- Other small fixes and improvements all over the code.
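On the PNSO item: the diff below folds qdio_pnso_brinfo()/chsc_pnso_brinfo() into a plain chsc_pnso() and adds a ccw_device_pnso() wrapper that resolves the subchannel ID from a ccw_device. A minimal caller sketch, based on the kerneldoc in the patch — the helper, its signature, and the zeroed-page allocation rule come from the diff; the example function, its error-handling policy, and the omitted multiblock resume-token loop are illustrative:

	#include <asm/chsc.h>	/* chsc_pnso_area / resume token types (header placement assumed) */

	static int example_pnso_once(struct ccw_device *cdev)
	{
		struct chsc_pnso_resume_token token = {};	/* zero token: start a new list */
		struct chsc_pnso_area *area;
		int rc;

		/* Per the kerneldoc, the request/response block must be a zeroed page. */
		area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
		if (!area)
			return -ENOMEM;

		/* cnc=1 requests change notifications for the address list. */
		rc = ccw_device_pnso(cdev, area, token, 1);
		/* A non-zero area->naihdr.resume_token would signal a multiblock
		 * response; a real caller would loop, passing the token back in. */

		free_page((unsigned long)area);
		return rc;
	}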
* tag 's390-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (52 commits)
vfio-ccw: make vfio_ccw_regops variables declarations static
vfio-ccw: Add trace for CRW event
vfio-ccw: Wire up the CRW irq and CRW region
vfio-ccw: Introduce a new CRW region
vfio-ccw: Refactor IRQ handlers
vfio-ccw: Introduce a new schib region
vfio-ccw: Refactor the unregister of the async regions
vfio-ccw: Register a chp_event callback for vfio-ccw
vfio-ccw: Introduce new helper functions to free/destroy regions
vfio-ccw: document possible errors
vfio-ccw: Enable transparent CCW IPL from DASD
s390/pci: Log new handle in clp_disable_fh()
s390/cio, s390/qeth: cleanup PNSO CHSC
s390/qdio: remove q->first_to_kick
s390/qdio: fix up qdio_start_irq() kerneldoc
s390: remove critical section cleanup from entry.S
s390: add machine check SIGP
s390/pci: ioremap() align with generic code
s390/ap: introduce new ap function ap_get_qdev()
Documentation/s390: Update / remove developerWorks web links
...
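The vfio-ccw commits above wire up a new CRW eventfd (VFIO_CCW_CRW_IRQ_INDEX) and a read-only CRW region. A hedged userspace sketch of the consumption loop implied by vfio_ccw_crw_region_read() in the diff below — the device fd, region offset, and eventfd are assumed to have been obtained via the usual VFIO region/IRQ ioctls, and the struct layout (one 32-bit channel report word plus padding) is an assumption mirrored from the UAPI:

	#include <stdint.h>
	#include <unistd.h>

	/* Assumed layout of the vfio-ccw CRW region. */
	struct ccw_crw_region {
		uint32_t crw;
		uint32_t pad;
	};

	static int drain_one_crw(int device_fd, uint64_t crw_region_offset, int crw_eventfd)
	{
		struct ccw_crw_region region;
		uint64_t count;

		/* Block until the kernel signals that at least one CRW is queued. */
		if (read(crw_eventfd, &count, sizeof(count)) != sizeof(count))
			return -1;

		/* Each region read pops at most one CRW; the kernel re-signals the
		 * eventfd if more remain on its queue (see the handler below). */
		if (pread(device_fd, &region, sizeof(region), crw_region_offset) != sizeof(region))
			return -1;

		return region.crw != 0;	/* 0 means nothing was pending */
	}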
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/cio/Makefile               2
-rw-r--r--  drivers/s390/cio/chsc.c                40
-rw-r--r--  drivers/s390/cio/chsc.h                50
-rw-r--r--  drivers/s390/cio/device_ops.c          23
-rw-r--r--  drivers/s390/cio/idset.c               12
-rw-r--r--  drivers/s390/cio/qdio.h                16
-rw-r--r--  drivers/s390/cio/qdio_main.c          299
-rw-r--r--  drivers/s390/cio/qdio_setup.c         100
-rw-r--r--  drivers/s390/cio/qdio_thinint.c        61
-rw-r--r--  drivers/s390/cio/vfio_ccw_chp.c       148
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c         19
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c       165
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c        65
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h    16
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.c       1
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.h      30
-rw-r--r--  drivers/s390/crypto/ap_bus.c           94
-rw-r--r--  drivers/s390/crypto/ap_bus.h           25
-rw-r--r--  drivers/s390/crypto/ap_card.c          47
-rw-r--r--  drivers/s390/crypto/ap_queue.c         10
-rw-r--r--  drivers/s390/net/qeth_l2_main.c       198
21 files changed, 830 insertions, 591 deletions
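The ap_bus portion of the diff below replaces the per-card queue lists with a global ap_queues hashtable and exports ap_get_qdev(). Per its kerneldoc, a successful lookup takes a reference on the embedded device that the caller must drop. A minimal sketch of that contract — the helper and the put_device() rule come from the patch; the wrapper function is illustrative:

	/* Illustrative caller of the new lookup helper. */
	static bool example_ap_queue_exists(ap_qid_t qid)
	{
		struct ap_queue *aq;

		aq = ap_get_qdev(qid);	/* takes a device reference on success */
		if (!aq)
			return false;

		/* ... inspect aq under its own lock if needed ... */

		put_device(&aq->ap_dev.device);	/* mandatory per the kerneldoc */
		return true;
	}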
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index 23eae4188876..a9235f111e79 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \ - vfio_ccw_async.o vfio_ccw_trace.o + vfio_ccw_async.o vfio_ccw_trace.o vfio_ccw_chp.o obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 1ca73c2e5a8f..c314e9495c1b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -57,6 +57,7 @@ int chsc_error_from_response(int response) case 0x0104: return -EINVAL; case 0x0004: + case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */ return -EOPNOTSUPP; case 0x000b: case 0x0107: /* "Channel busy" for the op 0x003d */ @@ -1336,36 +1337,35 @@ out: EXPORT_SYMBOL_GPL(chsc_scm_info); /** - * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info. + * chsc_pnso() - Perform Network-Subchannel Operation * @schid: id of the subchannel on which PNSO is performed - * @brinfo_area: request and response block for the operation + * @pnso_area: request and response block for the operation * @resume_token: resume token for multiblock response * @cnc: Boolean change-notification control * - * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) + * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) * * Returns 0 on success. */ -int chsc_pnso_brinfo(struct subchannel_id schid, - struct chsc_pnso_area *brinfo_area, - struct chsc_brinfo_resume_token resume_token, - int cnc) +int chsc_pnso(struct subchannel_id schid, + struct chsc_pnso_area *pnso_area, + struct chsc_pnso_resume_token resume_token, + int cnc) { - memset(brinfo_area, 0, sizeof(*brinfo_area)); - brinfo_area->request.length = 0x0030; - brinfo_area->request.code = 0x003d; /* network-subchannel operation */ - brinfo_area->m = schid.m; - brinfo_area->ssid = schid.ssid; - brinfo_area->sch = schid.sch_no; - brinfo_area->cssid = schid.cssid; - brinfo_area->oc = 0; /* Store-network-bridging-information list */ - brinfo_area->resume_token = resume_token; - brinfo_area->n = (cnc != 0); - if (chsc(brinfo_area)) + memset(pnso_area, 0, sizeof(*pnso_area)); + pnso_area->request.length = 0x0030; + pnso_area->request.code = 0x003d; /* network-subchannel operation */ + pnso_area->m = schid.m; + pnso_area->ssid = schid.ssid; + pnso_area->sch = schid.sch_no; + pnso_area->cssid = schid.cssid; + pnso_area->oc = 0; /* Store-network-bridging-information list */ + pnso_area->resume_token = resume_token; + pnso_area->n = (cnc != 0); + if (chsc(pnso_area)) return -EIO; - return chsc_error_from_response(brinfo_area->response.code); + return chsc_error_from_response(pnso_area->response.code); } -EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); int chsc_sgib(u32 origin) { diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 34de6d77442c..7ecf7e4c402e 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -205,52 +205,10 @@ struct chsc_scm_info { int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); -struct chsc_brinfo_resume_token { - u64 t1; - u64 t2; -} __packed; - -struct chsc_brinfo_naihdr { - struct chsc_brinfo_resume_token resume_token; - u32:32; - u32 instance; - u32:24; - u8 naids; - u32 reserved[3]; -} __packed; - -struct chsc_pnso_area { - struct chsc_header request; - u8:2; - u8 m:1; - u8:5; - u8:2; - u8 
ssid:2; - u8 fmt:4; - u16 sch; - u8:8; - u8 cssid; - u16:16; - u8 oc; - u32:24; - struct chsc_brinfo_resume_token resume_token; - u32 n:1; - u32:31; - u32 reserved[3]; - struct chsc_header response; - u32:32; - struct chsc_brinfo_naihdr naihdr; - union { - struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0]; - struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0]; - struct qdio_brinfo_entry_l2 l2[0]; - } entries; -} __packed __aligned(PAGE_SIZE); - -int chsc_pnso_brinfo(struct subchannel_id schid, - struct chsc_pnso_area *brinfo_area, - struct chsc_brinfo_resume_token resume_token, - int cnc); +int chsc_pnso(struct subchannel_id schid, + struct chsc_pnso_area *pnso_area, + struct chsc_pnso_resume_token resume_token, + int cnc); int __init chsc_get_cssid(int idx); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index ccecf6b9504e..963fcc9054c6 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -710,6 +710,29 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid) } EXPORT_SYMBOL_GPL(ccw_device_get_schid); +/** + * ccw_device_pnso() - Perform Network-Subchannel Operation + * @cdev: device on which PNSO is performed + * @pnso_area: request and response block for the operation + * @resume_token: resume token for multiblock response + * @cnc: Boolean change-notification control + * + * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) + * + * Returns 0 on success. + */ +int ccw_device_pnso(struct ccw_device *cdev, + struct chsc_pnso_area *pnso_area, + struct chsc_pnso_resume_token resume_token, + int cnc) +{ + struct subchannel_id schid; + + ccw_device_get_schid(cdev, &schid); + return chsc_pnso(schid, pnso_area, resume_token, cnc); +} +EXPORT_SYMBOL_GPL(ccw_device_pnso); + /* * Allocate zeroed dma coherent 31 bit addressable memory using * the subchannels dma pool. 
Maximal size of allocation supported diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 77d0ea7b381b..45f9c0736be4 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c @@ -59,18 +59,6 @@ static inline int idset_contains(struct idset *set, int ssid, int id) return test_bit(ssid * set->num_id + id, set->bitmap); } -static inline int idset_get_first(struct idset *set, int *ssid, int *id) -{ - int bitnum; - - bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); - if (bitnum >= set->num_ssid * set->num_id) - return 0; - *ssid = bitnum / set->num_id; - *id = bitnum % set->num_id; - return 1; -} - struct idset *idset_sch_new(void) { return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index b8453b594679..eb13c479e11d 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -221,9 +221,6 @@ struct qdio_q { */ int first_to_check; - /* beginning position for calling the program */ - int first_to_kick; - /* number of buffers in use by the adapter */ atomic_t nr_buf_used; @@ -292,6 +289,8 @@ struct qdio_irq { struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; + unsigned int max_input_qs; + unsigned int max_output_qs; void (*irq_poll)(struct ccw_device *cdev, unsigned long data); unsigned long poll_state; @@ -364,16 +363,13 @@ static inline int multicast_outbound(struct qdio_q *q) extern u64 last_ai_time; /* prototypes for thin interrupt */ -void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); void tiqdio_add_device(struct qdio_irq *irq_ptr); void tiqdio_remove_device(struct qdio_irq *irq_ptr); void tiqdio_inbound_processing(unsigned long q); -int tiqdio_allocate_memory(void); -void tiqdio_free_memory(void); -int tiqdio_register_thinints(void); -void tiqdio_unregister_thinints(void); +int qdio_thinint_init(void); +void qdio_thinint_exit(void); int test_nonshared_ind(struct qdio_irq *); /* prototypes for setup */ @@ -389,8 +385,10 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, struct subchannel_id *schid, struct qdio_ssqd_desc *data); int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data); +void qdio_shutdown_irq(struct qdio_irq *irq); void qdio_print_subchannel_info(struct qdio_irq *irq_ptr); -void qdio_release_memory(struct qdio_irq *irq_ptr); +void qdio_free_queues(struct qdio_irq *irq_ptr); +void qdio_free_async_data(struct qdio_irq *irq_ptr); int qdio_setup_init(void); void qdio_setup_exit(void); int qdio_enable_async_operation(struct qdio_output_q *q); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index bcc3ab14e72d..610c05f59589 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -143,7 +143,7 @@ again: DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, - q->first_to_kick, count, q->irq_ptr->int_parm); + q->first_to_check, count, q->irq_ptr->int_parm); return 0; } } @@ -191,7 +191,7 @@ again: DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, - q->first_to_kick, count, q->irq_ptr->int_parm); + q->first_to_check, count, q->irq_ptr->int_parm); return 0; } } @@ -438,15 +438,12 @@ static void process_buffer_error(struct qdio_q *q, unsigned int 
start, q->sbal[start]->element[15].sflags); } -static inline void inbound_primed(struct qdio_q *q, unsigned int start, - int count) +static inline void inbound_handle_work(struct qdio_q *q, unsigned int start, + int count, bool auto_ack) { int new; - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count); - - /* for QEBSM the ACK was already set by EQBS */ - if (is_qebsm(q)) { + if (auto_ack) { if (!q->u.in.ack_count) { q->u.in.ack_count = count; q->u.in.ack_start = start; @@ -466,15 +463,14 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, * or by the next inbound run. */ new = add_buf(start, count - 1); - if (q->u.in.ack_count) { - /* reset the previous ACK but first set the new one */ - set_buf_state(q, new, SLSB_P_INPUT_ACK); - set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); - } else { - q->u.in.ack_count = 1; - set_buf_state(q, new, SLSB_P_INPUT_ACK); - } + set_buf_state(q, new, SLSB_P_INPUT_ACK); + + /* delete the previous ACKs */ + if (q->u.in.ack_count) + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = 1; q->u.in.ack_start = new; count--; if (!count) @@ -508,19 +504,21 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) switch (state) { case SLSB_P_INPUT_PRIMED: - inbound_primed(q, start, count); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, + count); + + inbound_handle_work(q, start, count, is_qebsm(q)); if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); return count; case SLSB_P_INPUT_ERROR: + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr, + count); + process_buffer_error(q, start, count); - /* - * Interrupts may be avoided as long as the error is present - * so change the buffer state immediately to avoid starvation. 
- */ - set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); + inbound_handle_work(q, start, count, false); if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) @@ -624,10 +622,9 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, return phys_aob; } -static void qdio_kick_handler(struct qdio_q *q, unsigned int count) +static void qdio_kick_handler(struct qdio_q *q, unsigned int start, + unsigned int count) { - int start = q->first_to_kick; - if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; @@ -644,7 +641,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count) q->irq_ptr->int_parm); /* for the next time */ - q->first_to_kick = add_buf(start, count); q->qdio_error = 0; } @@ -668,9 +664,9 @@ static void __qdio_inbound_processing(struct qdio_q *q) if (count == 0) return; + qdio_kick_handler(q, start, count); start = add_buf(start, count); q->first_to_check = start; - qdio_kick_handler(q, count); if (!qdio_inbound_q_done(q, start)) { /* means poll time is not yet over */ @@ -826,7 +822,7 @@ static void __qdio_outbound_processing(struct qdio_q *q) count = qdio_outbound_q_moved(q, start); if (count) { q->first_to_check = add_buf(start, count); - qdio_kick_handler(q, count); + qdio_kick_handler(q, start, count); } if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) && @@ -880,47 +876,17 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) qdio_tasklet_schedule(out); } -static void __tiqdio_inbound_processing(struct qdio_q *q) +void tiqdio_inbound_processing(unsigned long data) { - unsigned int start = q->first_to_check; - int count; + struct qdio_q *q = (struct qdio_q *)data; - qperf_inc(q, tasklet_inbound); if (need_siga_sync(q) && need_siga_sync_after_ai(q)) qdio_sync_queues(q); /* The interrupt could be caused by a PCI request: */ qdio_check_outbound_pci_queues(q->irq_ptr); - count = qdio_inbound_q_moved(q, start); - if (count == 0) - return; - - start = add_buf(start, count); - q->first_to_check = start; - qdio_kick_handler(q, count); - - if (!qdio_inbound_q_done(q, start)) { - qperf_inc(q, tasklet_inbound_resched); - if (!qdio_tasklet_schedule(q)) - return; - } - - qdio_stop_polling(q); - /* - * We need to check again to not lose initiative after - * resetting the ACK state. 
- */ - if (!qdio_inbound_q_done(q, start)) { - qperf_inc(q, tasklet_inbound_resched2); - qdio_tasklet_schedule(q); - } -} - -void tiqdio_inbound_processing(unsigned long data) -{ - struct qdio_q *q = (struct qdio_q *)data; - __tiqdio_inbound_processing(q); + __qdio_inbound_processing(q); } static inline void qdio_set_state(struct qdio_irq *irq_ptr, @@ -977,7 +943,6 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; - int count; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); @@ -992,9 +957,8 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, goto no_handler; } - count = sub_buf(q->first_to_check, q->first_to_kick); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, - q->nr, q->first_to_kick, count, irq_ptr->int_parm); + q->nr, q->first_to_check, 0, irq_ptr->int_parm); no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); /* @@ -1154,35 +1118,27 @@ int qdio_shutdown(struct ccw_device *cdev, int how) /* cleanup subchannel */ spin_lock_irq(get_ccwdev_lock(cdev)); - + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); else /* default behaviour is halt */ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4d", rc); goto no_cleanup; } - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irq(get_ccwdev_lock(cdev)); wait_event_interruptible_timeout(cdev->private->wait_q, irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || irq_ptr->state == QDIO_IRQ_STATE_ERR, 10 * HZ); - spin_lock_irq(get_ccwdev_lock(cdev)); no_cleanup: qdio_shutdown_thinint(irq_ptr); - - /* restore interrupt handler */ - if ((void *)cdev->handler == (void *)qdio_int_handler) { - cdev->handler = irq_ptr->orig_handler; - cdev->private->intparm = 0; - } - spin_unlock_irq(get_ccwdev_lock(cdev)); + qdio_shutdown_irq(irq_ptr); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); mutex_unlock(&irq_ptr->setup_mutex); @@ -1213,7 +1169,11 @@ int qdio_free(struct ccw_device *cdev) cdev->private->qdio_data = NULL; mutex_unlock(&irq_ptr->setup_mutex); - qdio_release_memory(irq_ptr); + qdio_free_async_data(irq_ptr); + qdio_free_queues(irq_ptr); + free_page((unsigned long) irq_ptr->qdr); + free_page(irq_ptr->chsc_page); + free_page((unsigned long) irq_ptr); return 0; } EXPORT_SYMBOL_GPL(qdio_free); @@ -1229,6 +1189,7 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, { struct subchannel_id schid; struct qdio_irq *irq_ptr; + int rc = -ENOMEM; ccw_device_get_schid(cdev, &schid); DBF_EVENT("qallocate:%4x", schid.sch_no); @@ -1240,12 +1201,12 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr) - goto out_err; + return -ENOMEM; irq_ptr->cdev = cdev; mutex_init(&irq_ptr->setup_mutex); if (qdio_allocate_dbf(irq_ptr)) - goto out_rel; + goto err_dbf; DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs, no_output_qs); @@ -1258,24 +1219,30 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, */ irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); if (!irq_ptr->chsc_page) - goto out_rel; + goto err_chsc; /* qdr is used in ccw1.cda which is u32 */ irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | 
GFP_DMA); if (!irq_ptr->qdr) - goto out_rel; + goto err_qdr; - if (qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs)) - goto out_rel; + rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs); + if (rc) + goto err_queues; INIT_LIST_HEAD(&irq_ptr->entry); cdev->private->qdio_data = irq_ptr; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); return 0; -out_rel: - qdio_release_memory(irq_ptr); -out_err: - return -ENOMEM; + +err_queues: + free_page((unsigned long) irq_ptr->qdr); +err_qdr: + free_page(irq_ptr->chsc_page); +err_chsc: +err_dbf: + free_page((unsigned long) irq_ptr); + return rc; } EXPORT_SYMBOL_GPL(qdio_allocate); @@ -1338,6 +1305,10 @@ int qdio_establish(struct ccw_device *cdev, if (!irq_ptr) return -ENODEV; + if (init_data->no_input_qs > irq_ptr->max_input_qs || + init_data->no_output_qs > irq_ptr->max_output_qs) + return -EINVAL; + if ((init_data->no_input_qs && !init_data->input_handler) || (init_data->no_output_qs && !init_data->output_handler)) return -EINVAL; @@ -1352,8 +1323,8 @@ int qdio_establish(struct ccw_device *cdev, rc = qdio_establish_thinint(irq_ptr); if (rc) { + qdio_shutdown_irq(irq_ptr); mutex_unlock(&irq_ptr->setup_mutex); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; } @@ -1371,8 +1342,9 @@ int qdio_establish(struct ccw_device *cdev, if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); + qdio_shutdown_thinint(irq_ptr); + qdio_shutdown_irq(irq_ptr); mutex_unlock(&irq_ptr->setup_mutex); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; } @@ -1460,25 +1432,6 @@ out: } EXPORT_SYMBOL_GPL(qdio_activate); -static inline int buf_in_between(int bufnr, int start, int count) -{ - int end = add_buf(start, count); - - if (end > start) { - if (bufnr >= start && bufnr < end) - return 1; - else - return 0; - } - - /* wrap-around case */ - if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || - (bufnr < end)) - return 1; - else - return 0; -} - /** * handle_inbound - reset processed input buffers * @q: queue containing the buffers @@ -1489,36 +1442,18 @@ static inline int buf_in_between(int bufnr, int start, int count) static int handle_inbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { - int diff; + int overlap; qperf_inc(q, inbound_call); - if (!q->u.in.ack_count) - goto set; - - /* protect against stop polling setting an ACK for an emptied slsb */ - if (count == QDIO_MAX_BUFFERS_PER_Q) { - /* overwriting everything, just delete polling status */ - q->u.in.ack_count = 0; - goto set; - } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { - if (is_qebsm(q)) { - /* partial overwrite, just update ack_start */ - diff = add_buf(bufnr, count); - diff = sub_buf(diff, q->u.in.ack_start); - q->u.in.ack_count -= diff; - if (q->u.in.ack_count <= 0) { - q->u.in.ack_count = 0; - goto set; - } - q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); - } else { - /* the only ACK will be deleted */ - q->u.in.ack_count = 0; - } + /* If any ACKed SBALs are returned to HW, adjust ACK tracking: */ + overlap = min(count - sub_buf(q->u.in.ack_start, bufnr), + q->u.in.ack_count); + if (overlap > 0) { + q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap); + q->u.in.ack_count -= overlap; } -set: count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); atomic_add(count, &q->nr_buf_used); @@ -1627,7 +1562,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, EXPORT_SYMBOL_GPL(do_QDIO); /** - * qdio_start_irq - process input buffers + * qdio_start_irq - enable interrupt 
processing for the device * @cdev: associated ccw_device for the qdio subchannel * * Return codes @@ -1770,94 +1705,6 @@ int qdio_stop_irq(struct ccw_device *cdev) } EXPORT_SYMBOL(qdio_stop_irq); -/** - * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info. - * @schid: Subchannel ID. - * @cnc: Boolean Change-Notification Control - * @response: Response code will be stored at this address - * @cb: Callback function will be executed for each element - * of the address list - * @priv: Pointer to pass to the callback function. - * - * Performs "Store-network-bridging-information list" operation and calls - * the callback function for every entry in the list. If "change- - * notification-control" is set, further changes in the address list - * will be reported via the IPA command. - */ -int qdio_pnso_brinfo(struct subchannel_id schid, - int cnc, u16 *response, - void (*cb)(void *priv, enum qdio_brinfo_entry_type type, - void *entry), - void *priv) -{ - struct chsc_pnso_area *rr; - int rc; - u32 prev_instance = 0; - int isfirstblock = 1; - int i, size, elems; - - rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); - if (rr == NULL) - return -ENOMEM; - do { - /* on the first iteration, naihdr.resume_token will be zero */ - rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc); - if (rc != 0 && rc != -EBUSY) - goto out; - if (rr->response.code != 1) { - rc = -EIO; - continue; - } else - rc = 0; - - if (cb == NULL) - continue; - - size = rr->naihdr.naids; - elems = (rr->response.length - - sizeof(struct chsc_header) - - sizeof(struct chsc_brinfo_naihdr)) / - size; - - if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { - /* Inform the caller that they need to scrap */ - /* the data that was already reported via cb */ - rc = -EAGAIN; - break; - } - isfirstblock = 0; - prev_instance = rr->naihdr.instance; - for (i = 0; i < elems; i++) - switch (size) { - case sizeof(struct qdio_brinfo_entry_l3_ipv6): - (*cb)(priv, l3_ipv6_addr, - &rr->entries.l3_ipv6[i]); - break; - case sizeof(struct qdio_brinfo_entry_l3_ipv4): - (*cb)(priv, l3_ipv4_addr, - &rr->entries.l3_ipv4[i]); - break; - case sizeof(struct qdio_brinfo_entry_l2): - (*cb)(priv, l2_addr_lnid, - &rr->entries.l2[i]); - break; - default: - WARN_ON_ONCE(1); - rc = -EIO; - goto out; - } - } while (rr->response.code == 0x0107 || /* channel busy */ - (rr->response.code == 1 && /* list stored */ - /* resume token is non-zero => list incomplete */ - (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); - (*response) = rr->response.code; - -out: - free_page((unsigned long)rr); - return rc; -} -EXPORT_SYMBOL_GPL(qdio_pnso_brinfo); - static int __init init_QDIO(void) { int rc; @@ -1868,16 +1715,11 @@ static int __init init_QDIO(void) rc = qdio_setup_init(); if (rc) goto out_debug; - rc = tiqdio_allocate_memory(); + rc = qdio_thinint_init(); if (rc) goto out_cache; - rc = tiqdio_register_thinints(); - if (rc) - goto out_ti; return 0; -out_ti: - tiqdio_free_memory(); out_cache: qdio_setup_exit(); out_debug: @@ -1887,8 +1729,7 @@ out_debug: static void __exit exit_QDIO(void) { - tiqdio_unregister_thinints(); - tiqdio_free_memory(); + qdio_thinint_exit(); qdio_setup_exit(); qdio_debug_exit(); } diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 3083edd61f0c..2c5cc6ec668e 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -135,6 +135,27 @@ output: } } +static void __qdio_free_queues(struct qdio_q **queues, unsigned int count) +{ + struct qdio_q *q; + 
unsigned int i; + + for (i = 0; i < count; i++) { + q = queues[i]; + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } +} + +void qdio_free_queues(struct qdio_irq *irq_ptr) +{ + __qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs); + irq_ptr->max_input_qs = 0; + + __qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs); + irq_ptr->max_output_qs = 0; +} + static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) { struct qdio_q *q; @@ -142,12 +163,15 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) for (i = 0; i < nr_queues; i++) { q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); - if (!q) + if (!q) { + __qdio_free_queues(irq_ptr_qs, i); return -ENOMEM; + } q->slib = (struct slib *) __get_free_page(GFP_KERNEL); if (!q->slib) { kmem_cache_free(qdio_q_cache, q); + __qdio_free_queues(irq_ptr_qs, i); return -ENOMEM; } irq_ptr_qs[i] = q; @@ -162,8 +186,16 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs); if (rc) return rc; + rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs); - return rc; + if (rc) { + __qdio_free_queues(irq_ptr->input_qs, nr_input_qs); + return rc; + } + + irq_ptr->max_input_qs = nr_input_qs; + irq_ptr->max_output_qs = nr_output_qs; + return 0; } static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, @@ -347,45 +379,28 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac); } -void qdio_release_memory(struct qdio_irq *irq_ptr) +void qdio_free_async_data(struct qdio_irq *irq_ptr) { struct qdio_q *q; int i; - /* - * Must check queue array manually since irq_ptr->nr_input_queues / - * irq_ptr->nr_input_queues may not yet be set. 
- */ - for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { - q = irq_ptr->input_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - } - for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + for (i = 0; i < irq_ptr->max_output_qs; i++) { q = irq_ptr->output_qs[i]; - if (q) { - if (q->u.out.use_cq) { - int n; - - for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) { - struct qaob *aob = q->u.out.aobs[n]; - if (aob) { - qdio_release_aob(aob); - q->u.out.aobs[n] = NULL; - } - } + if (q->u.out.use_cq) { + unsigned int n; + + for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) { + struct qaob *aob = q->u.out.aobs[n]; - qdio_disable_async_operation(&q->u.out); + if (aob) { + qdio_release_aob(aob); + q->u.out.aobs[n] = NULL; + } } - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); + + qdio_disable_async_operation(&q->u.out); } } - free_page((unsigned long) irq_ptr->qdr); - free_page(irq_ptr->chsc_page); - free_page((unsigned long) irq_ptr); } static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, @@ -480,7 +495,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) } setup_qib(irq_ptr, init_data); - qdio_setup_thinint(irq_ptr); set_impl_params(irq_ptr, init_data->qib_param_field_format, init_data->qib_param_field, init_data->input_slib_elements, @@ -491,6 +505,12 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) /* qdr, qib, sls, slsbs, slibs, sbales are filled now */ + /* set our IRQ handler */ + spin_lock_irq(get_ccwdev_lock(cdev)); + irq_ptr->orig_handler = cdev->handler; + cdev->handler = qdio_int_handler; + spin_unlock_irq(get_ccwdev_lock(cdev)); + /* get qdio commands */ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE); if (!ciw) { @@ -506,12 +526,18 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) } irq_ptr->aqueue = *ciw; - /* set new interrupt handler */ + return 0; +} + +void qdio_shutdown_irq(struct qdio_irq *irq) +{ + struct ccw_device *cdev = irq->cdev; + + /* restore IRQ handler */ spin_lock_irq(get_ccwdev_lock(cdev)); - irq_ptr->orig_handler = cdev->handler; - cdev->handler = qdio_int_handler; + cdev->handler = irq->orig_handler; + cdev->private->intparm = 0; spin_unlock_irq(get_ccwdev_lock(cdev)); - return 0; } void qdio_print_subchannel_info(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index ae50373617cd..7a440e4328cd 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -197,47 +197,21 @@ out: return rc; } -/* allocate non-shared indicators and shared indicator */ -int __init tiqdio_allocate_memory(void) -{ - q_indicators = kcalloc(TIQDIO_NR_INDICATORS, - sizeof(struct indicator_t), - GFP_KERNEL); - if (!q_indicators) - return -ENOMEM; - return 0; -} - -void tiqdio_free_memory(void) -{ - kfree(q_indicators); -} - -int __init tiqdio_register_thinints(void) +int qdio_establish_thinint(struct qdio_irq *irq_ptr) { int rc; - rc = register_adapter_interrupt(&tiqdio_airq); - if (rc) { - DBF_EVENT("RTI:%x", rc); - return rc; - } - return 0; -} - -int qdio_establish_thinint(struct qdio_irq *irq_ptr) -{ if (!is_thinint_irq(irq_ptr)) return 0; - return set_subchannel_ind(irq_ptr, 0); -} -void qdio_setup_thinint(struct qdio_irq *irq_ptr) -{ - if (!is_thinint_irq(irq_ptr)) - return; irq_ptr->dsci = get_indicator(); DBF_HEX(&irq_ptr->dsci, sizeof(void *)); + + rc = set_subchannel_ind(irq_ptr, 0); + if (rc) + put_indicator(irq_ptr->dsci); + + return rc; } 
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) @@ -250,8 +224,27 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) put_indicator(irq_ptr->dsci); } -void __exit tiqdio_unregister_thinints(void) +int __init qdio_thinint_init(void) +{ + int rc; + + q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t), + GFP_KERNEL); + if (!q_indicators) + return -ENOMEM; + + rc = register_adapter_interrupt(&tiqdio_airq); + if (rc) { + DBF_EVENT("RTI:%x", rc); + kfree(q_indicators); + return rc; + } + return 0; +} + +void __exit qdio_thinint_exit(void) { WARN_ON(!list_empty(&tiq_list)); unregister_adapter_interrupt(&tiqdio_airq); + kfree(q_indicators); } diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c new file mode 100644 index 000000000000..a646fc81c872 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_chp.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Channel path related status regions for vfio_ccw + * + * Copyright IBM Corp. 2020 + * + * Author(s): Farhan Ali <alifm@linux.ibm.com> + * Eric Farman <farman@linux.ibm.com> + */ + +#include <linux/vfio.h> +#include "vfio_ccw_private.h" + +static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private, + char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS; + loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; + struct ccw_schib_region *region; + int ret; + + if (pos + count > sizeof(*region)) + return -EINVAL; + + mutex_lock(&private->io_mutex); + region = private->region[i].data; + + if (cio_update_schib(private->sch)) { + ret = -ENODEV; + goto out; + } + + memcpy(region, &private->sch->schib, sizeof(*region)); + + if (copy_to_user(buf, (void *)region + pos, count)) { + ret = -EFAULT; + goto out; + } + + ret = count; + +out: + mutex_unlock(&private->io_mutex); + return ret; +} + +static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private, + const char __user *buf, size_t count, + loff_t *ppos) +{ + return -EINVAL; +} + + +static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private, + struct vfio_ccw_region *region) +{ + +} + +static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = { + .read = vfio_ccw_schib_region_read, + .write = vfio_ccw_schib_region_write, + .release = vfio_ccw_schib_region_release, +}; + +int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private) +{ + return vfio_ccw_register_dev_region(private, + VFIO_REGION_SUBTYPE_CCW_SCHIB, + &vfio_ccw_schib_region_ops, + sizeof(struct ccw_schib_region), + VFIO_REGION_INFO_FLAG_READ, + private->schib_region); +} + +static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private, + char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS; + loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; + struct ccw_crw_region *region; + struct vfio_ccw_crw *crw; + int ret; + + if (pos + count > sizeof(*region)) + return -EINVAL; + + crw = list_first_entry_or_null(&private->crw, + struct vfio_ccw_crw, next); + + if (crw) + list_del(&crw->next); + + mutex_lock(&private->io_mutex); + region = private->region[i].data; + + if (crw) + memcpy(®ion->crw, &crw->crw, sizeof(region->crw)); + + if (copy_to_user(buf, (void *)region + pos, count)) + ret = -EFAULT; + else + ret = count; + + region->crw = 0; + + mutex_unlock(&private->io_mutex); + + kfree(crw); + + /* Notify the guest if more CRWs are on our queue */ + if (!list_empty(&private->crw) && 
private->crw_trigger) + eventfd_signal(private->crw_trigger, 1); + + return ret; +} + +static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private, + const char __user *buf, size_t count, + loff_t *ppos) +{ + return -EINVAL; +} + +static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private, + struct vfio_ccw_region *region) +{ + +} + +static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = { + .read = vfio_ccw_crw_region_read, + .write = vfio_ccw_crw_region_write, + .release = vfio_ccw_crw_region_release, +}; + +int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private) +{ + return vfio_ccw_register_dev_region(private, + VFIO_REGION_SUBTYPE_CCW_CRW, + &vfio_ccw_crw_region_ops, + sizeof(struct ccw_crw_region), + VFIO_REGION_INFO_FLAG_READ, + private->crw_region); +} diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 3645d1720c4b..b9febc581b1f 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -8,6 +8,7 @@ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> */ +#include <linux/ratelimit.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/iommu.h> @@ -625,23 +626,27 @@ static int ccwchain_fetch_one(struct ccwchain *chain, * the target channel program from @orb->cmd.iova to the new ccwchain(s). * * Limitations: - * 1. Supports only prefetch enabled mode. - * 2. Supports idal(c64) ccw chaining. - * 3. Supports 4k idaw. + * 1. Supports idal(c64) ccw chaining. + * 2. Supports 4k idaw. * * Returns: * %0 on success and a negative error value on failure. */ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) { + /* custom ratelimit used to avoid flood during guest IPL */ + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1); int ret; /* - * XXX: - * Only support prefetch enable mode now. + * We only support prefetching the channel program. We assume all channel + * programs executed by supported guests likewise support prefetching. + * Executing a channel program that does not specify prefetching will + * typically not cause an error, but a warning is issued to help identify + * the problem if something does break. 
*/ - if (!orb->cmd.pfch) - return -EOPNOTSUPP; + if (!orb->cmd.pfch && __ratelimit(&ratelimit_state)) + dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB"); INIT_LIST_HEAD(&cp->ccwchain_list); memcpy(&cp->orb, orb, sizeof(*orb)); diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 339a6bc0339b..8c625b530035 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -19,6 +19,7 @@ #include <asm/isc.h> +#include "chp.h" #include "ioasm.h" #include "css.h" #include "vfio_ccw_private.h" @@ -26,6 +27,8 @@ struct workqueue_struct *vfio_ccw_work_q; static struct kmem_cache *vfio_ccw_io_region; static struct kmem_cache *vfio_ccw_cmd_region; +static struct kmem_cache *vfio_ccw_schib_region; +static struct kmem_cache *vfio_ccw_crw_region; debug_info_t *vfio_ccw_debug_msg_id; debug_info_t *vfio_ccw_debug_trace_id; @@ -105,6 +108,16 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) eventfd_signal(private->io_trigger, 1); } +static void vfio_ccw_crw_todo(struct work_struct *work) +{ + struct vfio_ccw_private *private; + + private = container_of(work, struct vfio_ccw_private, crw_work); + + if (!list_empty(&private->crw) && private->crw_trigger) + eventfd_signal(private->crw_trigger, 1); +} + /* * Css driver callbacks */ @@ -116,6 +129,18 @@ static void vfio_ccw_sch_irq(struct subchannel *sch) vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT); } +static void vfio_ccw_free_regions(struct vfio_ccw_private *private) +{ + if (private->crw_region) + kmem_cache_free(vfio_ccw_crw_region, private->crw_region); + if (private->schib_region) + kmem_cache_free(vfio_ccw_schib_region, private->schib_region); + if (private->cmd_region) + kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); + if (private->io_region) + kmem_cache_free(vfio_ccw_io_region, private->io_region); +} + static int vfio_ccw_sch_probe(struct subchannel *sch) { struct pmcw *pmcw = &sch->schib.pmcw; @@ -147,6 +172,18 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (!private->cmd_region) goto out_free; + private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region, + GFP_KERNEL | GFP_DMA); + + if (!private->schib_region) + goto out_free; + + private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region, + GFP_KERNEL | GFP_DMA); + + if (!private->crw_region) + goto out_free; + private->sch = sch; dev_set_drvdata(&sch->dev, private); mutex_init(&private->io_mutex); @@ -159,7 +196,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_free; + INIT_LIST_HEAD(&private->crw); INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); + INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); atomic_set(&private->avail, 1); private->state = VFIO_CCW_STATE_STANDBY; @@ -181,10 +220,7 @@ out_disable: cio_disable_subchannel(sch); out_free: dev_set_drvdata(&sch->dev, NULL); - if (private->cmd_region) - kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); - if (private->io_region) - kmem_cache_free(vfio_ccw_io_region, private->io_region); + vfio_ccw_free_regions(private); kfree(private->cp.guest_cp); kfree(private); return ret; @@ -193,15 +229,20 @@ out_free: static int vfio_ccw_sch_remove(struct subchannel *sch) { struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + struct vfio_ccw_crw *crw, *temp; vfio_ccw_sch_quiesce(sch); + list_for_each_entry_safe(crw, temp, &private->crw, next) { + list_del(&crw->next); + kfree(crw); + } + vfio_ccw_mdev_unreg(sch); dev_set_drvdata(&sch->dev, NULL); - 
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); - kmem_cache_free(vfio_ccw_io_region, private->io_region); + vfio_ccw_free_regions(private); kfree(private->cp.guest_cp); kfree(private); @@ -258,6 +299,83 @@ out_unlock: return rc; } +static void vfio_ccw_queue_crw(struct vfio_ccw_private *private, + unsigned int rsc, + unsigned int erc, + unsigned int rsid) +{ + struct vfio_ccw_crw *crw; + + /* + * If unable to allocate a CRW, just drop the event and + * carry on. The guest will either see a later one or + * learn when it issues its own store subchannel. + */ + crw = kzalloc(sizeof(*crw), GFP_ATOMIC); + if (!crw) + return; + + /* + * Build the CRW based on the inputs given to us. + */ + crw->crw.rsc = rsc; + crw->crw.erc = erc; + crw->crw.rsid = rsid; + + list_add_tail(&crw->next, &private->crw); + queue_work(vfio_ccw_work_q, &private->crw_work); +} + +static int vfio_ccw_chp_event(struct subchannel *sch, + struct chp_link *link, int event) +{ + struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + int mask = chp_ssd_get_mask(&sch->ssd_info, link); + int retry = 255; + + if (!private || !mask) + return 0; + + trace_vfio_ccw_chp_event(private->sch->schid, mask, event); + VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n", + mdev_uuid(private->mdev), sch->schid.cssid, + sch->schid.ssid, sch->schid.sch_no, + mask, event); + + if (cio_update_schib(sch)) + return -ENODEV; + + switch (event) { + case CHP_VARY_OFF: + /* Path logically turned off */ + sch->opm &= ~mask; + sch->lpm &= ~mask; + if (sch->schib.pmcw.lpum & mask) + cio_cancel_halt_clear(sch, &retry); + break; + case CHP_OFFLINE: + /* Path is gone */ + if (sch->schib.pmcw.lpum & mask) + cio_cancel_halt_clear(sch, &retry); + vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN, + link->chpid.id); + break; + case CHP_VARY_ON: + /* Path logically turned on */ + sch->opm |= mask; + sch->lpm |= mask; + break; + case CHP_ONLINE: + /* Path became available */ + sch->lpm |= mask & sch->opm; + vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT, + link->chpid.id); + break; + } + + return 0; +} + static struct css_device_id vfio_ccw_sch_ids[] = { { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, { /* end of list */ }, @@ -275,6 +393,7 @@ static struct css_driver vfio_ccw_sch_driver = { .remove = vfio_ccw_sch_remove, .shutdown = vfio_ccw_sch_shutdown, .sch_event = vfio_ccw_sch_event, + .chp_event = vfio_ccw_chp_event, }; static int __init vfio_ccw_debug_init(void) @@ -304,6 +423,14 @@ static void vfio_ccw_debug_exit(void) debug_unregister(vfio_ccw_debug_trace_id); } +static void vfio_ccw_destroy_regions(void) +{ + kmem_cache_destroy(vfio_ccw_crw_region); + kmem_cache_destroy(vfio_ccw_schib_region); + kmem_cache_destroy(vfio_ccw_cmd_region); + kmem_cache_destroy(vfio_ccw_io_region); +} + static int __init vfio_ccw_sch_init(void) { int ret; @@ -336,6 +463,26 @@ static int __init vfio_ccw_sch_init(void) goto out_err; } + vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region", + sizeof(struct ccw_schib_region), 0, + SLAB_ACCOUNT, 0, + sizeof(struct ccw_schib_region), NULL); + + if (!vfio_ccw_schib_region) { + ret = -ENOMEM; + goto out_err; + } + + vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region", + sizeof(struct ccw_crw_region), 0, + SLAB_ACCOUNT, 0, + sizeof(struct ccw_crw_region), NULL); + + if (!vfio_ccw_crw_region) { + ret = -ENOMEM; + goto out_err; + } + isc_register(VFIO_CCW_ISC); ret = css_driver_register(&vfio_ccw_sch_driver); if (ret) { @@ -346,8 +493,7 @@ 
static int __init vfio_ccw_sch_init(void) return ret; out_err: - kmem_cache_destroy(vfio_ccw_cmd_region); - kmem_cache_destroy(vfio_ccw_io_region); + vfio_ccw_destroy_regions(); destroy_workqueue(vfio_ccw_work_q); vfio_ccw_debug_exit(); return ret; @@ -357,8 +503,7 @@ static void __exit vfio_ccw_sch_exit(void) { css_driver_unregister(&vfio_ccw_sch_driver); isc_unregister(VFIO_CCW_ISC); - kmem_cache_destroy(vfio_ccw_io_region); - kmem_cache_destroy(vfio_ccw_cmd_region); + vfio_ccw_destroy_regions(); destroy_workqueue(vfio_ccw_work_q); vfio_ccw_debug_exit(); } diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index f0d71ab77c50..8b3ed5b45277 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -172,8 +172,22 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev) ret = vfio_ccw_register_async_dev_regions(private); if (ret) - vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, - &private->nb); + goto out_unregister; + + ret = vfio_ccw_register_schib_dev_regions(private); + if (ret) + goto out_unregister; + + ret = vfio_ccw_register_crw_dev_regions(private); + if (ret) + goto out_unregister; + + return ret; + +out_unregister: + vfio_ccw_unregister_dev_regions(private); + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &private->nb); return ret; } @@ -181,7 +195,6 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev) { struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); - int i; if ((private->state != VFIO_CCW_STATE_NOT_OPER) && (private->state != VFIO_CCW_STATE_STANDBY)) { @@ -191,15 +204,9 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev) } cp_free(&private->cp); + vfio_ccw_unregister_dev_regions(private); vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &private->nb); - - for (i = 0; i < private->num_regions; i++) - private->region[i].ops->release(private, &private->region[i]); - - private->num_regions = 0; - kfree(private->region); - private->region = NULL; } static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private, @@ -384,17 +391,22 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) { - if (info->index != VFIO_CCW_IO_IRQ_INDEX) + switch (info->index) { + case VFIO_CCW_IO_IRQ_INDEX: + case VFIO_CCW_CRW_IRQ_INDEX: + info->count = 1; + info->flags = VFIO_IRQ_INFO_EVENTFD; + break; + default: return -EINVAL; - - info->count = 1; - info->flags = VFIO_IRQ_INFO_EVENTFD; + } return 0; } static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev, uint32_t flags, + uint32_t index, void __user *data) { struct vfio_ccw_private *private; @@ -404,7 +416,17 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev, return -EINVAL; private = dev_get_drvdata(mdev_parent_dev(mdev)); - ctx = &private->io_trigger; + + switch (index) { + case VFIO_CCW_IO_IRQ_INDEX: + ctx = &private->io_trigger; + break; + case VFIO_CCW_CRW_IRQ_INDEX: + ctx = &private->crw_trigger; + break; + default: + return -EINVAL; + } switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { case VFIO_IRQ_SET_DATA_NONE: @@ -482,6 +504,17 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private, return 0; } +void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private) +{ + int i; + + for (i = 0; i < private->num_regions; i++) + private->region[i].ops->release(private, &private->region[i]); + private->num_regions = 0; + kfree(private->region); + private->region = NULL; +} + 
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) @@ -565,7 +598,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, return ret; data = (void __user *)(arg + minsz); - return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data); + return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data); } case VFIO_DEVICE_RESET: return vfio_ccw_mdev_reset(mdev); diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index 9b9bb4982972..8723156b29ea 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -17,6 +17,7 @@ #include <linux/eventfd.h> #include <linux/workqueue.h> #include <linux/vfio_ccw.h> +#include <asm/crw.h> #include <asm/debug.h> #include "css.h" @@ -53,8 +54,16 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private, unsigned int subtype, const struct vfio_ccw_regops *ops, size_t size, u32 flags, void *data); +void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private); int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private); +int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private); +int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private); + +struct vfio_ccw_crw { + struct list_head next; + struct crw crw; +}; /** * struct vfio_ccw_private @@ -68,6 +77,8 @@ int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private); * @io_mutex: protect against concurrent update of I/O regions * @region: additional regions for other subchannel operations * @cmd_region: MMIO region for asynchronous I/O commands other than START + * @schib_region: MMIO region for SCHIB information + * @crw_region: MMIO region for getting channel report words * @num_regions: number of additional regions * @cp: channel program for the current I/O operation * @irb: irb info received from interrupt @@ -86,14 +97,19 @@ struct vfio_ccw_private { struct mutex io_mutex; struct vfio_ccw_region *region; struct ccw_cmd_region *cmd_region; + struct ccw_schib_region *schib_region; + struct ccw_crw_region *crw_region; int num_regions; struct channel_program cp; struct irb irb; union scsw scsw; + struct list_head crw; struct eventfd_ctx *io_trigger; + struct eventfd_ctx *crw_trigger; struct work_struct io_work; + struct work_struct crw_work; } __aligned(8); extern int vfio_ccw_mdev_reg(struct subchannel *sch); diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c index 8c671d2519f6..4a0205905afc 100644 --- a/drivers/s390/cio/vfio_ccw_trace.c +++ b/drivers/s390/cio/vfio_ccw_trace.c @@ -9,6 +9,7 @@ #define CREATE_TRACE_POINTS #include "vfio_ccw_trace.h" +EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_chp_event); EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request); EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event); EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request); diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h index f5d31887d413..62fb30598d47 100644 --- a/drivers/s390/cio/vfio_ccw_trace.h +++ b/drivers/s390/cio/vfio_ccw_trace.h @@ -17,6 +17,36 @@ #include <linux/tracepoint.h> +TRACE_EVENT(vfio_ccw_chp_event, + TP_PROTO(struct subchannel_id schid, + int mask, + int event), + TP_ARGS(schid, mask, event), + + TP_STRUCT__entry( + __field(u8, cssid) + __field(u8, ssid) + __field(u16, sch_no) + __field(int, mask) + __field(int, event) + ), + + TP_fast_assign( + __entry->cssid = schid.cssid; + __entry->ssid = schid.ssid; + __entry->sch_no = schid.sch_no; + __entry->mask = mask; + 
__entry->event = event; + ), + + TP_printk("schid=%x.%x.%04x mask=0x%x event=%d", + __entry->cssid, + __entry->ssid, + __entry->sch_no, + __entry->mask, + __entry->event) +); + TRACE_EVENT(vfio_ccw_fsm_async_request, TP_PROTO(struct subchannel_id schid, int command, diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 35064443e748..e71ca4a719a5 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -62,8 +62,10 @@ MODULE_PARM_DESC(aqmask, "AP bus domain mask."); static struct device *ap_root_device; -DEFINE_SPINLOCK(ap_list_lock); -LIST_HEAD(ap_card_list); +/* Hashtable of all queue devices on the AP bus */ +DEFINE_HASHTABLE(ap_queues, 8); +/* lock used for the ap_queues hashtable */ +DEFINE_SPINLOCK(ap_queues_lock); /* Default permissions (ioctl, card and domain masking) */ struct ap_perms ap_perms; @@ -414,7 +416,7 @@ static void ap_interrupt_handler(struct airq_struct *airq, bool floating) */ static void ap_tasklet_fn(unsigned long dummy) { - struct ap_card *ac; + int bkt; struct ap_queue *aq; enum ap_wait wait = AP_WAIT_NONE; @@ -425,34 +427,30 @@ static void ap_tasklet_fn(unsigned long dummy) if (ap_using_interrupts()) xchg(ap_airq.lsi_ptr, 0); - spin_lock_bh(&ap_list_lock); - for_each_ap_card(ac) { - for_each_ap_queue(aq, ac) { - spin_lock_bh(&aq->lock); - wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL)); - spin_unlock_bh(&aq->lock); - } + spin_lock_bh(&ap_queues_lock); + hash_for_each(ap_queues, bkt, aq, hnode) { + spin_lock_bh(&aq->lock); + wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); } - spin_unlock_bh(&ap_list_lock); + spin_unlock_bh(&ap_queues_lock); ap_wait(wait); } static int ap_pending_requests(void) { - struct ap_card *ac; + int bkt; struct ap_queue *aq; - spin_lock_bh(&ap_list_lock); - for_each_ap_card(ac) { - for_each_ap_queue(aq, ac) { - if (aq->queue_count == 0) - continue; - spin_unlock_bh(&ap_list_lock); - return 1; - } + spin_lock_bh(&ap_queues_lock); + hash_for_each(ap_queues, bkt, aq, hnode) { + if (aq->queue_count == 0) + continue; + spin_unlock_bh(&ap_queues_lock); + return 1; } - spin_unlock_bh(&ap_list_lock); + spin_unlock_bh(&ap_queues_lock); return 0; } @@ -683,24 +681,20 @@ static int ap_device_probe(struct device *dev) } /* Add queue/card to list of active queues/cards */ - spin_lock_bh(&ap_list_lock); - if (is_card_dev(dev)) - list_add(&to_ap_card(dev)->list, &ap_card_list); - else - list_add(&to_ap_queue(dev)->list, - &to_ap_queue(dev)->card->queues); - spin_unlock_bh(&ap_list_lock); + spin_lock_bh(&ap_queues_lock); + if (is_queue_dev(dev)) + hash_add(ap_queues, &to_ap_queue(dev)->hnode, + to_ap_queue(dev)->qid); + spin_unlock_bh(&ap_queues_lock); ap_dev->drv = ap_drv; rc = ap_drv->probe ? 
ap_drv->probe(ap_dev) : -ENODEV; if (rc) { - spin_lock_bh(&ap_list_lock); - if (is_card_dev(dev)) - list_del_init(&to_ap_card(dev)->list); - else - list_del_init(&to_ap_queue(dev)->list); - spin_unlock_bh(&ap_list_lock); + spin_lock_bh(&ap_queues_lock); + if (is_queue_dev(dev)) + hash_del(&to_ap_queue(dev)->hnode); + spin_unlock_bh(&ap_queues_lock); ap_dev->drv = NULL; } @@ -725,16 +719,33 @@ static int ap_device_remove(struct device *dev) ap_queue_remove(to_ap_queue(dev)); /* Remove queue/card from list of active queues/cards */ - spin_lock_bh(&ap_list_lock); - if (is_card_dev(dev)) - list_del_init(&to_ap_card(dev)->list); - else - list_del_init(&to_ap_queue(dev)->list); - spin_unlock_bh(&ap_list_lock); + spin_lock_bh(&ap_queues_lock); + if (is_queue_dev(dev)) + hash_del(&to_ap_queue(dev)->hnode); + spin_unlock_bh(&ap_queues_lock); return 0; } +struct ap_queue *ap_get_qdev(ap_qid_t qid) +{ + int bkt; + struct ap_queue *aq; + + spin_lock_bh(&ap_queues_lock); + hash_for_each(ap_queues, bkt, aq, hnode) { + if (aq->qid == qid) { + get_device(&aq->ap_dev.device); + spin_unlock_bh(&ap_queues_lock); + return aq; + } + } + spin_unlock_bh(&ap_queues_lock); + + return NULL; +} +EXPORT_SYMBOL(ap_get_qdev); + int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, char *name) { @@ -1506,6 +1517,9 @@ static int __init ap_module_init(void) return -ENODEV; } + /* init ap_queue hashtable */ + hash_init(ap_queues); + /* set up the AP permissions (ioctls, ap and aq masks) */ ap_perms_init(); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 8e8e37b6c0ee..053cc34d2ca2 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -15,6 +15,7 @@ #include <linux/device.h> #include <linux/types.h> +#include <linux/hashtable.h> #include <asm/isc.h> #include <asm/ap.h> @@ -27,8 +28,8 @@ extern int ap_domain_index; -extern spinlock_t ap_list_lock; -extern struct list_head ap_card_list; +extern DECLARE_HASHTABLE(ap_queues, 8); +extern spinlock_t ap_queues_lock; static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) { @@ -152,8 +153,6 @@ struct ap_device { struct ap_card { struct ap_device ap_dev; - struct list_head list; /* Private list of AP cards. */ - struct list_head queues; /* List of assoc. AP queues */ void *private; /* ap driver private pointer. */ int raw_hwtype; /* AP raw hardware type. */ unsigned int functions; /* AP device function bitfield. */ @@ -166,7 +165,7 @@ struct ap_card { struct ap_queue { struct ap_device ap_dev; - struct list_head list; /* Private list of AP queues. */ + struct hlist_node hnode; /* Node for the ap_queues hashtable */ struct ap_card *card; /* Ptr to assoc. AP card. */ spinlock_t lock; /* Per device lock. */ void *private; /* ap driver private pointer. */ @@ -223,12 +222,6 @@ static inline void ap_release_message(struct ap_message *ap_msg) kzfree(ap_msg->private); } -#define for_each_ap_card(_ac) \ - list_for_each_entry(_ac, &ap_card_list, list) - -#define for_each_ap_queue(_aq, _ac) \ - list_for_each_entry(_aq, &(_ac)->queues, list) - /* * Note: don't use ap_send/ap_recv after using ap_queue_message * for the first time. Otherwise the ap message queue will get @@ -270,6 +263,16 @@ extern struct ap_perms ap_perms; extern struct mutex ap_perms_mutex; /* + * Get ap_queue device for this qid. + * Returns ptr to the struct ap_queue device or NULL if there + * was no ap_queue device with this qid found. When something is + * found, the reference count of the embedded device is increased. 
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 8e8e37b6c0ee..053cc34d2ca2 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/types.h>
+#include <linux/hashtable.h>
 #include <asm/isc.h>
 #include <asm/ap.h>
 
@@ -27,8 +28,8 @@
 
 extern int ap_domain_index;
 
-extern spinlock_t ap_list_lock;
-extern struct list_head ap_card_list;
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
 
 static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 {
@@ -152,8 +153,6 @@ struct ap_device {
 
 struct ap_card {
	struct ap_device ap_dev;
-	struct list_head list;		/* Private list of AP cards. */
-	struct list_head queues;	/* List of assoc. AP queues */
	void *private;			/* ap driver private pointer. */
	int raw_hwtype;			/* AP raw hardware type. */
	unsigned int functions;		/* AP device function bitfield. */
@@ -166,7 +165,7 @@ struct ap_card {
 
 struct ap_queue {
	struct ap_device ap_dev;
-	struct list_head list;		/* Private list of AP queues. */
+	struct hlist_node hnode;	/* Node for the ap_queues hashtable */
	struct ap_card *card;		/* Ptr to assoc. AP card. */
	spinlock_t lock;		/* Per device lock. */
	void *private;			/* ap driver private pointer. */
@@ -223,12 +222,6 @@ static inline void ap_release_message(struct ap_message *ap_msg)
	kzfree(ap_msg->private);
 }
 
-#define for_each_ap_card(_ac) \
-	list_for_each_entry(_ac, &ap_card_list, list)
-
-#define for_each_ap_queue(_aq, _ac) \
-	list_for_each_entry(_aq, &(_ac)->queues, list)
-
 /*
  * Note: don't use ap_send/ap_recv after using ap_queue_message
  * for the first time. Otherwise the ap message queue will get
@@ -270,6 +263,16 @@ extern struct ap_perms ap_perms;
 extern struct mutex ap_perms_mutex;
 
 /*
+ * Get ap_queue device for this qid.
+ * Returns ptr to the struct ap_queue device or NULL if there
+ * was no ap_queue device with this qid found. When something is
+ * found, the reference count of the embedded device is increased.
+ * So the caller has to decrease the reference count after use
+ * with a call to put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
+
+/*
  * check APQN for owned/reserved by ap bus and default driver(s).
  * Checks if this APQN is or will be in use by the ap bus
  * and the default set of drivers.
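Given the reference-counting contract documented in the comment above, a caller might look like the following sketch; demo_use_queue() is hypothetical, while ap_get_qdev(), AP_MKQID() (also from ap_bus.h) and put_device() are the real interfaces:

#include <linux/device.h>
#include "ap_bus.h"

static int demo_use_queue(int card, int domain)
{
	/* AP_MKQID() folds card and domain into an ap_qid_t */
	struct ap_queue *aq = ap_get_qdev(AP_MKQID(card, domain));

	if (!aq)
		return -ENODEV;		/* no such queue on the bus */

	/* ... use aq here; the embedded device cannot go away under us ... */

	put_device(&aq->ap_dev.device);	/* drop the reference ap_get_qdev() took */
	return 0;
}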
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 0a39dfdb6a1d..6588713319ba 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -66,9 +66,9 @@ static ssize_t request_count_show(struct device *dev,
	u64 req_cnt;
 
	req_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
	req_cnt = atomic64_read(&ac->total_request_count);
-	spin_unlock_bh(&ap_list_lock);
+	spin_unlock_bh(&ap_queues_lock);
	return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
 }
 
@@ -76,13 +76,15 @@ static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
	struct ap_queue *aq;
+	struct ap_card *ac = to_ap_card(dev);
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		aq->total_request_count = 0;
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			aq->total_request_count = 0;
+	spin_unlock_bh(&ap_queues_lock);
	atomic64_set(&ac->total_request_count, 0);
 
	return count;
@@ -93,15 +95,17 @@ static DEVICE_ATTR_RW(request_count);
 static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
	struct ap_queue *aq;
	unsigned int reqq_cnt;
+	struct ap_card *ac = to_ap_card(dev);
 
	reqq_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		reqq_cnt += aq->requestq_count;
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			reqq_cnt += aq->requestq_count;
+	spin_unlock_bh(&ap_queues_lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
 }
 
@@ -110,15 +114,17 @@ static DEVICE_ATTR_RO(requestq_count);
 static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
	struct ap_queue *aq;
	unsigned int penq_cnt;
+	struct ap_card *ac = to_ap_card(dev);
 
	penq_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		penq_cnt += aq->pendingq_count;
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			penq_cnt += aq->pendingq_count;
+	spin_unlock_bh(&ap_queues_lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
 }
 
@@ -163,11 +169,6 @@ static void ap_card_device_release(struct device *dev)
 {
	struct ap_card *ac = to_ap_card(dev);
 
-	if (!list_empty(&ac->list)) {
-		spin_lock_bh(&ap_list_lock);
-		list_del_init(&ac->list);
-		spin_unlock_bh(&ap_list_lock);
-	}
	kfree(ac);
 }
 
@@ -179,8 +180,6 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return NULL;
-	INIT_LIST_HEAD(&ac->list);
-	INIT_LIST_HEAD(&ac->queues);
	ac->ap_dev.device.release = ap_card_device_release;
	ac->ap_dev.device.type = &ap_card_type;
	ac->ap_dev.device_type = comp_type;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 0eaf1d04e8df..73b077dca3e6 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -568,11 +568,10 @@ static void ap_queue_device_release(struct device *dev)
 {
	struct ap_queue *aq = to_ap_queue(dev);
 
-	if (!list_empty(&aq->list)) {
-		spin_lock_bh(&ap_list_lock);
-		list_del_init(&aq->list);
-		spin_unlock_bh(&ap_list_lock);
-	}
+	spin_lock_bh(&ap_queues_lock);
+	hash_del(&aq->hnode);
+	spin_unlock_bh(&ap_queues_lock);
+
	kfree(aq);
 }
 
@@ -590,7 +589,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
	aq->state = AP_STATE_UNBOUND;
	aq->interrupt = AP_INTR_DISABLED;
	spin_lock_init(&aq->lock);
-	INIT_LIST_HEAD(&aq->list);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index da47e423e1b1..2d3bca3c0141 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/hashtable.h>
+#include <asm/chsc.h>
 #include <asm/setup.h>
 #include "qeth_core.h"
 #include "qeth_l2.h"
@@ -27,8 +28,8 @@
 static void qeth_bridgeport_query_support(struct qeth_card *card);
 static void qeth_bridge_state_change(struct qeth_card *card,
				     struct qeth_ipa_cmd *cmd);
-static void qeth_bridge_host_event(struct qeth_card *card,
-				   struct qeth_ipa_cmd *cmd);
+static void qeth_addr_change_event(struct qeth_card *card,
+				   struct qeth_ipa_cmd *cmd);
 static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
 static void qeth_l2_vnicc_init(struct qeth_card *card);
 static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
@@ -629,6 +630,72 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
	schedule_work(&card->rx_mode_work);
 }
 
+/**
+ * qeth_l2_pnso() - perform network subchannel operation
+ * @card: qeth_card structure pointer
+ * @cnc: Boolean Change-Notification Control
+ * @cb: Callback function will be executed for each element
+ *	of the address list
+ * @priv: Pointer to pass to the callback function.
+ *
+ * Collects network information in a network address list and calls the
+ * callback function for every entry in the list. If "change-notification-
+ * control" is set, further changes in the address list will be reported
+ * via the IPA command.
+ */
+static int qeth_l2_pnso(struct qeth_card *card, int cnc,
+			void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
+			void *priv)
+{
+	struct ccw_device *ddev = CARD_DDEV(card);
+	struct chsc_pnso_area *rr;
+	u32 prev_instance = 0;
+	int isfirstblock = 1;
+	int i, size, elems;
+	int rc;
+
+	QETH_CARD_TEXT(card, 2, "PNSO");
+	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+	if (rr == NULL)
+		return -ENOMEM;
+	do {
+		/* on the first iteration, naihdr.resume_token will be zero */
+		rc = ccw_device_pnso(ddev, rr, rr->naihdr.resume_token, cnc);
+		if (rc)
+			continue;
+		if (cb == NULL)
+			continue;
+
+		size = rr->naihdr.naids;
+		if (size != sizeof(struct chsc_pnso_naid_l2)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
+
+		elems = (rr->response.length - sizeof(struct chsc_header) -
+			 sizeof(struct chsc_pnso_naihdr)) / size;
+
+		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+			/* Inform the caller that they need to scrap */
+			/* the data that was already reported via cb */
+			rc = -EAGAIN;
+			break;
+		}
+		isfirstblock = 0;
+		prev_instance = rr->naihdr.instance;
+		for (i = 0; i < elems; i++)
+			(*cb)(priv, &rr->entries[i]);
+	} while ((rc == -EBUSY) || (!rc && /* list stored */
+		  /* resume token is non-zero => list incomplete */
+		  (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
+
+	free_page((unsigned long)rr);
+	return rc;
+}
+
 static const struct net_device_ops qeth_l2_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
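The callback contract of qeth_l2_pnso() above is one invocation per returned struct chsc_pnso_naid_l2 entry, possibly spread over several ccw_device_pnso() calls chained by the resume token. A hypothetical callback that merely counts entries (the demo_* names are illustrative; the lnid check mirrors the one used later in this patch, with VLAN_N_VID from <linux/if_vlan.h>):

struct demo_count {
	unsigned int macs;
	unsigned int vlans;
};

static void demo_count_cb(void *priv, struct chsc_pnso_naid_l2 *entry)
{
	struct demo_count *cnt = priv;

	cnt->macs++;
	if (entry->addr_lnid.lnid < VLAN_N_VID)	/* entry carries a VLAN id */
		cnt->vlans++;
}

A caller would pass it as qeth_l2_pnso(card, 0, demo_count_cb, &cnt); on a non-zero return (notably -EAGAIN after an instance change mid-walk), whatever was already reported to the callback must be discarded.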
"Bridge port state change" : "Unknown reason"); mutex_lock(&data->card->sbp_lock); data->card->options.sbp.hostnotification = 0; mutex_unlock(&data->card->sbp_lock); qeth_bridge_emit_host_event(data->card, anev_abort, - 0, NULL, NULL); + 0, NULL, NULL); } else - for (i = 0; i < data->hostevs.num_entries; i++) { + for (i = 0; i < data->ac_event.num_entries; i++) { struct qeth_ipacmd_addr_change_entry *entry = - &data->hostevs.entry[i]; + &data->ac_event.entry[i]; qeth_bridge_emit_host_event(data->card, - anev_reg_unreg, - entry->change_code, - &entry->token, &entry->addr_lnid); + anev_reg_unreg, + entry->change_code, + &entry->token, + &entry->addr_lnid); } kfree(data); } -static void qeth_bridge_host_event(struct qeth_card *card, - struct qeth_ipa_cmd *cmd) +static void qeth_addr_change_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) { struct qeth_ipacmd_addr_change *hostevs = &cmd->data.addrchange; - struct qeth_bridge_host_data *data; + struct qeth_addr_change_data *data; int extrasize; - QETH_CARD_TEXT(card, 2, "brhostev"); + QETH_CARD_TEXT(card, 4, "adrchgev"); if (cmd->hdr.return_code != 0x0000) { if (cmd->hdr.return_code == 0x0010) { if (hostevs->lost_event_mask == 0x00) hostevs->lost_event_mask = 0xff; } else { - QETH_CARD_TEXT_(card, 2, "BPHe%04x", + QETH_CARD_TEXT_(card, 2, "ACHN%04x", cmd->hdr.return_code); return; } } extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) * hostevs->num_entries; - data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize, - GFP_ATOMIC); + data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize, + GFP_ATOMIC); if (!data) { - QETH_CARD_TEXT(card, 2, "BPHalloc"); + QETH_CARD_TEXT(card, 2, "ACNalloc"); return; } - INIT_WORK(&data->worker, qeth_bridge_host_event_worker); + INIT_WORK(&data->worker, qeth_addr_change_event_worker); data->card = card; - memcpy(&data->hostevs, hostevs, + memcpy(&data->ac_event, hostevs, sizeof(struct qeth_ipacmd_addr_change) + extrasize); queue_work(card->event_wq, &data->worker); } @@ -1448,63 +1519,18 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL); } -/** - * qeth_anset_makerc() - derive "traditional" error from hardware codes. - * @card: qeth_card structure pointer, for debug messages. - * - * Returns negative errno-compatible error indication or 0 on success. 
@@ -1448,63 +1519,18 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
	return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
 }
 
-/**
- * qeth_anset_makerc() - derive "traditional" error from hardware codes.
- * @card: qeth_card structure pointer, for debug messages.
- *
- * Returns negative errno-compatible error indication or 0 on success.
- */
-static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
-{
-	int rc;
-
-	if (pnso_rc == 0)
-		switch (response) {
-		case 0x0001:
-			rc = 0;
-			break;
-		case 0x0004:
-		case 0x0100:
-		case 0x0106:
-			rc = -EOPNOTSUPP;
-			dev_err(&card->gdev->dev,
-				"Setting address notification failed\n");
-			break;
-		case 0x0107:
-			rc = -EAGAIN;
-			break;
-		default:
-			rc = -EIO;
-		}
-	else
-		rc = -EIO;
-
-	if (rc) {
-		QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
-		QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
-	}
-	return rc;
-}
-
 static void qeth_bridgeport_an_set_cb(void *priv,
-		enum qdio_brinfo_entry_type type, void *entry)
+				      struct chsc_pnso_naid_l2 *entry)
 {
	struct qeth_card *card = (struct qeth_card *)priv;
-	struct qdio_brinfo_entry_l2 *l2entry;
	u8 code;
 
-	if (type != l2_addr_lnid) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	l2entry = (struct qdio_brinfo_entry_l2 *)entry;
	code = IPA_ADDR_CHANGE_CODE_MACADDR;
-	if (l2entry->addr_lnid.lnid < VLAN_N_VID)
+	if (entry->addr_lnid.lnid < VLAN_N_VID)
		code |= IPA_ADDR_CHANGE_CODE_VLANID;
	qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
-		(struct net_if_token *)&l2entry->nit,
-		(struct mac_addr_lnid *)&l2entry->addr_lnid);
+				    (struct net_if_token *)&entry->nit,
+				    (struct mac_addr_lnid *)&entry->addr_lnid);
 }
 
 /**
@@ -1520,22 +1546,16 @@ static void qeth_bridgeport_an_set_cb(void *priv,
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
 {
	int rc;
-	u16 response;
-	struct ccw_device *ddev;
-	struct subchannel_id schid;
 
	if (!card->options.sbp.supported_funcs)
		return -EOPNOTSUPP;
-	ddev = CARD_DDEV(card);
-	ccw_device_get_schid(ddev, &schid);
 
	if (enable) {
		qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
-		rc = qdio_pnso_brinfo(schid, 1, &response,
-				      qeth_bridgeport_an_set_cb, card);
+		rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
	} else
-		rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
-	return qeth_anset_makerc(card, rc, response);
+		rc = qeth_l2_pnso(card, 0, NULL, NULL);
+	return rc;
 }
 
 static bool qeth_bridgeport_is_in_use(struct qeth_card *card)