path: root/arch/s390/pci/pci_msi.c
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <asm/hw_irq.h>

/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
static unsigned int msihash_shift = 6;
#define msi_hashfn(nr)	hash_long(nr, msihash_shift)

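/* serializes updates of the irq -> msi_desc mapping */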
static DEFINE_SPINLOCK(msi_map_lock);

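/*
 * Look up the msi_desc registered for an irq number. The lookup is
 * lockless over an RCU hash chain, so callers must be in an RCU
 * read-side critical section, e.g. (a sketch):
 *
 *	rcu_read_lock();
 *	msi = __irq_get_msi_desc(irq);
 *	if (msi)
 *		...use msi...
 *	rcu_read_unlock();
 */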
struct msi_desc *__irq_get_msi_desc(unsigned int irq)
{
	struct hlist_node *entry;
	struct msi_map *map;

	hlist_for_each_entry_rcu(map, entry,
			&msi_hash[msi_hashfn(irq)], msi_chain)
		if (map->irq == irq)
			return map->msi;
	return NULL;
}

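/*
 * Mask (flag != 0) or unmask (flag == 0) an MSI. For MSI-X the
 * entry's vector control word is rewritten (the previous value is
 * kept in msi->masked); for plain MSI the mask bits in config space
 * are updated. Returns 1 if the mask state was changed, 0 if the
 * device has no mask bits.
 */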
int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
	if (msi->msi_attrib.is_msix) {
		int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL;
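		/* save the current vector control word before writing the new mask state */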
		msi->masked = readl(msi->mask_base + offset);
		writel(flag, msi->mask_base + offset);
	} else {
		if (msi->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long) msi->mask_base;
			pci_read_config_dword(msi->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(msi->dev, pos, mask_bits);
		} else {
			return 0;
		}
	}

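	/* record whether the vector is now masked */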
	msi->msi_attrib.maskbit = !!flag;
	return 1;
}

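/*
 * Wire up one MSI: store the irq -> msi_desc mapping in zdev's
 * msi_map and the global hash, attach the msi_desc to the irq and
 * program the message with the device's MSI address and the vector
 * number relative to 'offset'.
 *
 * Roughly how a caller such as arch_setup_msi_irqs() would use it
 * (a sketch, the real caller may differ):
 *
 *	list_for_each_entry(msi, &pdev->msi_list, list) {
 *		rc = zpci_setup_msi_irq(zdev, msi, irq, offset);
 *		if (rc)
 *			return rc;
 *		irq++;
 *	}
 */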
int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
			unsigned int nr, int offset)
{
	struct msi_map *map;
	struct msi_msg msg;
	int rc;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->irq = nr;
	map->msi = msi;
	zdev->msi_map[nr & ZPCI_MSI_MASK] = map;

	pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
		 __func__, nr, msi_hashfn(nr));
	hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);

	spin_lock(&msi_map_lock);
	rc = irq_set_msi_desc(nr, msi);
	if (rc) {
		spin_unlock(&msi_map_lock);
		hlist_del_rcu(&map->msi_chain);
		kfree(map);
		zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
		return rc;
	}
	spin_unlock(&msi_map_lock);

	msg.data = nr - offset;
	msg.address_lo = zdev->msi_addr & 0xffffffff;
	msg.address_hi = zdev->msi_addr >> 32;
	write_msi_msg(nr, &msg);
	return 0;
}

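/*
 * Undo zpci_setup_msi_irq(): clear the stored message, mask the
 * vector and remove the irq from zdev's msi_map and the hash.
 */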
void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
{
	int irq = msi->irq & ZPCI_MSI_MASK;
	struct msi_map *map;

	msi->msg.address_lo = 0;
	msi->msg.address_hi = 0;
	msi->msg.data = 0;
	msi->irq = 0;
	zpci_msi_set_mask_bits(msi, 1, 1);

	spin_lock(&msi_map_lock);
	map = zdev->msi_map[irq];
	hlist_del_rcu(&map->msi_chain);
	kfree(map);
	zdev->msi_map[irq] = NULL;
	spin_unlock(&msi_map_lock);
}

/*
 * The msi hash table has 64 buckets (msihash_shift = 6), which is
 * good for 4..20 devices: a typical device allocates 10 + #CPUs
 * MSIs, e.g. ~26 per device on a 16 CPU machine. Maybe make the
 * hash table size adjustable later.
 */
int __init zpci_msihash_init(void)
{
	unsigned int i;

	msi_hash = kmalloc((1U << msihash_shift) * sizeof(*msi_hash),
			   GFP_KERNEL);
	if (!msi_hash)
		return -ENOMEM;

	for (i = 0; i < (1U << msihash_shift); i++)
		INIT_HLIST_HEAD(&msi_hash[i]);
	return 0;
}

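/* free the hash table again; __init since this can only run during early init */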
void __init zpci_msihash_exit(void)
{
	kfree(msi_hash);
}