/*
 * File:	htirq.c
 * Purpose:	Hypertransport Interrupt Capability
 *
 * Copyright (C) 2006 Linux Networx
 * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
 */

#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/htirq.h>

/* Global ht irq lock.
 *
 * This is needed to serialize access to the data port in hypertransport
 * irq capability.
 *
 * With multiple simultaneous hypertransport irq devices it might pay
 * to make this more fine grained.  But start with simple, stupid, and correct.
 */
static DEFINE_SPINLOCK(ht_irq_lock);

struct ht_irq_cfg {
	struct pci_dev *dev;
	/* Update callback used to cope with buggy hardware */
	ht_irq_update_t *update;
	unsigned pos;
	unsigned idx;
	struct ht_irq_msg msg;
};

void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
	struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
	unsigned long flags;

	spin_lock_irqsave(&ht_irq_lock, flags);
	/* The capability exposes an index register at pos + 2 and a data
	 * window at pos + 4: select the dword, then write it.  Only the
	 * halves that changed are rewritten.
	 */
	if (cfg->msg.address_lo != msg->address_lo) {
		pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
		pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
	}
	if (cfg->msg.address_hi != msg->address_hi) {
		pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
		pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
	}
	/* Let buggy hardware see the rewritten message */
	if (cfg->update)
		cfg->update(cfg->dev, irq, msg);
	spin_unlock_irqrestore(&ht_irq_lock, flags);
	cfg->msg = *msg;
}

void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
	struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
	*msg = cfg->msg;
}

void mask_ht_irq(struct irq_data *data)
{
	struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
	struct ht_irq_msg msg = cfg->msg;

	/* Setting bit 0 of the low address word masks the irq */
	msg.address_lo |= 1;
	write_ht_irq_msg(data->irq, &msg);
}

void unmask_ht_irq(struct irq_data *data)
{
	struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
	struct ht_irq_msg msg = cfg->msg;

	/* Clearing bit 0 of the low address word unmasks the irq */
	msg.address_lo &= ~1;
	write_ht_irq_msg(data->irq, &msg);
}

/**
 * __ht_create_irq - create an irq and attach it to a device.
 * @dev: The hypertransport device to find the irq capability on.
 * @idx: Which of the possible irqs to attach to.
 * @update: Function to be called when changing the htirq message
 *
 * The irq number of the new irq or a negative error value is returned.
 */
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
{
	struct ht_irq_cfg *cfg;
	unsigned long flags;
	u32 data;
	int max_irq;
	int pos;
	int irq;
	int node;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
	if (!pos)
		return -EINVAL;

	/* Verify the idx I want to use is in range */
	spin_lock_irqsave(&ht_irq_lock, flags);
	pci_write_config_byte(dev, pos + 2, 1);
	pci_read_config_dword(dev, pos + 4, &data);
	spin_unlock_irqrestore(&ht_irq_lock, flags);

	/* Index 1 reports the highest valid interrupt definition index
	 * in bits 23:16.
	 */
	max_irq = (data >> 16) & 0xff;
	if (idx > max_irq)
		return -EINVAL;

	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->dev = dev;
	cfg->update = update;
	cfg->pos = pos;
	/* Interrupt definition idx lives at dataport indexes 0x10 + 2*idx
	 * (low dword) and 0x10 + 2*idx + 1 (high dword).
	 */
	cfg->idx = 0x10 + (idx * 2);
	/* Initialize msg to a value that will never match the first write. */
	cfg->msg.address_lo = 0xffffffff;
	cfg->msg.address_hi = 0xffffffff;

	node = dev_to_node(&dev->dev);
	irq = create_irq_nr(0, node);

	if (irq <= 0) {
		kfree(cfg);
		return -EBUSY;
	}
	irq_set_handler_data(irq, cfg);

	if (arch_setup_ht_irq(irq, dev) < 0) {
		ht_destroy_irq(irq);
		return -EBUSY;
	}

	return irq;
}
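
/*
 * Illustrative sketch, not part of the original driver and not built
 * (guarded by #if 0): how a driver for hardware with a quirky HT irq
 * capability might use __ht_create_irq().  The callback shape is inferred
 * from the cfg->update(cfg->dev, irq, msg) call in write_ht_irq_msg()
 * above; the exact ht_irq_update_t typedef lives in <linux/htirq.h>.
 * my_ht_quirk(), my_setup_ht_irq() and the 0x60 register offset are
 * hypothetical.
 */
#if 0
static void my_ht_quirk(struct pci_dev *dev, int irq, struct ht_irq_msg *msg)
{
	/* Hypothetical workaround: poke a device-specific register so the
	 * hardware latches the rewritten message.
	 */
	pci_write_config_dword(dev, 0x60, msg->address_lo);
}

static int my_setup_ht_irq(struct pci_dev *dev)
{
	int irq = __ht_create_irq(dev, 0, my_ht_quirk);

	if (irq < 0)
		return irq;	/* -EINVAL, -ENOMEM or -EBUSY from above */
	return irq;
}
#endif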

/**
 * ht_create_irq - create an irq and attach it to a device.
 * @dev: The hypertransport device to find the irq capability on.
 * @idx: Which of the possible irqs to attach to.
 *
 * ht_create_irq needs to be called for all hypertransport devices
 * that generate irqs.
 *
 * The irq number of the new irq or a negative error value is returned.
 */
int ht_create_irq(struct pci_dev *dev, int idx)
{
	return __ht_create_irq(dev, idx, NULL);
}
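
/*
 * Illustrative sketch, not built (guarded by #if 0): typical use of
 * ht_create_irq() from a driver probe path.  The returned value is an
 * ordinary Linux irq number, wired up here with request_irq() from
 * <linux/interrupt.h>.  my_request_ht_irq(), my_isr, my_dev and the
 * "my_ht_device" name are hypothetical.
 */
#if 0
static int my_request_ht_irq(struct pci_dev *dev, irq_handler_t my_isr,
			     void *my_dev)
{
	int irq = ht_create_irq(dev, 0);	/* first interrupt definition */

	if (irq < 0)
		return irq;

	if (request_irq(irq, my_isr, 0, "my_ht_device", my_dev)) {
		ht_destroy_irq(irq);
		return -EBUSY;
	}
	return irq;
}
#endif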

/**
 * ht_destroy_irq - destroy an irq created with ht_create_irq
 * @irq: irq to be destroyed
 *
 * This reverses ht_create_irq, removing the specified irq from
 * existence.  The irq should be freed (no handler still requested on it)
 * before this is called.
 */
void ht_destroy_irq(unsigned int irq)
{
	struct ht_irq_cfg *cfg;

	cfg = irq_get_handler_data(irq);
	irq_set_chip(irq, NULL);
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);

	kfree(cfg);
}
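
/*
 * Illustrative sketch, not built (guarded by #if 0): tearing down an irq
 * created above.  The handler is released with free_irq() (from
 * <linux/interrupt.h>) before ht_destroy_irq() frees the per-irq state.
 * my_release_ht_irq() and my_dev are hypothetical.
 */
#if 0
static void my_release_ht_irq(unsigned int irq, void *my_dev)
{
	free_irq(irq, my_dev);
	ht_destroy_irq(irq);
}
#endif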

EXPORT_SYMBOL(__ht_create_irq);
EXPORT_SYMBOL(ht_create_irq);
EXPORT_SYMBOL(ht_destroy_irq);