// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
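
/*
 * Illustrative client-side sketch (not compiled here): a requester driver
 * typically registers a notifier so it can attach or detach itself as
 * providers come and go.  All names below ("my_dev", "my_dca_nb") are
 * hypothetical placeholders, not part of this file.
 *
 *	static int my_dca_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		if (event == DCA_PROVIDER_ADD)
 *			dca_add_requester(my_dev);
 *		else if (event == DCA_PROVIDER_REMOVE)
 *			dca_remove_requester(my_dev);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_dca_nb = {
 *		.notifier_call = my_dca_notify,
 *	};
 *
 *	dca_register_notify(&my_dca_nb);
 */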

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev: the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev: the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *		  for the given cpu (new api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
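
/*
 * Illustrative sketch (not compiled here): a requester normally asks for
 * the tag of the CPU it is currently running on and programs it into its
 * own device-specific DCA control register.  "ioaddr" and "MY_DCA_CTRL"
 * are hypothetical.
 *
 *	cpu = get_cpu();
 *	tag = dca3_get_tag(&pdev->dev, cpu);
 *	put_cpu();
 *	writel(tag, ioaddr + MY_DCA_CTRL);
 */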

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu: the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops: pointer to struct of dca operation function pointers
 * @priv_size: size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
					int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca: struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
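
/*
 * Illustrative provider-side sketch (not compiled here): a provider such as
 * an I/OAT DMA driver allocates a dca_provider around its dca_ops table,
 * registers it for the PCI device it serves, and tears it down in reverse
 * order.  "my_dca_ops", "priv_size" and "pdev" are hypothetical.
 *
 *	dca = alloc_dca_provider(&my_dca_ops, priv_size);
 *	if (!dca)
 *		return -ENOMEM;
 *	err = register_dca_provider(dca, &pdev->dev);
 *	if (err) {
 *		free_dca_provider(dca);
 *		return err;
 *	}
 *	...
 *	unregister_dca_provider(dca, &pdev->dev);
 *	free_dca_provider(dca);
 */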

/**
 * dca_register_notify - register a client's notifier callback
 * @nb: the client's notifier_block to add to the chain
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb: the client's notifier_block to remove from the chain
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);