Lines Matching refs:idxd

78 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_config_bus_match() local
80 if (idxd->state != IDXD_DEV_CONF_READY) in idxd_config_bus_match()
85 struct idxd_device *idxd = wq->idxd; in idxd_config_bus_match() local
87 if (idxd->state < IDXD_DEV_CONF_READY) in idxd_config_bus_match()
111 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_config_bus_probe() local
113 if (idxd->state != IDXD_DEV_CONF_READY) { in idxd_config_bus_probe()
122 spin_lock_irqsave(&idxd->dev_lock, flags); in idxd_config_bus_probe()
123 rc = idxd_device_config(idxd); in idxd_config_bus_probe()
124 spin_unlock_irqrestore(&idxd->dev_lock, flags); in idxd_config_bus_probe()
132 rc = idxd_device_enable(idxd); in idxd_config_bus_probe()
141 rc = idxd_register_dma_device(idxd); in idxd_config_bus_probe()
150 struct idxd_device *idxd = wq->idxd; in idxd_config_bus_probe() local
154 if (idxd->state != IDXD_DEV_ENABLED) { in idxd_config_bus_probe()
185 spin_lock_irqsave(&idxd->dev_lock, flags); in idxd_config_bus_probe()
186 rc = idxd_device_config(idxd); in idxd_config_bus_probe()
187 spin_unlock_irqrestore(&idxd->dev_lock, flags); in idxd_config_bus_probe()
242 struct idxd_device *idxd = wq->idxd; in disable_wq() local
243 struct device *dev = &idxd->pdev->dev; in disable_wq()
285 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_config_bus_remove() local
289 dev_name(&idxd->conf_dev)); in idxd_config_bus_remove()
290 for (i = 0; i < idxd->max_wqs; i++) { in idxd_config_bus_remove()
291 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_config_bus_remove()
296 dev_name(&idxd->conf_dev)); in idxd_config_bus_remove()
300 idxd_unregister_dma_device(idxd); in idxd_config_bus_remove()
301 rc = idxd_device_disable(idxd); in idxd_config_bus_remove()
302 for (i = 0; i < idxd->max_wqs; i++) { in idxd_config_bus_remove()
303 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_config_bus_remove()
350 struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) in idxd_get_bus_type() argument
352 return idxd_bus_types[idxd->type]; in idxd_get_bus_type()
355 static struct device_type *idxd_get_device_type(struct idxd_device *idxd) in idxd_get_device_type() argument
357 if (idxd->type == IDXD_TYPE_DSA) in idxd_get_device_type()
409 struct idxd_device *idxd = engine->idxd; in engine_group_id_store() local
418 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in engine_group_id_store()
421 if (id > idxd->max_groups - 1 || id < -1) in engine_group_id_store()
436 engine->group = &idxd->groups[id]; in engine_group_id_store()
462 static void idxd_set_free_tokens(struct idxd_device *idxd) in idxd_set_free_tokens() argument
466 for (i = 0, tokens = 0; i < idxd->max_groups; i++) { in idxd_set_free_tokens()
467 struct idxd_group *g = &idxd->groups[i]; in idxd_set_free_tokens()
472 idxd->nr_tokens = idxd->max_tokens - tokens; in idxd_set_free_tokens()
491 struct idxd_device *idxd = group->idxd; in group_tokens_reserved_store() local
499 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_tokens_reserved_store()
502 if (idxd->state == IDXD_DEV_ENABLED) in group_tokens_reserved_store()
505 if (val > idxd->max_tokens) in group_tokens_reserved_store()
508 if (val > idxd->nr_tokens + group->tokens_reserved) in group_tokens_reserved_store()
512 idxd_set_free_tokens(idxd); in group_tokens_reserved_store()
536 struct idxd_device *idxd = group->idxd; in group_tokens_allowed_store() local
544 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_tokens_allowed_store()
547 if (idxd->state == IDXD_DEV_ENABLED) in group_tokens_allowed_store()
551 val > group->tokens_reserved + idxd->nr_tokens) in group_tokens_allowed_store()
578 struct idxd_device *idxd = group->idxd; in group_use_token_limit_store() local
586 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_use_token_limit_store()
589 if (idxd->state == IDXD_DEV_ENABLED) in group_use_token_limit_store()
592 if (idxd->token_limit == 0) in group_use_token_limit_store()
610 struct idxd_device *idxd = group->idxd; in group_engines_show() local
612 for (i = 0; i < idxd->max_engines; i++) { in group_engines_show()
613 struct idxd_engine *engine = &idxd->engines[i]; in group_engines_show()
620 idxd->id, engine->id); in group_engines_show()
639 struct idxd_device *idxd = group->idxd; in group_work_queues_show() local
641 for (i = 0; i < idxd->max_wqs; i++) { in group_work_queues_show()
642 struct idxd_wq *wq = &idxd->wqs[i]; in group_work_queues_show()
649 idxd->id, wq->id); in group_work_queues_show()
677 struct idxd_device *idxd = group->idxd; in group_traffic_class_a_store() local
685 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_a_store()
688 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_a_store()
718 struct idxd_device *idxd = group->idxd; in group_traffic_class_b_store() local
726 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_b_store()
729 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_b_store()
809 struct idxd_device *idxd = wq->idxd; in wq_group_id_store() local
818 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_group_id_store()
824 if (id > idxd->max_groups - 1 || id < -1) in wq_group_id_store()
835 group = &idxd->groups[id]; in wq_group_id_store()
862 struct idxd_device *idxd = wq->idxd; in wq_mode_store() local
864 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_mode_store()
891 static int total_claimed_wq_size(struct idxd_device *idxd) in total_claimed_wq_size() argument
896 for (i = 0; i < idxd->max_wqs; i++) { in total_claimed_wq_size()
897 struct idxd_wq *wq = &idxd->wqs[i]; in total_claimed_wq_size()
911 struct idxd_device *idxd = wq->idxd; in wq_size_store() local
918 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_size_store()
921 if (idxd->state == IDXD_DEV_ENABLED) in wq_size_store()
924 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) in wq_size_store()
948 struct idxd_device *idxd = wq->idxd; in wq_priority_store() local
955 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_priority_store()
1097 struct idxd_device *idxd = wq->idxd; in wq_max_transfer_size_store() local
1108 if (xfer_size > idxd->max_xfer_bytes) in wq_max_transfer_size_store()
1131 struct idxd_device *idxd = wq->idxd; in wq_max_batch_size_store() local
1142 if (batch_size > idxd->max_batch_size) in wq_max_batch_size_store()
1181 struct idxd_device *idxd = in version_show() local
1184 return sprintf(buf, "%#x\n", idxd->hw.version); in version_show()
1192 struct idxd_device *idxd = in max_work_queues_size_show() local
1195 return sprintf(buf, "%u\n", idxd->max_wq_size); in max_work_queues_size_show()
1202 struct idxd_device *idxd = in max_groups_show() local
1205 return sprintf(buf, "%u\n", idxd->max_groups); in max_groups_show()
1212 struct idxd_device *idxd = in max_work_queues_show() local
1215 return sprintf(buf, "%u\n", idxd->max_wqs); in max_work_queues_show()
1222 struct idxd_device *idxd = in max_engines_show() local
1225 return sprintf(buf, "%u\n", idxd->max_engines); in max_engines_show()
1232 struct idxd_device *idxd = in numa_node_show() local
1235 return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); in numa_node_show()
1242 struct idxd_device *idxd = in max_batch_size_show() local
1245 return sprintf(buf, "%u\n", idxd->max_batch_size); in max_batch_size_show()
1253 struct idxd_device *idxd = in max_transfer_size_show() local
1256 return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); in max_transfer_size_show()
1263 struct idxd_device *idxd = in op_cap_show() local
1268 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]); in op_cap_show()
1279 struct idxd_device *idxd = in gen_cap_show() local
1282 return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits); in gen_cap_show()
1289 struct idxd_device *idxd = in configurable_show() local
1293 test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); in configurable_show()
1300 struct idxd_device *idxd = in clients_show() local
1305 spin_lock_irqsave(&idxd->dev_lock, flags); in clients_show()
1306 for (i = 0; i < idxd->max_wqs; i++) { in clients_show()
1307 struct idxd_wq *wq = &idxd->wqs[i]; in clients_show()
1311 spin_unlock_irqrestore(&idxd->dev_lock, flags); in clients_show()
1320 struct idxd_device *idxd = in state_show() local
1323 switch (idxd->state) { in state_show()
1340 struct idxd_device *idxd = in errors_show() local
1345 spin_lock_irqsave(&idxd->dev_lock, flags); in errors_show()
1347 out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); in errors_show()
1348 spin_unlock_irqrestore(&idxd->dev_lock, flags); in errors_show()
1358 struct idxd_device *idxd = in max_tokens_show() local
1361 return sprintf(buf, "%u\n", idxd->max_tokens); in max_tokens_show()
1368 struct idxd_device *idxd = in token_limit_show() local
1371 return sprintf(buf, "%u\n", idxd->token_limit); in token_limit_show()
1378 struct idxd_device *idxd = in token_limit_store() local
1387 if (idxd->state == IDXD_DEV_ENABLED) in token_limit_store()
1390 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in token_limit_store()
1393 if (!idxd->hw.group_cap.token_limit) in token_limit_store()
1396 if (val > idxd->hw.group_cap.total_tokens) in token_limit_store()
1399 idxd->token_limit = val; in token_limit_store()
1407 struct idxd_device *idxd = in cdev_major_show() local
1410 return sprintf(buf, "%u\n", idxd->major); in cdev_major_show()
1417 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); in cmd_status_show() local
1419 return sprintf(buf, "%#x\n", idxd->cmd_status); in cmd_status_show()
1454 static int idxd_setup_engine_sysfs(struct idxd_device *idxd) in idxd_setup_engine_sysfs() argument
1456 struct device *dev = &idxd->pdev->dev; in idxd_setup_engine_sysfs()
1459 for (i = 0; i < idxd->max_engines; i++) { in idxd_setup_engine_sysfs()
1460 struct idxd_engine *engine = &idxd->engines[i]; in idxd_setup_engine_sysfs()
1462 engine->conf_dev.parent = &idxd->conf_dev; in idxd_setup_engine_sysfs()
1464 idxd->id, engine->id); in idxd_setup_engine_sysfs()
1465 engine->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_engine_sysfs()
1481 struct idxd_engine *engine = &idxd->engines[i]; in idxd_setup_engine_sysfs()
1488 static int idxd_setup_group_sysfs(struct idxd_device *idxd) in idxd_setup_group_sysfs() argument
1490 struct device *dev = &idxd->pdev->dev; in idxd_setup_group_sysfs()
1493 for (i = 0; i < idxd->max_groups; i++) { in idxd_setup_group_sysfs()
1494 struct idxd_group *group = &idxd->groups[i]; in idxd_setup_group_sysfs()
1496 group->conf_dev.parent = &idxd->conf_dev; in idxd_setup_group_sysfs()
1498 idxd->id, group->id); in idxd_setup_group_sysfs()
1499 group->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_group_sysfs()
1515 struct idxd_group *group = &idxd->groups[i]; in idxd_setup_group_sysfs()
1522 static int idxd_setup_wq_sysfs(struct idxd_device *idxd) in idxd_setup_wq_sysfs() argument
1524 struct device *dev = &idxd->pdev->dev; in idxd_setup_wq_sysfs()
1527 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_wq_sysfs()
1528 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_setup_wq_sysfs()
1530 wq->conf_dev.parent = &idxd->conf_dev; in idxd_setup_wq_sysfs()
1531 dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); in idxd_setup_wq_sysfs()
1532 wq->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_wq_sysfs()
1548 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_setup_wq_sysfs()
1555 static int idxd_setup_device_sysfs(struct idxd_device *idxd) in idxd_setup_device_sysfs() argument
1557 struct device *dev = &idxd->pdev->dev; in idxd_setup_device_sysfs()
1561 sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); in idxd_setup_device_sysfs()
1562 idxd->conf_dev.parent = dev; in idxd_setup_device_sysfs()
1563 dev_set_name(&idxd->conf_dev, "%s", devname); in idxd_setup_device_sysfs()
1564 idxd->conf_dev.bus = idxd_get_bus_type(idxd); in idxd_setup_device_sysfs()
1565 idxd->conf_dev.groups = idxd_attribute_groups; in idxd_setup_device_sysfs()
1566 idxd->conf_dev.type = idxd_get_device_type(idxd); in idxd_setup_device_sysfs()
1568 dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); in idxd_setup_device_sysfs()
1569 rc = device_register(&idxd->conf_dev); in idxd_setup_device_sysfs()
1571 put_device(&idxd->conf_dev); in idxd_setup_device_sysfs()
1578 int idxd_setup_sysfs(struct idxd_device *idxd) in idxd_setup_sysfs() argument
1580 struct device *dev = &idxd->pdev->dev; in idxd_setup_sysfs()
1583 rc = idxd_setup_device_sysfs(idxd); in idxd_setup_sysfs()
1589 rc = idxd_setup_wq_sysfs(idxd); in idxd_setup_sysfs()
1596 rc = idxd_setup_group_sysfs(idxd); in idxd_setup_sysfs()
1603 rc = idxd_setup_engine_sysfs(idxd); in idxd_setup_sysfs()
1613 void idxd_cleanup_sysfs(struct idxd_device *idxd) in idxd_cleanup_sysfs() argument
1617 for (i = 0; i < idxd->max_wqs; i++) { in idxd_cleanup_sysfs()
1618 struct idxd_wq *wq = &idxd->wqs[i]; in idxd_cleanup_sysfs()
1623 for (i = 0; i < idxd->max_engines; i++) { in idxd_cleanup_sysfs()
1624 struct idxd_engine *engine = &idxd->engines[i]; in idxd_cleanup_sysfs()
1629 for (i = 0; i < idxd->max_groups; i++) { in idxd_cleanup_sysfs()
1630 struct idxd_group *group = &idxd->groups[i]; in idxd_cleanup_sysfs()
1635 device_unregister(&idxd->conf_dev); in idxd_cleanup_sysfs()
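
Most of the matches above sit in sysfs attribute callbacks that recover the idxd device from its embedded conf_dev and print or update a field. A minimal sketch of that show-side pattern, pieced together from the fragments listed (simplified, not the exact upstream code):

	/* Read-only sysfs attribute: report the hardware version register. */
	static ssize_t version_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* conf_dev is embedded in struct idxd_device, so container_of()
		 * recovers the owning device from the sysfs struct device. */
		struct idxd_device *idxd =
			container_of(dev, struct idxd_device, conf_dev);

		return sprintf(buf, "%#x\n", idxd->hw.version);
	}
	static DEVICE_ATTR_RO(version);

The store-side callbacks in the listing add the usual guards before writing: reject the change unless IDXD_FLAG_CONFIGURABLE is set and the device is not in the IDXD_DEV_ENABLED state.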