// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/acpi.h>
#include <linux/xarray.h>
#include <linux/fw_table.h>
#include <linux/node.h>
#include <linux/overflow.h>
#include "cxlpci.h"
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"
struct dsmas_entry {
        struct range dpa_range;
        u8 handle;
        struct access_coordinate coord[ACCESS_COORDINATE_MAX];
        struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
        int entries;
        int qos_class;
};
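
/*
 * Scale a raw CDAT entry by its base unit. A zero or all-ones (0xffff)
 * entry is treated as invalid/unreported, as is any product that would
 * overflow u32. Latency values, which follow the HMAT convention of being
 * reported in picoseconds, are converted to nanoseconds to match
 * struct access_coordinate.
 */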
static u32 cdat_normalize(u16 entry, u64 base, u8 type)
{
        u32 value;

        /*
         * Check for invalid and overflow values
         */
        if (entry == 0xffff || !entry)
                return 0;
        else if (base > (UINT_MAX / (entry)))
                return 0;

        /*
         * CDAT fields follow the format of HMAT fields. See table 5 Device
         * Scoped Latency and Bandwidth Information Structure in Coherent Device
         * Attribute Table (CDAT) Specification v1.01.
         */
        value = entry * base;
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                value = DIV_ROUND_UP(value, 1000);
                break;
        default:
                break;
        }

        return value;
}
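
/*
 * cdat_table_parse() callback: validate one DSMAS subtable and cache its
 * DPA range under the DSMAD handle for later correlation with DSLBIS
 * entries.
 */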
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
                              const unsigned long end)
{
        struct acpi_cdat_header *hdr = &header->cdat;
        struct acpi_cdat_dsmas *dsmas;
        int size = sizeof(*hdr) + sizeof(*dsmas);
        struct xarray *dsmas_xa = arg;
        struct dsmas_entry *dent;
        u16 len;
        int rc;

        len = le16_to_cpu((__force __le16)hdr->length);
        if (len != size || (unsigned long)hdr + len > end) {
                pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
                return -EINVAL;
        }

        /* Skip common header */
        dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);

        dent = kzalloc(sizeof(*dent), GFP_KERNEL);
        if (!dent)
                return -ENOMEM;

        dent->handle = dsmas->dsmad_handle;
        dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
        dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
                              le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;

        rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
        if (rc) {
                kfree(dent);
                return rc;
        }

        return 0;
}
static void __cxl_access_coordinate_set(struct access_coordinate *coord,
                                        int access, unsigned int val)
{
        switch (access) {
        case ACPI_HMAT_ACCESS_LATENCY:
                coord->read_latency = val;
                coord->write_latency = val;
                break;
        case ACPI_HMAT_READ_LATENCY:
                coord->read_latency = val;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                coord->write_latency = val;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                coord->read_bandwidth = val;
                coord->write_bandwidth = val;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                coord->read_bandwidth = val;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                coord->write_bandwidth = val;
                break;
        }
}

static void cxl_access_coordinate_set(struct access_coordinate *coord,
                                      int access, unsigned int val)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
                __cxl_access_coordinate_set(&coord[i], access, val);
}
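
/*
 * cdat_table_parse() callback: validate one DSLBIS subtable and, for memory
 * targets, apply its normalized latency or bandwidth value to the DSMAS
 * entry previously cached under the same handle.
 */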
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
                               const unsigned long end)
{
        struct acpi_cdat_header *hdr = &header->cdat;
        struct acpi_cdat_dslbis *dslbis;
        int size = sizeof(*hdr) + sizeof(*dslbis);
        struct xarray *dsmas_xa = arg;
        struct dsmas_entry *dent;
        __le64 le_base;
        __le16 le_val;
        u64 val;
        u16 len;

        len = le16_to_cpu((__force __le16)hdr->length);
        if (len != size || (unsigned long)hdr + len > end) {
                pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
                return -EINVAL;
        }

        /* Skip common header */
        dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);

        /* Skip unrecognized data type */
        if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
                return 0;

        /* Not a memory type, skip */
        if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
                return 0;

        dent = xa_load(dsmas_xa, dslbis->handle);
        if (!dent) {
                pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
                return 0;
        }

        le_base = (__force __le64)dslbis->entry_base_unit;
        le_val = (__force __le16)dslbis->entry[0];
        val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
                             dslbis->data_type);

        cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);

        return 0;
}
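
/*
 * cdat_table_parse() reports the number of matching subtables it found (or
 * a negative errno). Normalize that into 0 on success, treating "no
 * matching subtables" as -ENOENT.
 */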
static int cdat_table_parse_output(int rc)
{
        if (rc < 0)
                return rc;
        if (rc == 0)
                return -ENOENT;

        return 0;
}
static int cxl_cdat_endpoint_process(struct cxl_port *port,
                                     struct xarray *dsmas_xa)
{
        int rc;

        rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
                              dsmas_xa, port->cdat.table, port->cdat.length);
        rc = cdat_table_parse_output(rc);
        if (rc)
                return rc;

        rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
                              dsmas_xa, port->cdat.table, port->cdat.length);
        return cdat_table_parse_output(rc);
}
static int cxl_port_perf_data_calculate(struct cxl_port *port,
                                        struct xarray *dsmas_xa)
{
        struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
        struct dsmas_entry *dent;
        int valid_entries = 0;
        unsigned long index;
        int rc;

        rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
        if (rc) {
                dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
                return rc;
        }

        struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

        if (!cxl_root)
                return -ENODEV;

        if (!cxl_root->ops || !cxl_root->ops->qos_class)
                return -EOPNOTSUPP;

        xa_for_each(dsmas_xa, index, dent) {
                int qos_class;

                cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
                dent->entries = 1;
                rc = cxl_root->ops->qos_class(cxl_root,
                                              &dent->coord[ACCESS_COORDINATE_CPU],
                                              1, &qos_class);
                if (rc != 1)
                        continue;

                valid_entries++;
                dent->qos_class = qos_class;
        }

        if (!valid_entries)
                return -ENOENT;

        return 0;
}
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
                              struct cxl_dpa_perf *dpa_perf)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                dpa_perf->coord[i] = dent->coord[i];
                dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
        }
        dpa_perf->dpa_range = dent->dpa_range;
        dpa_perf->qos_class = dent->qos_class;
        dev_dbg(dev,
                "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
                dent->dpa_range.start, dpa_perf->qos_class,
                dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
                dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
                dent->coord[ACCESS_COORDINATE_CPU].read_latency,
                dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
                                     struct xarray *dsmas_xa)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        struct device *dev = cxlds->dev;
        struct range pmem_range = {
                .start = cxlds->pmem_res.start,
                .end = cxlds->pmem_res.end,
        };
        struct range ram_range = {
                .start = cxlds->ram_res.start,
                .end = cxlds->ram_res.end,
        };
        struct dsmas_entry *dent;
        unsigned long index;

        xa_for_each(dsmas_xa, index, dent) {
                if (resource_size(&cxlds->ram_res) &&
                    range_contains(&ram_range, &dent->dpa_range))
                        update_perf_entry(dev, dent, &mds->ram_perf);
                else if (resource_size(&cxlds->pmem_res) &&
                         range_contains(&pmem_range, &dent->dpa_range))
                        update_perf_entry(dev, dent, &mds->pmem_perf);
                else
                        dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
                                dent->dpa_range.start);
        }
}
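
/*
 * device_for_each_child() callback: return nonzero (stop iteration) when a
 * root decoder advertising the sought QoS class is found.
 */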
static int match_cxlrd_qos_class(struct device *dev, void *data)
{
        int dev_qos_class = *(int *)data;
        struct cxl_root_decoder *cxlrd;

        if (!is_root_decoder(dev))
                return 0;

        cxlrd = to_cxl_root_decoder(dev);
        if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
                return 0;

        if (cxlrd->qos_class == dev_qos_class)
                return 1;

        return 0;
}
static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
        *dpa_perf = (struct cxl_dpa_perf) {
                .qos_class = CXL_QOS_CLASS_INVALID,
        };
}

static bool cxl_qos_match(struct cxl_port *root_port,
                          struct cxl_dpa_perf *dpa_perf)
{
        if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
                return false;

        if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
                                   match_cxlrd_qos_class))
                return false;

        return true;
}
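
/*
 * device_for_each_child() callback: return nonzero when a root decoder
 * lists the given host bridge among its downstream port targets.
 */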
static int match_cxlrd_hb(struct device *dev, void *data)
{
        struct device *host_bridge = data;
        struct cxl_switch_decoder *cxlsd;
        struct cxl_root_decoder *cxlrd;

        if (!is_root_decoder(dev))
                return 0;

        cxlrd = to_cxl_root_decoder(dev);
        cxlsd = &cxlrd->cxlsd;

        guard(rwsem_read)(&cxl_region_rwsem);
        for (int i = 0; i < cxlsd->nr_targets; i++) {
                if (host_bridge == cxlsd->target[i]->dport_dev)
                        return 1;
        }

        return 0;
}
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        struct cxl_port *root_port;
        int rc;

        struct cxl_root *cxl_root __free(put_cxl_root) =
                find_cxl_root(cxlmd->endpoint);

        if (!cxl_root)
                return -ENODEV;

        root_port = &cxl_root->port;

        /* Check that the QTG IDs are all sane between end device and root decoders */
        if (!cxl_qos_match(root_port, &mds->ram_perf))
                reset_dpa_perf(&mds->ram_perf);
        if (!cxl_qos_match(root_port, &mds->pmem_perf))
                reset_dpa_perf(&mds->pmem_perf);

        /* Check to make sure that the device's host bridge is under a root decoder */
        rc = device_for_each_child(&root_port->dev,
                                   cxlmd->endpoint->host_bridge, match_cxlrd_hb);
        if (!rc) {
                reset_dpa_perf(&mds->ram_perf);
                reset_dpa_perf(&mds->pmem_perf);
        }

        return rc;
}
static void discard_dsmas(struct xarray *xa)
{
        unsigned long index;
        void *ent;

        xa_for_each(xa, index, ent) {
                xa_erase(xa, index);
                kfree(ent);
        }
        xa_destroy(xa);
}
DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct xarray __dsmas_xa;
        struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
        int rc;

        xa_init(&__dsmas_xa);
        if (!port->cdat.table)
                return;

        rc = cxl_cdat_endpoint_process(port, dsmas_xa);
        if (rc < 0) {
                dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
                return;
        }

        rc = cxl_port_perf_data_calculate(port, dsmas_xa);
        if (rc) {
                dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
                return;
        }

        cxl_memdev_set_qos_class(cxlds, dsmas_xa);
        cxl_qos_class_verify(cxlmd);
        cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
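
/*
 * cdat_table_parse() callback for a switch SSLBIS (Switch Scoped Latency
 * and Bandwidth Information Structure). Each entry names a port pair; the
 * member that is not the upstream port identifies the downstream switch
 * port (with ACPI_CDAT_SSLBIS_ANY_PORT acting as a wildcard). The
 * normalized value is applied to every matching dport of the switch port.
 */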
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
                               const unsigned long end)
{
        struct acpi_cdat_sslbis_table {
                struct acpi_cdat_header header;
                struct acpi_cdat_sslbis sslbis_header;
                struct acpi_cdat_sslbe entries[];
        } *tbl = (struct acpi_cdat_sslbis_table *)header;
        int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
        struct acpi_cdat_sslbis *sslbis;
        struct cxl_port *port = arg;
        struct device *dev = &port->dev;
        int remain, entries, i;
        u16 len;

        len = le16_to_cpu((__force __le16)header->cdat.length);
        remain = len - size;
        if (!remain || remain % sizeof(tbl->entries[0]) ||
            (unsigned long)header + len > end) {
                dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
                return -EINVAL;
        }

        sslbis = &tbl->sslbis_header;
        /* Unrecognized data type, we can skip */
        if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
                return 0;

        entries = remain / sizeof(tbl->entries[0]);
        if (struct_size(tbl, entries, entries) != len)
                return -EINVAL;

        for (i = 0; i < entries; i++) {
                u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
                u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
                __le64 le_base;
                __le16 le_val;
                struct cxl_dport *dport;
                unsigned long index;
                u16 dsp_id;
                u64 val;

                switch (x) {
                case ACPI_CDAT_SSLBIS_US_PORT:
                        dsp_id = y;
                        break;
                case ACPI_CDAT_SSLBIS_ANY_PORT:
                        switch (y) {
                        case ACPI_CDAT_SSLBIS_US_PORT:
                                dsp_id = x;
                                break;
                        case ACPI_CDAT_SSLBIS_ANY_PORT:
                                dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
                                break;
                        default:
                                dsp_id = y;
                                break;
                        }
                        break;
                default:
                        dsp_id = x;
                        break;
                }

                le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
                le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
                val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
                                     sslbis->data_type);

                xa_for_each(&port->dports, index, dport) {
                        if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
                            dsp_id == dport->port_id) {
                                cxl_access_coordinate_set(dport->coord,
                                                          sslbis->data_type,
                                                          val);
                        }
                }
        }

        return 0;
}
void cxl_switch_parse_cdat(struct cxl_port *port)
{
        int rc;

        if (!port->cdat.table)
                return;

        rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
                              port, port->cdat.table, port->cdat.length);
        rc = cdat_table_parse_output(rc);
        if (rc)
                dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
static void __cxl_coordinates_combine(struct access_coordinate *out,
                                      struct access_coordinate *c1,
                                      struct access_coordinate *c2)
{
        if (c1->write_bandwidth && c2->write_bandwidth)
                out->write_bandwidth = min(c1->write_bandwidth,
                                           c2->write_bandwidth);
        out->write_latency = c1->write_latency + c2->write_latency;

        if (c1->read_bandwidth && c2->read_bandwidth)
                out->read_bandwidth = min(c1->read_bandwidth,
                                          c2->read_bandwidth);
        out->read_latency = c1->read_latency + c2->read_latency;
}
/**
 * cxl_coordinates_combine - Combine the two input coordinates
 *
 * @out: Output coordinate of c1 and c2 combined
 * @c1: first input coordinates
 * @c2: second input coordinates
 *
 * Latencies are summed, while bandwidths take the minimum of the two inputs
 * (when both are non-zero), matching the behavior of a path that traverses
 * both links.
 */
void cxl_coordinates_combine(struct access_coordinate *out,
                             struct access_coordinate *c1,
                             struct access_coordinate *c2)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
                __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}

MODULE_IMPORT_NS(CXL);
static void cxl_bandwidth_add(struct access_coordinate *coord,
                              struct access_coordinate *c1,
                              struct access_coordinate *c2)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                coord[i].read_bandwidth = c1[i].read_bandwidth +
                                          c2[i].read_bandwidth;
                coord[i].write_bandwidth = c1[i].write_bandwidth +
                                           c2[i].write_bandwidth;
        }
}

static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
                              struct resource *dpa_res)
{
        struct range dpa = {
                .start = dpa_res->start,
                .end = dpa_res->end,
        };

        return range_contains(&perf->dpa_range, &dpa);
}
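
/*
 * Map a decoder's mode to the cached perf data of the matching partition,
 * requiring the decoder's DPA allocation to fall entirely within the range
 * described by the DSMAS.
 */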
static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
                                               enum cxl_decoder_mode mode)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_dpa_perf *perf;

        switch (mode) {
        case CXL_DECODER_RAM:
                perf = &mds->ram_perf;
                break;
        case CXL_DECODER_PMEM:
                perf = &mds->pmem_perf;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        if (!dpa_perf_contains(perf, cxled->dpa_res))
                return ERR_PTR(-EINVAL);

        return perf;
}
/*
 * Transient context for the bandwidth calculation performed while walking
 * the port hierarchy to account for shared upstream links.
 */
struct cxl_perf_ctx {
        struct access_coordinate coord[ACCESS_COORDINATE_MAX];
        struct cxl_port *port;
};
/**
 * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
 * @cxlr: CXL region for the bandwidth calculation
 * @cxled: endpoint decoder to start on
 * @usp_xa: (output) the xarray that collects all the bandwidth coordinates
 *          indexed by the upstream device with data of 'struct cxl_perf_ctx'.
 * @gp_is_root: (output) bool of whether the grandparent is cxl root.
 *
 * Return: 0 for success or -errno
 *
 * Collects aggregated endpoint bandwidth and stores it in an xarray indexed
 * by the upstream device of the switch or the RP device. Each endpoint
 * contributes the minimum of the bandwidth from DSLBIS in the endpoint CDAT,
 * the endpoint upstream link bandwidth, and the bandwidth from the SSLBIS of
 * the switch CDAT for the switch upstream port to the downstream port that's
 * associated with the endpoint. If the device is directly connected to a RP,
 * then no SSLBIS is involved.
 */
static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
                                         struct cxl_endpoint_decoder *cxled,
                                         struct xarray *usp_xa,
                                         bool *gp_is_root)
{
        struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
        struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
        struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
        struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX];
        struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX];
        struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX];
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        struct cxl_perf_ctx *perf_ctx;
        struct cxl_dpa_perf *perf;
        unsigned long index;
        void *ptr;
        int rc;

        if (!dev_is_pci(cxlds->dev))
                return -ENODEV;

        if (cxlds->rcd)
                return -ENODEV;

        perf = cxled_get_dpa_perf(cxled, cxlr->mode);
        if (IS_ERR(perf))
                return PTR_ERR(perf);

        *gp_is_root = is_cxl_root(gp_port);

        /*
         * If the grandparent is cxl root, then index is the root port,
         * otherwise it's the parent switch upstream device.
         */
        if (*gp_is_root)
                index = (unsigned long)endpoint->parent_dport->dport_dev;
        else
                index = (unsigned long)parent_port->uport_dev;

        perf_ctx = xa_load(usp_xa, index);
        if (!perf_ctx) {
                struct cxl_perf_ctx *c __free(kfree) =
                        kzalloc(sizeof(*perf_ctx), GFP_KERNEL);

                if (!c)
                        return -ENOMEM;
                ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
                if (xa_is_err(ptr))
                        return xa_err(ptr);
                perf_ctx = no_free_ptr(c);
                perf_ctx->port = parent_port;
        }

        /* Direct upstream link from EP bandwidth */
        rc = cxl_pci_get_bandwidth(pdev, pci_coord);
        if (rc < 0)
                return rc;

        /*
         * Min of upstream link bandwidth and Endpoint CDAT bandwidth from
         * DSLBIS.
         */
        cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);

        /*
         * If the grandparent port is root, then there's no switch involved and
         * the endpoint is connected to a root port.
         */
        if (!*gp_is_root) {
                /*
                 * Retrieve the switch SSLBIS for the switch downstream port
                 * associated with the endpoint bandwidth.
                 */
                rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord);
                if (rc)
                        return rc;

                /*
                 * Min of the earlier coordinates with the switch SSLBIS
                 * bandwidth
                 */
                cxl_coordinates_combine(ep_coord, ep_coord, sw_coord);
        }

        /*
         * Aggregate the computed bandwidth with the current aggregated bandwidth
         * of the endpoints with the same switch upstream device or RP.
         */
        cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);

        return 0;
}
static void free_perf_xa(struct xarray *xa)
{
        struct cxl_perf_ctx *ctx;
        unsigned long index;

        if (!xa)
                return;

        xa_for_each(xa, index, ctx)
                kfree(ctx);
        xa_destroy(xa);
        kfree(xa);
}
DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T))
/**
 * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
 * @cxlr: The region being operated on
 * @input_xa: xarray indexed by upstream device of a switch with data of 'struct
 *            cxl_perf_ctx'
 * @gp_is_root: (output) bool of whether the grandparent is cxl root.
 *
 * Return: an xarray of resulting cxl_perf_ctx per parent switch or root port
 *         or ERR_PTR(-errno)
 *
 * Iterate through the xarray. Take the minimum of the downstream calculated
 * bandwidth, the upstream link bandwidth, and the SSLBIS of the upstream
 * switch if one exists. Sum the resulting bandwidth under the switch upstream
 * device or a RP device. The function can be called iteratively, once per
 * level of switches present in the topology.
 */
static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
                                                  struct xarray *input_xa,
                                                  bool *gp_is_root)
{
        struct xarray *res_xa __free(free_perf_xa) =
                kzalloc(sizeof(*res_xa), GFP_KERNEL);
        struct access_coordinate coords[ACCESS_COORDINATE_MAX];
        struct cxl_perf_ctx *ctx, *us_ctx;
        unsigned long index, us_index;
        int dev_count = 0;
        int gp_count = 0;
        void *ptr;
        int rc;

        if (!res_xa)
                return ERR_PTR(-ENOMEM);
        xa_init(res_xa);

        xa_for_each(input_xa, index, ctx) {
                struct device *dev = (struct device *)index;
                struct cxl_port *port = ctx->port;
                struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
                struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
                struct cxl_dport *dport = port->parent_dport;
                bool is_root = false;

                dev_count++;
                if (is_cxl_root(gp_port)) {
                        is_root = true;
                        gp_count++;
                }

                /*
                 * If the grandparent is cxl root, then index is the root port,
                 * otherwise it's the parent switch upstream device.
                 */
                if (is_root)
                        us_index = (unsigned long)port->parent_dport->dport_dev;
                else
                        us_index = (unsigned long)parent_port->uport_dev;

                us_ctx = xa_load(res_xa, us_index);
                if (!us_ctx) {
                        struct cxl_perf_ctx *n __free(kfree) =
                                kzalloc(sizeof(*n), GFP_KERNEL);

                        if (!n)
                                return ERR_PTR(-ENOMEM);

                        ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
                        if (xa_is_err(ptr))
                                return ERR_PTR(xa_err(ptr));
                        us_ctx = no_free_ptr(n);
                        us_ctx->port = parent_port;
                }

                /*
                 * If the device isn't an upstream PCIe port, there's something
                 * wrong with the topology.
                 */
                if (!dev_is_pci(dev))
                        return ERR_PTR(-EINVAL);

                /* Retrieve the upstream link bandwidth */
                rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords);
                if (rc)
                        return ERR_PTR(-ENXIO);

                /*
                 * Take the min of downstream bandwidth and the upstream link
                 * bandwidth.
                 */
                cxl_coordinates_combine(coords, coords, ctx->coord);

                /*
                 * Take the min of the calculated bandwidth and the upstream
                 * switch SSLBIS bandwidth if there's a parent switch.
                 */
                if (!is_root)
                        cxl_coordinates_combine(coords, coords, dport->coord);

                /*
                 * Aggregate the calculated bandwidth common to an upstream
                 * switch.
                 */
                cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
        }

        if (gp_count) {
                /* Asymmetric topology detected. */
                if (gp_count != dev_count) {
                        dev_dbg(&cxlr->dev,
                                "Asymmetric hierarchy detected, bandwidth not updated\n");
                        return ERR_PTR(-EOPNOTSUPP);
                }
                *gp_is_root = true;
        }

        return no_free_ptr(res_xa);
}
/**
 * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
 * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
 *      below each root port device.
 *
 * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
 */
static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa)
{
        struct xarray *hb_xa __free(free_perf_xa) =
                kzalloc(sizeof(*hb_xa), GFP_KERNEL);
        struct cxl_perf_ctx *ctx;
        unsigned long index;

        if (!hb_xa)
                return ERR_PTR(-ENOMEM);
        xa_init(hb_xa);

        xa_for_each(xa, index, ctx) {
                struct cxl_port *port = ctx->port;
                unsigned long hb_index = (unsigned long)port->uport_dev;
                struct cxl_perf_ctx *hb_ctx;
                void *ptr;

                hb_ctx = xa_load(hb_xa, hb_index);
                if (!hb_ctx) {
                        struct cxl_perf_ctx *n __free(kfree) =
                                kzalloc(sizeof(*n), GFP_KERNEL);

                        if (!n)
                                return ERR_PTR(-ENOMEM);
                        ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
                        if (xa_is_err(ptr))
                                return ERR_PTR(xa_err(ptr));
                        hb_ctx = no_free_ptr(n);
                        hb_ctx->port = port;
                }

                cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
        }

        return no_free_ptr(hb_xa);
}
/**
 * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
 * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
 *      below each host bridge.
 *
 * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
 */
static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
{
        struct xarray *mw_xa __free(free_perf_xa) =
                kzalloc(sizeof(*mw_xa), GFP_KERNEL);
        struct cxl_perf_ctx *ctx;
        unsigned long index;

        if (!mw_xa)
                return ERR_PTR(-ENOMEM);
        xa_init(mw_xa);

        xa_for_each(xa, index, ctx) {
                struct cxl_port *port = ctx->port;
                struct cxl_port *parent_port;
                struct cxl_perf_ctx *mw_ctx;
                struct cxl_dport *dport;
                unsigned long mw_index;
                void *ptr;

                parent_port = to_cxl_port(port->dev.parent);
                mw_index = (unsigned long)parent_port->uport_dev;
                mw_ctx = xa_load(mw_xa, mw_index);
                if (!mw_ctx) {
                        struct cxl_perf_ctx *n __free(kfree) =
                                kzalloc(sizeof(*n), GFP_KERNEL);

                        if (!n)
                                return ERR_PTR(-ENOMEM);
                        ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
                        if (xa_is_err(ptr))
                                return ERR_PTR(xa_err(ptr));
                        mw_ctx = no_free_ptr(n);
                }

                dport = port->parent_dport;
                cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
                cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
        }

        return no_free_ptr(mw_xa);
}
/**
 * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
 * @cxlr: The region being operated on
 * @input_xa: xarray that holds cxl_perf_ctx with the calculated bandwidth per
 *            ACPI0017 instance
 */
static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
                                        struct xarray *input_xa)
{
        struct access_coordinate coord[ACCESS_COORDINATE_MAX];
        struct cxl_perf_ctx *ctx;
        unsigned long index;

        memset(coord, 0, sizeof(coord));
        xa_for_each(input_xa, index, ctx)
                cxl_bandwidth_add(coord, coord, ctx->coord);

        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
                cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
        }
}
/**
 * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
 *                                               the region
 * @cxlr: the cxl region to recalculate
 *
 * The function walks the topology from the bottom up and calculates the
 * bandwidth. It starts at the endpoints, processes the switches if any exist,
 * then the root ports, then the host bridges, and finally aggregates the
 * result at the region.
 */
void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
{
        struct xarray *working_xa;
        int root_count = 0;
        bool is_root;
        int rc;

        lockdep_assert_held(&cxl_dpa_rwsem);

        struct xarray *usp_xa __free(free_perf_xa) =
                kzalloc(sizeof(*usp_xa), GFP_KERNEL);

        if (!usp_xa)
                return;

        xa_init(usp_xa);

        /* Collect bandwidth data from all the endpoints. */
        for (int i = 0; i < cxlr->params.nr_targets; i++) {
                struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];

                is_root = false;
                rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root);
                if (rc)
                        return;
                root_count += is_root;
        }

        /* Detect asymmetric hierarchy with some direct attached endpoints. */
        if (root_count && root_count != cxlr->params.nr_targets) {
                dev_dbg(&cxlr->dev,
                        "Asymmetric hierarchy detected, bandwidth not updated\n");
                return;
        }

        /*
         * Walk up one or more switches to deal with the bandwidth of the
         * switches if they exist. Endpoints directly attached to RPs skip
         * over this part.
         */
        if (!root_count) {
                do {
                        working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa,
                                                                 &is_root);
                        if (IS_ERR(working_xa))
                                return;
                        free_perf_xa(usp_xa);
                        usp_xa = working_xa;
                } while (!is_root);
        }

        /* Handle the bandwidth at the root port of the hierarchy */
        working_xa = cxl_rp_gather_bandwidth(usp_xa);
        if (IS_ERR(working_xa))
                return;
        free_perf_xa(usp_xa);
        usp_xa = working_xa;

        /* Handle the bandwidth at the host bridge of the hierarchy */
        working_xa = cxl_hb_gather_bandwidth(usp_xa);
        if (IS_ERR(working_xa))
                return;
        free_perf_xa(usp_xa);
        usp_xa = working_xa;

        /*
         * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and
         * update the region bandwidth with the final calculated values.
         */
        cxl_region_update_bandwidth(cxlr, usp_xa);
}
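
/*
 * Fold a newly attached endpoint decoder's perf data into the region's
 * access coordinates: target bandwidths sum, while the region latency is
 * the worst (largest) latency among the targets.
 */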
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
                                    struct cxl_endpoint_decoder *cxled)
{
        struct cxl_dpa_perf *perf;

        lockdep_assert_held(&cxl_dpa_rwsem);

        perf = cxled_get_dpa_perf(cxled, cxlr->mode);
        if (IS_ERR(perf))
                return;

        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                /* Get total bandwidth and the worst latency for the cxl region */
                cxlr->coord[i].read_latency = max_t(unsigned int,
                                                    cxlr->coord[i].read_latency,
                                                    perf->coord[i].read_latency);
                cxlr->coord[i].write_latency = max_t(unsigned int,
                                                     cxlr->coord[i].write_latency,
                                                     perf->coord[i].write_latency);
                cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
                cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
        }
}
int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
                                       enum access_coordinate_class access)
{
        return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
}

bool cxl_need_node_perf_attrs_update(int nid)
{
        return !acpi_node_backed_by_real_pxm(nid);
}