Lines Matching refs:drvdata

37  * @drvdata		- The ETR drvdata this buffer has been allocated for.
45 struct tmc_drvdata *drvdata;
602 static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
607 struct device *real_dev = drvdata->csdev->dev.parent;
627 flat_buf->dev = &drvdata->csdev->dev;
701 static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
706 struct device *dev = &drvdata->csdev->dev;
784 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
786 struct coresight_device *etr = drvdata->csdev;
818 struct tmc_drvdata *drvdata,
829 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
841 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
844 buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
845 buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
857 * @drvdata : ETR device details.
863 static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
870 struct device *dev = &drvdata->csdev->dev;
880 if (drvdata->etr_mode != ETR_MODE_AUTO)
881 rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata,
897 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
900 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
903 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
953 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
958 static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
960 struct etr_buf *etr_buf = drvdata->etr_buf;
964 rrp = tmc_read_rrp(drvdata);
965 rwp = tmc_read_rwp(drvdata);
966 status = readl_relaxed(drvdata->base + TMC_STS);
973 dev_dbg(&drvdata->csdev->dev,
987 static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
990 struct etr_buf *etr_buf = drvdata->etr_buf;
993 CS_UNLOCK(drvdata->base);
996 rc = tmc_wait_for_tmcready(drvdata);
998 dev_err(&drvdata->csdev->dev,
1000 CS_LOCK(drvdata->base);
1004 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
1005 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
1007 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
1010 axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size);
1013 if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
1021 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
1022 tmc_write_dba(drvdata, etr_buf->hwaddr);
1028 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
1029 tmc_write_rrp(drvdata, etr_buf->hwaddr);
1030 tmc_write_rwp(drvdata, etr_buf->hwaddr);
1031 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
1032 writel_relaxed(sts, drvdata->base + TMC_STS);
1038 drvdata->base + TMC_FFCR);
1039 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
1040 tmc_enable_hw(drvdata);
1042 CS_LOCK(drvdata->base);
1046 static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
1056 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
1059 if (WARN_ON(drvdata->etr_buf))
1062 rc = coresight_claim_device(drvdata->csdev);
1064 drvdata->etr_buf = etr_buf;
1065 rc = __tmc_etr_enable_hw(drvdata);
1067 drvdata->etr_buf = NULL;
1068 coresight_disclaim_device(drvdata->csdev);
1082 * We are protected here by drvdata->reading != 0, which ensures the
1085 ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
1090 struct etr_buf *etr_buf = drvdata->sysfs_buf;
1105 tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
1107 return tmc_alloc_etr_buf(drvdata, drvdata->size,
1118 static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
1120 struct etr_buf *etr_buf = drvdata->etr_buf;
1122 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
1123 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
1124 drvdata->sysfs_buf = NULL;
1126 tmc_sync_etr_buf(drvdata);
1137 static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1139 CS_UNLOCK(drvdata->base);
1141 tmc_flush_and_stop(drvdata);
1146 if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
1147 tmc_etr_sync_sysfs_buf(drvdata);
1149 tmc_disable_hw(drvdata);
1151 CS_LOCK(drvdata->base);
1155 void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1157 __tmc_etr_disable_hw(drvdata);
1158 coresight_disclaim_device(drvdata->csdev);
1160 drvdata->etr_buf = NULL;
1167 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1178 spin_lock_irqsave(&drvdata->spinlock, flags);
1179 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1180 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
1181 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1184 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
1189 spin_lock_irqsave(&drvdata->spinlock, flags);
1192 if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) {
1201 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1204 drvdata->sysfs_buf = new_buf;
1208 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1213 return ret ? ERR_PTR(ret) : drvdata->sysfs_buf;
1220 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1226 spin_lock_irqsave(&drvdata->spinlock, flags);
1238 ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
1245 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1281 alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1293 if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
1294 etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
1304 size = drvdata->size;
1306 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
1319 get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
1346 mutex_lock(&drvdata->idr_mutex);
1347 etr_buf = idr_find(&drvdata->idr, pid);
1350 mutex_unlock(&drvdata->idr_mutex);
1355 mutex_unlock(&drvdata->idr_mutex);
1357 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1362 mutex_lock(&drvdata->idr_mutex);
1363 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
1364 mutex_unlock(&drvdata->idr_mutex);
1383 get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
1391 return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1395 get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1399 return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
1402 return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
1407 tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1420 etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1432 etr_perf->drvdata = drvdata;
1444 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1446 etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
1464 struct tmc_drvdata *drvdata = etr_perf->drvdata;
1470 mutex_lock(&drvdata->idr_mutex);
1473 mutex_unlock(&drvdata->idr_mutex);
1478 buf = idr_remove(&drvdata->idr, etr_perf->pid);
1479 mutex_unlock(&drvdata->idr_mutex);
1560 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1564 spin_lock_irqsave(&drvdata->spinlock, flags);
1568 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1572 if (WARN_ON(drvdata->perf_buf != etr_buf)) {
1574 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1578 CS_UNLOCK(drvdata->base);
1580 tmc_flush_and_stop(drvdata);
1581 tmc_sync_etr_buf(drvdata);
1583 CS_LOCK(drvdata->base);
1584 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1598 u32 mask = tmc_get_memwidth_mask(drvdata);
1649 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1653 spin_lock_irqsave(&drvdata->spinlock, flags);
1669 if (drvdata->pid != -1 && drvdata->pid != pid) {
1678 if (drvdata->pid == pid) {
1683 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
1686 drvdata->pid = pid;
1688 drvdata->perf_buf = etr_perf->etr_buf;
1693 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1713 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1715 spin_lock_irqsave(&drvdata->spinlock, flags);
1717 if (drvdata->reading) {
1718 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1724 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1730 tmc_etr_disable_hw(drvdata);
1732 drvdata->pid = -1;
1735 drvdata->perf_buf = NULL;
1737 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1755 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
1761 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
1764 spin_lock_irqsave(&drvdata->spinlock, flags);
1765 if (drvdata->reading) {
1773 * If drvdata::sysfs_data is NULL the trace data has been read already.
1775 if (!drvdata->sysfs_buf) {
1781 if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
1782 __tmc_etr_disable_hw(drvdata);
1784 drvdata->reading = true;
1786 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1791 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
1797 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
1800 spin_lock_irqsave(&drvdata->spinlock, flags);
1803 if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
1806 * buffer. Since the tracer is still enabled drvdata::buf can't
1809 __tmc_etr_enable_hw(drvdata);
1815 sysfs_buf = drvdata->sysfs_buf;
1816 drvdata->sysfs_buf = NULL;
1819 drvdata->reading = false;
1820 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1859 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
1861 return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]);
1868 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
1873 drvdata->etr_mode = ETR_MODE_FLAT;
1875 drvdata->etr_mode = ETR_MODE_ETR_SG;
1877 drvdata->etr_mode = ETR_MODE_CATU;
1879 drvdata->etr_mode = ETR_MODE_AUTO;