Lines matching refs:tx — references to the Tx timestamp tracker tx in drivers/net/ethernet/intel/ice/ice_ptp.c. Each entry gives the source line number, the matching code, and the enclosing function; lines that do not mention tx are omitted, which is why the functions below appear with gaps.
1173 struct ice_ptp_tx *tx; in ice_ptp_tx_tstamp_work() local
1178 tx = container_of(work, struct ice_ptp_tx, work); in ice_ptp_tx_tstamp_work()
1179 if (!tx->init) in ice_ptp_tx_tstamp_work()
1182 ptp_port = container_of(tx, struct ice_ptp_port, tx); in ice_ptp_tx_tstamp_work()
1186 for_each_set_bit(idx, tx->in_use, tx->len) { in ice_ptp_tx_tstamp_work()
1188 u8 phy_idx = idx + tx->quad_offset; in ice_ptp_tx_tstamp_work()
1193 err = ice_read_phy_tstamp(hw, tx->quad, phy_idx, in ice_ptp_tx_tstamp_work()
1200 raw_tstamp == tx->tstamps[idx].cached_tstamp) in ice_ptp_tx_tstamp_work()
1206 spin_lock(&tx->lock); in ice_ptp_tx_tstamp_work()
1207 tx->tstamps[idx].cached_tstamp = raw_tstamp; in ice_ptp_tx_tstamp_work()
1208 clear_bit(idx, tx->in_use); in ice_ptp_tx_tstamp_work()
1209 skb = tx->tstamps[idx].skb; in ice_ptp_tx_tstamp_work()
1210 tx->tstamps[idx].skb = NULL; in ice_ptp_tx_tstamp_work()
1211 spin_unlock(&tx->lock); in ice_ptp_tx_tstamp_work()
1230 spin_lock(&tx->lock); in ice_ptp_tx_tstamp_work()
1231 if (!bitmap_empty(tx->in_use, tx->len)) in ice_ptp_tx_tstamp_work()
1232 kthread_queue_work(pf->ptp.kworker, &tx->work); in ice_ptp_tx_tstamp_work()
1233 spin_unlock(&tx->lock); in ice_ptp_tx_tstamp_work()
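The entries above show only the lines of ice_ptp_tx_tstamp_work() that reference tx, so the actual delivery of each timestamp to the stack is invisible here. Below is a sketch of the polling loop reconstructed around the listed lines; pf and hw come from the container_of() setup around line 1182 (their derivation is not shown), and the ice_ptp_extend_ts() / skb_tstamp_tx() tail is filled in from context as an assumption, not taken from the listing.

    for_each_set_bit(idx, tx->in_use, tx->len) {
            struct skb_shared_hwtstamps shhwtstamps = {};
            u8 phy_idx = idx + tx->quad_offset;
            u64 raw_tstamp, tstamp;
            struct sk_buff *skb;
            int err;

            /* Read the raw value from the PHY block (line 1193) */
            err = ice_read_phy_tstamp(hw, tx->quad, phy_idx, &raw_tstamp);
            if (err)
                    continue;

            /* Skip invalid values and values identical to the cached
             * result of a previous read (line 1200).
             */
            if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
                raw_tstamp == tx->tstamps[idx].cached_tstamp)
                    continue;

            /* Claim the slot under the lock, deliver outside of it
             * (lines 1206-1211).
             */
            spin_lock(&tx->lock);
            tx->tstamps[idx].cached_tstamp = raw_tstamp;
            clear_bit(idx, tx->in_use);
            skb = tx->tstamps[idx].skb;
            tx->tstamps[idx].skb = NULL;
            spin_unlock(&tx->lock);

            /* The cleanup path may have already discarded this request */
            if (!skb)
                    continue;

            /* Assumed tail: extend the raw PHY value to a full 64-bit
             * nanosecond timestamp and hand it to the stack.
             */
            tstamp = ice_ptp_extend_ts(pf, raw_tstamp);
            shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
            skb_tstamp_tx(skb, &shhwtstamps);
            dev_kfree_skb_any(skb);
    }

Lines 1230-1233 then re-queue the work under tx->lock whenever any in_use bit is still set, so the worker keeps polling until every outstanding timestamp has been read or flushed.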
1241 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) in ice_ptp_request_ts() argument
1246 if (!tx->init) in ice_ptp_request_ts()
1249 spin_lock(&tx->lock); in ice_ptp_request_ts()
1251 idx = find_first_zero_bit(tx->in_use, tx->len); in ice_ptp_request_ts()
1252 if (idx < tx->len) { in ice_ptp_request_ts()
1257 set_bit(idx, tx->in_use); in ice_ptp_request_ts()
1258 tx->tstamps[idx].start = jiffies; in ice_ptp_request_ts()
1259 tx->tstamps[idx].skb = skb_get(skb); in ice_ptp_request_ts()
1263 spin_unlock(&tx->lock); in ice_ptp_request_ts()
1268 if (idx >= tx->len) in ice_ptp_request_ts()
1271 return idx + tx->quad_offset; in ice_ptp_request_ts()
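ice_ptp_request_ts() is the transmit-path entry point: it reserves a PHY timestamp slot for an skb and returns the register index the hardware should stamp into. A sketch of the whole function follows; the early return, the SKBTX_IN_PROGRESS marking, and the -1 fallback do not reference tx and are therefore assumptions filled in from context.

    s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
    {
            u8 idx;

            if (!tx->init)
                    return -1;      /* assumed: tracker not ready */

            spin_lock(&tx->lock);
            /* Reserve the first free slot (lines 1251-1259) and record
             * the request time so stale entries can be aged out later.
             */
            idx = find_first_zero_bit(tx->in_use, tx->len);
            if (idx < tx->len) {
                    set_bit(idx, tx->in_use);
                    tx->tstamps[idx].start = jiffies;
                    tx->tstamps[idx].skb = skb_get(skb);
                    /* assumed: tell the stack a hardware timestamp is
                     * in flight for this skb */
                    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
            }
            spin_unlock(&tx->lock);

            if (idx >= tx->len)
                    return -1;      /* assumed: all slots busy */

            return idx + tx->quad_offset;
    }

The skb_get() on line 1259 takes an extra reference, so the skb stays alive until the worker delivers its timestamp or the flush/cleanup paths drop it.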
1283 if (pf->ptp.port.tx.init) in ice_ptp_process_ts()
1284 kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work); in ice_ptp_process_ts()
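ice_ptp_process_ts() is the bridge out of interrupt context: it does nothing but queue tx.work on the PTP kthread worker, so the comparatively slow ice_read_phy_tstamp() accesses run in kthread context instead of in the interrupt handler itself.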
1295 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) in ice_ptp_alloc_tx_tracker() argument
1297 tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL); in ice_ptp_alloc_tx_tracker()
1298 if (!tx->tstamps) in ice_ptp_alloc_tx_tracker()
1301 tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL); in ice_ptp_alloc_tx_tracker()
1302 if (!tx->in_use) { in ice_ptp_alloc_tx_tracker()
1303 kfree(tx->tstamps); in ice_ptp_alloc_tx_tracker()
1304 tx->tstamps = NULL; in ice_ptp_alloc_tx_tracker()
1308 spin_lock_init(&tx->lock); in ice_ptp_alloc_tx_tracker()
1309 kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work); in ice_ptp_alloc_tx_tracker()
1311 tx->init = 1; in ice_ptp_alloc_tx_tracker()
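The allocator's return statements do not reference tx, so its error handling is only partially visible above. Presumably it follows the usual kernel pattern; a sketch with the assumed return values:

    static int
    ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
    {
            tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
            if (!tx->tstamps)
                    return -ENOMEM;         /* assumed return value */

            tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
            if (!tx->in_use) {
                    kfree(tx->tstamps);
                    tx->tstamps = NULL;
                    return -ENOMEM;         /* assumed return value */
            }

            spin_lock_init(&tx->lock);
            kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

            tx->init = 1;

            return 0;                       /* assumed return value */
    }

Note the pairing: tstamps comes from kcalloc() and is freed with kfree(), while in_use comes from bitmap_zalloc() and must be freed with bitmap_free() (line 1362).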
1322 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) in ice_ptp_flush_tx_tracker() argument
1326 for (idx = 0; idx < tx->len; idx++) { in ice_ptp_flush_tx_tracker()
1327 u8 phy_idx = idx + tx->quad_offset; in ice_ptp_flush_tx_tracker()
1329 spin_lock(&tx->lock); in ice_ptp_flush_tx_tracker()
1330 if (tx->tstamps[idx].skb) { in ice_ptp_flush_tx_tracker()
1331 dev_kfree_skb_any(tx->tstamps[idx].skb); in ice_ptp_flush_tx_tracker()
1332 tx->tstamps[idx].skb = NULL; in ice_ptp_flush_tx_tracker()
1334 clear_bit(idx, tx->in_use); in ice_ptp_flush_tx_tracker()
1335 spin_unlock(&tx->lock); in ice_ptp_flush_tx_tracker()
1339 ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx); in ice_ptp_flush_tx_tracker()
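The flush does two distinct things per slot: under tx->lock it drops any skb still waiting (no timestamp will ever be delivered for it) and clears the in_use bit, then outside the lock it calls ice_clear_phy_tstamp() so a residual value left in the PHY register cannot be mistaken for a fresh timestamp once the index is reused. Any guard around the PHY clear (for example while a reset is in progress) would not reference tx and is therefore not visible in this listing.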
1351 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) in ice_ptp_release_tx_tracker() argument
1353 tx->init = 0; in ice_ptp_release_tx_tracker()
1355 kthread_cancel_work_sync(&tx->work); in ice_ptp_release_tx_tracker()
1357 ice_ptp_flush_tx_tracker(pf, tx); in ice_ptp_release_tx_tracker()
1359 kfree(tx->tstamps); in ice_ptp_release_tx_tracker()
1360 tx->tstamps = NULL; in ice_ptp_release_tx_tracker()
1362 bitmap_free(tx->in_use); in ice_ptp_release_tx_tracker()
1363 tx->in_use = NULL; in ice_ptp_release_tx_tracker()
1365 tx->len = 0; in ice_ptp_release_tx_tracker()
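The teardown order in ice_ptp_release_tx_tracker() matters: clearing tx->init first makes ice_ptp_request_ts() refuse new requests, kthread_cancel_work_sync() then guarantees the worker is no longer running, and only after the flush has dropped every outstanding skb are the arrays freed and len zeroed, so nothing can index into freed memory.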
1377 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) in ice_ptp_init_tx_e810() argument
1379 tx->quad = pf->hw.port_info->lport; in ice_ptp_init_tx_e810()
1380 tx->quad_offset = 0; in ice_ptp_init_tx_e810()
1381 tx->len = INDEX_PER_QUAD; in ice_ptp_init_tx_e810()
1383 return ice_ptp_alloc_tx_tracker(tx); in ice_ptp_init_tx_e810()
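On E810 hardware each port owns an entire quad's worth of timestamp indexes, so the tracker maps straight onto the PHY registers: quad comes from the logical port number, quad_offset is 0, and len covers the full quad (INDEX_PER_QUAD). The quad_offset field exists so a tracker can also describe a sub-range of a quad; a hypothetical initializer for hardware that shares one quad between several ports might look like the following (the function name and the ports_per_quad parameter are illustrative, not taken from the listing):

    /* Hypothetical: give each of the ports sharing a quad an equal,
     * non-overlapping slice of the quad's timestamp indexes.
     */
    static int
    ice_ptp_init_tx_shared_quad(struct ice_pf *pf, struct ice_ptp_tx *tx,
                                u8 port, u8 ports_per_quad)
    {
            u8 len = INDEX_PER_QUAD / ports_per_quad;

            tx->quad = port / ports_per_quad;
            tx->quad_offset = (port % ports_per_quad) * len;
            tx->len = len;

            return ice_ptp_alloc_tx_tracker(tx);
    }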
1397 static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx) in ice_ptp_tx_tstamp_cleanup() argument
1401 if (!tx->init) in ice_ptp_tx_tstamp_cleanup()
1404 for_each_set_bit(idx, tx->in_use, tx->len) { in ice_ptp_tx_tstamp_cleanup()
1409 if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ)) in ice_ptp_tx_tstamp_cleanup()
1413 ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset, in ice_ptp_tx_tstamp_cleanup()
1416 spin_lock(&tx->lock); in ice_ptp_tx_tstamp_cleanup()
1417 skb = tx->tstamps[idx].skb; in ice_ptp_tx_tstamp_cleanup()
1418 tx->tstamps[idx].skb = NULL; in ice_ptp_tx_tstamp_cleanup()
1419 clear_bit(idx, tx->in_use); in ice_ptp_tx_tstamp_cleanup()
1420 spin_unlock(&tx->lock); in ice_ptp_tx_tstamp_cleanup()
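The time_is_after_jiffies(start + 2 * HZ) test on line 1409 skips entries younger than two seconds, so only requests the hardware has clearly abandoned are discarded; the read on line 1413 exists to free up the PHY register, not to deliver a value. The loop body ends at line 1420 with the slot released but the skb still held, and the listing omits what happens next; presumably it is dropped without a timestamp:

    /* Assumed tail of the loop body: free the aged-out skb after
     * releasing tx->lock; no timestamp is delivered for it.
     */
    dev_kfree_skb_any(skb);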
1438 ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx); in ice_ptp_periodic_work()
1549 ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx); in ice_ptp_init()
1597 ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); in ice_ptp_release()
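Taken together, the last three entries show the tracker's lifecycle: ice_ptp_init() allocates it through ice_ptp_init_tx_e810() (line 1549), ice_ptp_periodic_work() runs ice_ptp_tx_tstamp_cleanup() to age out requests the hardware never completed (line 1438), and ice_ptp_release() tears everything down through ice_ptp_release_tx_tracker() (line 1597).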