Lines matching refs:dev_data (Zephyr Xilinx GEM Ethernet driver, eth_xlnx_gem)
231 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_iface_init() local
234 dev_data->iface = iface; in eth_xlnx_gem_iface_init()
235 net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET); in eth_xlnx_gem_iface_init()
243 k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work); in eth_xlnx_gem_iface_init()
244 k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work); in eth_xlnx_gem_iface_init()
245 k_work_init_delayable(&dev_data->phy_poll_delayed_work, in eth_xlnx_gem_iface_init()
249 k_sem_init(&dev_data->tx_done_sem, 0, 1); in eth_xlnx_gem_iface_init()
255 k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1); in eth_xlnx_gem_iface_init()
262 k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT); in eth_xlnx_gem_iface_init()
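
A minimal sketch of the interface-init pattern visible above, assuming the surrounding struct layout (field and handler names are taken from the matched lines; the rest is reconstructed):

#include <zephyr/kernel.h>
#include <zephyr/net/net_if.h>

static void eth_xlnx_gem_iface_init(struct net_if *iface)
{
        const struct device *dev = net_if_get_device(iface);
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;

        dev_data->iface = iface;
        net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET);

        /* TX-done and RX-pending handling are deferred to the system
         * work queue; the PHY poll is a delayable work item. */
        k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work);
        k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work);
        k_work_init_delayable(&dev_data->phy_poll_delayed_work,
                              eth_xlnx_gem_poll_phy);

        /* tx_done_sem starts empty: eth_xlnx_gem_send() blocks on it until
         * the TX-done path gives it. ring_sem starts full: it acts as a
         * mutex around the TX BD ring bookkeeping. */
        k_sem_init(&dev_data->tx_done_sem, 0, 1);
        k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1);

        /* Poll the PHY once immediately so link state is known early */
        k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
}
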
276 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_isr() local
306 k_work_submit(&dev_data->tx_done_work); in eth_xlnx_gem_isr()
317 k_work_submit(&dev_data->rx_pend_work); in eth_xlnx_gem_isr()
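
The ISR itself stays short: it only decides which deferred handler to queue via k_work_submit(). A hedged sketch; the interrupt-status read and the bit masks are placeholders, since the matched lines do not show the register access:

static void eth_xlnx_gem_isr(const struct device *dev)
{
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;
        /* HYPOTHETICAL: read-and-acknowledge of the GEM interrupt status
         * register; actual offsets/macros are not in the matched lines. */
        uint32_t isr_status = gem_read_and_clear_isr(dev);

        if (isr_status & GEM_ISR_TX_DONE) {   /* placeholder bit mask */
                k_work_submit(&dev_data->tx_done_work);
        }
        if (isr_status & GEM_ISR_RX_PEND) {   /* placeholder bit mask */
                k_work_submit(&dev_data->rx_pend_work);
        }
}
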
356 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_send() local
370 if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN || in eth_xlnx_gem_send()
371 (!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) { in eth_xlnx_gem_send()
373 dev_data->stats.tx_dropped++; in eth_xlnx_gem_send()
382 dev_data->stats.errors.tx++; in eth_xlnx_gem_send()
402 k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER); in eth_xlnx_gem_send()
408 if (bds_reqd > dev_data->txbd_ring.free_bds) { in eth_xlnx_gem_send()
412 dev_data->txbd_ring.free_bds); in eth_xlnx_gem_send()
415 k_sem_give(&(dev_data->txbd_ring.ring_sem)); in eth_xlnx_gem_send()
421 dev_data->stats.tx_dropped++; in eth_xlnx_gem_send()
426 curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_use; in eth_xlnx_gem_send()
427 reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); in eth_xlnx_gem_send()
429 dev_data->txbd_ring.next_to_use = (first_bd_idx + bds_reqd) % in eth_xlnx_gem_send()
431 dev_data->txbd_ring.free_bds -= bds_reqd; in eth_xlnx_gem_send()
434 k_sem_give(&(dev_data->txbd_ring.ring_sem)); in eth_xlnx_gem_send()
447 tx_buffer_offs = (void *)(dev_data->first_tx_buffer + in eth_xlnx_gem_send()
465 reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); in eth_xlnx_gem_send()
488 reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); in eth_xlnx_gem_send()
500 dev_data->stats.bytes.sent += tx_data_length; in eth_xlnx_gem_send()
501 dev_data->stats.pkts.tx++; in eth_xlnx_gem_send()
505 sem_status = k_sem_take(&dev_data->tx_done_sem, K_MSEC(100)); in eth_xlnx_gem_send()
509 dev_data->stats.tx_timeout_count++; in eth_xlnx_gem_send()
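
eth_xlnx_gem_send() shows the TX buffer-descriptor (BD) reservation pattern: take ring_sem, check free_bds, advance next_to_use modulo the ring size, release the semaphore, then block on tx_done_sem with a 100 ms timeout. A condensed sketch under those assumptions (the config struct type name, the ring-size field, and the return codes are reconstructed; the data copy into first_tx_buffer is elided):

static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt)
{
        const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; /* type name assumed */
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;
        size_t tx_data_length = net_pkt_get_len(pkt);
        uint8_t bds_reqd = 1; /* derived from tx_data_length in the driver */
        uint8_t first_bd_idx;

        if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN ||
            !net_if_flag_is_set(dev_data->iface, NET_IF_UP)) {
                dev_data->stats.tx_dropped++;
                return -EIO;
        }

        /* Reserve descriptors under the ring semaphore */
        k_sem_take(&dev_data->txbd_ring.ring_sem, K_FOREVER);
        if (bds_reqd > dev_data->txbd_ring.free_bds) {
                k_sem_give(&dev_data->txbd_ring.ring_sem);
                dev_data->stats.tx_dropped++;
                return -ENOMEM;
        }
        first_bd_idx = dev_data->txbd_ring.next_to_use;
        dev_data->txbd_ring.next_to_use =
                (first_bd_idx + bds_reqd) % dev_conf->txbd_count;
        dev_data->txbd_ring.free_bds -= bds_reqd;
        k_sem_give(&dev_data->txbd_ring.ring_sem);

        /* ... copy the frame into first_tx_buffer, set the BD ctrl words,
         * trigger transmission ... */

        dev_data->stats.bytes.sent += tx_data_length;
        dev_data->stats.pkts.tx++;

        /* Wait for the TX-done path, bounded to 100 ms */
        if (k_sem_take(&dev_data->tx_done_sem, K_MSEC(100)) != 0) {
                dev_data->stats.tx_timeout_count++;
                return -ETIMEDOUT;
        }
        return 0;
}
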
531 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_start_device() local
534 if (dev_data->started) { in eth_xlnx_gem_start_device()
537 dev_data->started = true; in eth_xlnx_gem_start_device()
559 if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) { in eth_xlnx_gem_start_device()
560 k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT); in eth_xlnx_gem_start_device()
580 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_stop_device() local
583 if (!dev_data->started) { in eth_xlnx_gem_stop_device()
586 dev_data->started = false; in eth_xlnx_gem_stop_device()
589 if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) { in eth_xlnx_gem_stop_device()
590 k_work_cancel_delayable(&dev_data->phy_poll_delayed_work); in eth_xlnx_gem_stop_device()
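
Start and stop are symmetric around the PHY-poll work item: start marks the device running and kicks the poll only if it is not already queued (remaining time == 0), stop clears the flag and cancels a pending poll. A sketch with the hardware enable/disable steps elided:

static int eth_xlnx_gem_start_device(const struct device *dev)
{
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;

        if (dev_data->started) {
                return 0;
        }
        dev_data->started = true;

        /* ... enable RX/TX and interrupts in hardware ... */

        if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) {
                k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
        }
        return 0;
}

static int eth_xlnx_gem_stop_device(const struct device *dev)
{
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;

        if (!dev_data->started) {
                return 0;
        }
        dev_data->started = false;

        if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) {
                k_work_cancel_delayable(&dev_data->phy_poll_delayed_work);
        }

        /* ... disable RX/TX and interrupts in hardware ... */
        return 0;
}
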
729 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_set_config() local
746 memcpy(dev_data->mac_addr, config->mac_address.addr, sizeof(dev_data->mac_addr)); in eth_xlnx_gem_set_config()
748 net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, in eth_xlnx_gem_set_config()
749 sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); in eth_xlnx_gem_set_config()
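
The set_config path above implements a runtime MAC address change: copy the new address into the driver data, then re-register it with the network stack. A sketch of that branch, assuming the standard Zephyr Ethernet management hook (other config types omitted):

static int eth_xlnx_gem_set_config(const struct device *dev,
                                   enum ethernet_config_type type,
                                   const struct ethernet_config *config)
{
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;

        if (type == ETHERNET_CONFIG_TYPE_MAC_ADDRESS) {
                memcpy(dev_data->mac_addr, config->mac_address.addr,
                       sizeof(dev_data->mac_addr));
                /* The hardware address registers would be rewritten here,
                 * presumably via eth_xlnx_gem_set_mac_address(). */
                net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
                                     sizeof(dev_data->mac_addr),
                                     NET_LINK_ETHERNET);
                return 0;
        }
        return -ENOTSUP;
}
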
768 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_stats() local
770 return &dev_data->stats; in eth_xlnx_gem_stats()
833 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_configure_clocks() local
841 if ((!dev_conf->init_phy) || dev_data->eff_link_speed == LINK_DOWN) { in eth_xlnx_gem_configure_clocks()
857 } else if (dev_data->eff_link_speed != LINK_DOWN) { in eth_xlnx_gem_configure_clocks()
862 if (dev_data->eff_link_speed == LINK_10MBIT) { in eth_xlnx_gem_configure_clocks()
864 } else if (dev_data->eff_link_speed == LINK_100MBIT) { in eth_xlnx_gem_configure_clocks()
866 } else if (dev_data->eff_link_speed == LINK_1GBIT) { in eth_xlnx_gem_configure_clocks()
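
Clock configuration branches on the effective link speed; when no PHY is managed (!dev_conf->init_phy) or the link is down, a fallback speed is used instead. The 10/100/1000 Mbit/s cases most plausibly target the standard MII/GMII reference clocks; a hedged helper restating that mapping (the frequencies and the enum type name are assumptions, not read from the matched lines):

static uint32_t gem_target_clk_hz(enum eth_xlnx_link_speed speed) /* enum name assumed */
{
        if (speed == LINK_10MBIT) {
                return 2500000U;   /* 2.5 MHz  */
        } else if (speed == LINK_100MBIT) {
                return 25000000U;  /* 25 MHz   */
        } else if (speed == LINK_1GBIT) {
                return 125000000U; /* 125 MHz  */
        }
        return 0U; /* LINK_DOWN: caller applies its fallback */
}
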
1068 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_set_nwcfg_link_speed() local
1079 if (dev_data->eff_link_speed == LINK_100MBIT) { in eth_xlnx_gem_set_nwcfg_link_speed()
1081 } else if (dev_data->eff_link_speed == LINK_1GBIT) { in eth_xlnx_gem_set_nwcfg_link_speed()
1102 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_set_mac_address() local
1106 regval_bot = (dev_data->mac_addr[0] & 0xFF); in eth_xlnx_gem_set_mac_address()
1107 regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8; in eth_xlnx_gem_set_mac_address()
1108 regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16; in eth_xlnx_gem_set_mac_address()
1109 regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24; in eth_xlnx_gem_set_mac_address()
1111 regval_top = (dev_data->mac_addr[4] & 0xFF); in eth_xlnx_gem_set_mac_address()
1112 regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8; in eth_xlnx_gem_set_mac_address()
1119 dev_data->mac_addr[0], in eth_xlnx_gem_set_mac_address()
1120 dev_data->mac_addr[1], in eth_xlnx_gem_set_mac_address()
1121 dev_data->mac_addr[2], in eth_xlnx_gem_set_mac_address()
1122 dev_data->mac_addr[3], in eth_xlnx_gem_set_mac_address()
1123 dev_data->mac_addr[4], in eth_xlnx_gem_set_mac_address()
1124 dev_data->mac_addr[5]); in eth_xlnx_gem_set_mac_address()
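
The 6-byte MAC address is split across two registers: bytes [0..3] are packed little-endian into the "bottom" word, bytes [4..5] into the low half of the "top" word. The same packing restated with the bit positions made explicit (the register writes are elided, as they are not in the matched lines):

/* mac_addr[] = {a0, a1, a2, a3, a4, a5} */
regval_bot  = (dev_data->mac_addr[0] & 0xFF);        /* bits  7:0  = a0 */
regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8;   /* bits 15:8  = a1 */
regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16;  /* bits 23:16 = a2 */
regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24;  /* bits 31:24 = a3 */

regval_top  = (dev_data->mac_addr[4] & 0xFF);        /* bits  7:0  = a4 */
regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8;   /* bits 15:8  = a5 */

/* Both words would then be written to the GEM specific-address
 * registers, presumably via sys_write32(). */
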
1204 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_init_phy() local
1221 if (detect_rc == 0 && dev_data->phy_id != 0x00000000 && in eth_xlnx_gem_init_phy()
1222 dev_data->phy_id != 0xFFFFFFFF && in eth_xlnx_gem_init_phy()
1223 dev_data->phy_access_api != NULL) { in eth_xlnx_gem_init_phy()
1225 dev_data->phy_access_api->phy_reset_func(dev); in eth_xlnx_gem_init_phy()
1226 dev_data->phy_access_api->phy_configure_func(dev); in eth_xlnx_gem_init_phy()
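
PHY setup only proceeds when detection succeeded and the ID is plausible; all-zeros and all-ones reads typically indicate a missing or unresponsive PHY. The guard above, annotated:

if (detect_rc == 0 &&                       /* MDIO scan succeeded      */
    dev_data->phy_id != 0x00000000 &&       /* not an absent PHY        */
    dev_data->phy_id != 0xFFFFFFFF &&       /* not a floating MDIO bus  */
    dev_data->phy_access_api != NULL) {     /* a vendor API was matched */
        dev_data->phy_access_api->phy_reset_func(dev);
        dev_data->phy_access_api->phy_configure_func(dev);
}
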
1253 struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(dwork, in eth_xlnx_gem_poll_phy() local
1255 const struct device *dev = net_if_get_device(dev_data->iface); in eth_xlnx_gem_poll_phy()
1261 if (dev_data->phy_access_api != NULL) { in eth_xlnx_gem_poll_phy()
1263 phy_status = dev_data->phy_access_api->phy_poll_status_change_func(dev); in eth_xlnx_gem_poll_phy()
1274 link_status = dev_data->phy_access_api->phy_poll_link_status_func(dev); in eth_xlnx_gem_poll_phy()
1281 dev_data->eff_link_speed = LINK_DOWN; in eth_xlnx_gem_poll_phy()
1282 net_eth_carrier_off(dev_data->iface); in eth_xlnx_gem_poll_phy()
1293 dev_data->eff_link_speed = in eth_xlnx_gem_poll_phy()
1294 dev_data->phy_access_api->phy_poll_link_speed_func(dev); in eth_xlnx_gem_poll_phy()
1298 net_eth_carrier_on(dev_data->iface); in eth_xlnx_gem_poll_phy()
1301 (dev_data->eff_link_speed == LINK_1GBIT) in eth_xlnx_gem_poll_phy()
1303 : (dev_data->eff_link_speed == LINK_100MBIT) in eth_xlnx_gem_poll_phy()
1305 : (dev_data->eff_link_speed == LINK_10MBIT) in eth_xlnx_gem_poll_phy()
1314 k_work_reschedule(&dev_data->phy_poll_delayed_work, in eth_xlnx_gem_poll_phy()
1324 dev_data->eff_link_speed = dev_conf->max_link_speed; in eth_xlnx_gem_poll_phy()
1328 net_eth_carrier_on(dev_data->iface); in eth_xlnx_gem_poll_phy()
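
The PHY poll runs as delayable work, so the handler first recovers its device data via CONTAINER_OF. On link loss it reports carrier-off; on link-up it reads the negotiated speed and reports carrier-on; with no managed PHY it assumes the configured maximum speed. A skeleton under those assumptions (the return types of the PHY API calls and the re-poll interval are reconstructed):

static void eth_xlnx_gem_poll_phy(struct k_work *item)
{
        struct k_work_delayable *dwork = k_work_delayable_from_work(item);
        struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(dwork,
                struct eth_xlnx_gem_dev_data, phy_poll_delayed_work);
        const struct device *dev = net_if_get_device(dev_data->iface);
        const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; /* type name assumed */

        if (dev_data->phy_access_api != NULL) {
                uint16_t phy_status =
                        dev_data->phy_access_api->phy_poll_status_change_func(dev);

                if (phy_status != 0) { /* link state changed */
                        uint8_t link_status =
                                dev_data->phy_access_api->phy_poll_link_status_func(dev);

                        if (link_status == 0) {
                                dev_data->eff_link_speed = LINK_DOWN;
                                net_eth_carrier_off(dev_data->iface);
                        } else {
                                dev_data->eff_link_speed =
                                        dev_data->phy_access_api->phy_poll_link_speed_func(dev);
                                /* clocks and NWCFG are reprogrammed here */
                                net_eth_carrier_on(dev_data->iface);
                        }
                }
                /* Periodic re-poll; interval assumed, not in matched lines */
                k_work_reschedule(&dev_data->phy_poll_delayed_work, K_MSEC(1000));
        } else {
                /* No managed PHY: trust the configured maximum speed */
                dev_data->eff_link_speed = dev_conf->max_link_speed;
                net_eth_carrier_on(dev_data->iface);
        }
}
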
1352 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_configure_buffers() local
1365 bdptr = dev_data->rxbd_ring.first_bd; in eth_xlnx_gem_configure_buffers()
1370 bdptr->addr = (uint32_t)dev_data->first_rx_buffer + in eth_xlnx_gem_configure_buffers()
1384 bdptr->addr = ((uint32_t)dev_data->first_rx_buffer + in eth_xlnx_gem_configure_buffers()
1393 bdptr = dev_data->txbd_ring.first_bd; in eth_xlnx_gem_configure_buffers()
1398 bdptr->addr = (uint32_t)dev_data->first_tx_buffer + in eth_xlnx_gem_configure_buffers()
1410 bdptr->addr = (uint32_t)dev_data->first_tx_buffer + in eth_xlnx_gem_configure_buffers()
1414 dev_data->rxbd_ring.next_to_process = 0; in eth_xlnx_gem_configure_buffers()
1415 dev_data->rxbd_ring.next_to_use = 0; in eth_xlnx_gem_configure_buffers()
1416 dev_data->rxbd_ring.free_bds = dev_conf->rxbd_count; in eth_xlnx_gem_configure_buffers()
1417 dev_data->txbd_ring.next_to_process = 0; in eth_xlnx_gem_configure_buffers()
1418 dev_data->txbd_ring.next_to_use = 0; in eth_xlnx_gem_configure_buffers()
1419 dev_data->txbd_ring.free_bds = dev_conf->txbd_count; in eth_xlnx_gem_configure_buffers()
1422 sys_write32((uint32_t)dev_data->rxbd_ring.first_bd, in eth_xlnx_gem_configure_buffers()
1424 sys_write32((uint32_t)dev_data->txbd_ring.first_bd, in eth_xlnx_gem_configure_buffers()
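
Buffer configuration walks both descriptor rings, points each BD at its slice of the contiguous RX/TX buffer areas, resets the ring bookkeeping, and hands the ring base addresses to the controller. A condensed sketch; the register addresses, the BD type name, and the per-buffer stride field are placeholders:

static void eth_xlnx_gem_configure_buffers(const struct device *dev)
{
        const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; /* type name assumed */
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;
        struct eth_xlnx_gem_bd *bdptr = dev_data->rxbd_ring.first_bd; /* BD type assumed */
        int i;

        for (i = 0; i < dev_conf->rxbd_count; i++, bdptr++) {
                bdptr->addr = (uint32_t)dev_data->first_rx_buffer +
                              i * dev_conf->rx_buffer_size; /* stride field assumed */
        }
        /* ... the last RX BD additionally carries the ring-wrap marker;
         * the TX ring is initialized the same way from first_tx_buffer ... */

        dev_data->rxbd_ring.next_to_process = 0;
        dev_data->rxbd_ring.next_to_use = 0;
        dev_data->rxbd_ring.free_bds = dev_conf->rxbd_count;
        dev_data->txbd_ring.next_to_process = 0;
        dev_data->txbd_ring.next_to_use = 0;
        dev_data->txbd_ring.free_bds = dev_conf->txbd_count;

        /* Note the sys_write32(value, address) argument order */
        sys_write32((uint32_t)dev_data->rxbd_ring.first_bd,
                    GEM_RXQBASE_ADDR); /* placeholder register address */
        sys_write32((uint32_t)dev_data->txbd_ring.first_bd,
                    GEM_TXQBASE_ADDR); /* placeholder register address */
}
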
1443 struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item, in eth_xlnx_gem_rx_pending_work() local
1445 const struct device *dev = net_if_get_device(dev_data->iface); in eth_xlnx_gem_rx_pending_work()
1467 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_handle_rx_pending() local
1488 curr_bd_idx = dev_data->rxbd_ring.next_to_process; in eth_xlnx_gem_handle_rx_pending()
1490 reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].addr); in eth_xlnx_gem_handle_rx_pending()
1491 reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].ctrl); in eth_xlnx_gem_handle_rx_pending()
1523 reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[last_bd_idx].ctrl); in eth_xlnx_gem_handle_rx_pending()
1536 dev_data->rxbd_ring.next_to_process = (last_bd_idx + 1) % in eth_xlnx_gem_handle_rx_pending()
1543 pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface, rx_data_length, in eth_xlnx_gem_handle_rx_pending()
1549 dev_data->stats.errors.rx++; in eth_xlnx_gem_handle_rx_pending()
1550 dev_data->stats.error_details.rx_no_buffer_count++; in eth_xlnx_gem_handle_rx_pending()
1564 (dev_data->rxbd_ring.first_bd[curr_bd_idx].addr & in eth_xlnx_gem_handle_rx_pending()
1577 reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[curr_bd_idx].addr); in eth_xlnx_gem_handle_rx_pending()
1587 if (net_recv_data(dev_data->iface, pkt) < 0) { in eth_xlnx_gem_handle_rx_pending()
1594 dev_data->stats.bytes.received += rx_data_length; in eth_xlnx_gem_handle_rx_pending()
1595 dev_data->stats.pkts.rx++; in eth_xlnx_gem_handle_rx_pending()
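
The RX path consumes descriptors starting at next_to_process, sizes a net_pkt to the received frame, copies the data out of the DMA buffers, returns the descriptors to the hardware, and hands the packet up via net_recv_data(). A sketch of the allocation and hand-off with the descriptor walk elided (the cleanup on stack rejection is an assumption):

static void eth_xlnx_gem_handle_rx_pending(const struct device *dev)
{
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;
        uint16_t rx_data_length = 0;
        struct net_pkt *pkt;

        /* ... walk the BDs of the current frame from next_to_process,
         * accumulate rx_data_length, advance next_to_process past the
         * last BD (modulo ring size) ... */

        pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface, rx_data_length,
                                           AF_UNSPEC, 0, K_NO_WAIT);
        if (pkt == NULL) {
                dev_data->stats.errors.rx++;
                dev_data->stats.error_details.rx_no_buffer_count++;
                /* the driver still returns the BDs to the hardware
                 * before bailing out */
                return;
        }

        /* ... net_pkt_write() the frame data from the RX buffers,
         * hand the BD addr words back to the controller ... */

        if (net_recv_data(dev_data->iface, pkt) < 0) {
                net_pkt_unref(pkt); /* assumed cleanup path */
                return;
        }
        dev_data->stats.bytes.received += rx_data_length;
        dev_data->stats.pkts.rx++;
}
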
1623 struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item, in eth_xlnx_gem_tx_done_work() local
1625 const struct device *dev = net_if_get_device(dev_data->iface); in eth_xlnx_gem_tx_done_work()
1647 struct eth_xlnx_gem_dev_data *dev_data = dev->data; in eth_xlnx_gem_handle_tx_done() local
1665 k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER); in eth_xlnx_gem_handle_tx_done()
1668 curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_process; in eth_xlnx_gem_handle_tx_done()
1669 reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); in eth_xlnx_gem_handle_tx_done()
1699 reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); in eth_xlnx_gem_handle_tx_done()
1707 dev_data->txbd_ring.next_to_process = in eth_xlnx_gem_handle_tx_done()
1708 (dev_data->txbd_ring.next_to_process + bds_processed) % in eth_xlnx_gem_handle_tx_done()
1710 dev_data->txbd_ring.free_bds += bds_processed; in eth_xlnx_gem_handle_tx_done()
1713 k_sem_give(&(dev_data->txbd_ring.ring_sem)); in eth_xlnx_gem_handle_tx_done()
1724 k_sem_give(&dev_data->tx_done_sem); in eth_xlnx_gem_handle_tx_done()
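
TX-done is the counterpart of the reservation in eth_xlnx_gem_send(): under the same ring_sem, it counts the descriptors the controller has released, advances next_to_process modulo the ring size, replenishes free_bds, and finally gives tx_done_sem to unblock the waiting sender. A sketch with the per-BD status walk elided:

static void eth_xlnx_gem_handle_tx_done(const struct device *dev)
{
        const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; /* type name assumed */
        struct eth_xlnx_gem_dev_data *dev_data = dev->data;
        uint8_t bds_processed = 0;

        k_sem_take(&dev_data->txbd_ring.ring_sem, K_FOREVER);

        /* ... starting at txbd_ring.next_to_process, walk the BDs whose
         * ctrl word shows the controller is done with them, counting
         * bds_processed ... */

        dev_data->txbd_ring.next_to_process =
                (dev_data->txbd_ring.next_to_process + bds_processed) %
                dev_conf->txbd_count;
        dev_data->txbd_ring.free_bds += bds_processed;

        k_sem_give(&dev_data->txbd_ring.ring_sem);

        /* Unblock the sender waiting in eth_xlnx_gem_send() */
        k_sem_give(&dev_data->tx_done_sem);
}
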