/* Cleopatre project {{{ *
 * Copyright (C) 2012 Spidcom *
 * <<>> *
 * }}} */
/**
 * \file src/frame.c
 * \brief Interfaces to send/receive/alloc/free frames for firmware.
 * \ingroup plcdrv
 */
/* NOTE(review): the system header names below were lost (angle-bracket
 * contents stripped, most likely by a text-extraction step).  Restore the
 * original <linux/...> header list from version control. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "plcdrv.h"
#include "debug_dump.h"
#include "frame.h"
#include "mmtypes.h"
#include "common/ipmbox/msg.h"
#include "common/ipmbox/protocol.h"

/** Define HPAV MME format: layout right after the Ethernet header, one
 * version byte followed by the MME type (stored little endian). */
#define ETH_MME_VERSION_OFFSET (sizeof (struct ethhdr))
#define ETH_MME_VERSION_SIZE (1)
#define ETH_MME_TYPE_OFFSET (ETH_MME_VERSION_OFFSET + ETH_MME_VERSION_SIZE)

/** Frame info magic word, used to recognise buffers we prepared ourselves
 * when they come back from the firmware. */
#define FRAME_INFO_MAGIC 0xc742a74e

/**
 * Frame information stored before data (in headroom of the skb).
 * This way, we are able to get the corresponding skb from the data buffer.
 */
typedef struct frame_info_t
{
    /** Magic word. */
    uint32_t magic;
    /** Corresponding skb. */
    struct sk_buff *skb;
    /** \warning must be smaller than NET_SKB_PAD. */
} frame_info_t;

/** Compute an aligned pointer address before data in skb headroom.
 * NOTE(review): casting the pointer to uint32_t assumes a 32-bit address
 * space -- valid on this SoC, not portable. */
#define FRAME_INFO_PTR_ALIGN(data) \
    ((frame_info_t *) \
     (((uint32_t) (data) - sizeof (frame_info_t)) & ~(sizeof (uint32_t) - 1)))

/**
 * Find the Ethernet MME type.
 * \param eth_frame Ethernet frame pointer
 * \return HPAV MME type
 *
 * NOTE(review): ETH_MME_TYPE_OFFSET is odd (14 + 1), so this is an
 * unaligned 16-bit load -- presumably the platform tolerates unaligned
 * accesses, TODO confirm.
 */
static inline uint16_t
frame_get_eth_mme_type (uint8_t *eth_frame)
{
    /* Warning: mme type is stored in little endian. */
    return *(uint16_t *) (eth_frame + ETH_MME_TYPE_OFFSET);
}

/**
 * Prepare a sk buff for sending to firmware.
 * \param priv PLC device private context
 * \param skb sk buff to send to firmware
 * \param data_dir DMA_TO_DEVICE if skb is filled with data to send,
 * DMA_FROM_DEVICE if skb is empty and to be filled by firmware
 * \return physical address to send to firmware
 */
static uint32_t
frame_skb_to_fw (plcdrv_t *priv, struct sk_buff *skb,
                 enum dma_data_direction data_dir)
{
    frame_info_t *frame_info;
    size_t len;
    int delta = 0;
    /* Reserve some space to store the skb pointer. */
    if (skb_headroom (skb) < NET_SKB_PAD)
        delta = NET_SKB_PAD;
    if (delta || skb_header_cloned (skb))
    {
        /* Slow path: reallocate the head so the headroom can hold our
         * frame_info_t and we are the sole owner of the header. */
        atomic_inc (&priv->plcdrv_stats.skb_to_fw_no_headroom);
        /* NOTE(review): BUG_ON on a GFP_ATOMIC allocation failure will
         * crash the kernel under memory pressure -- consider dropping the
         * frame instead. */
        BUG_ON (pskb_expand_head (skb, delta, 0, GFP_ATOMIC));
    }
    /* Store frame information just before the data, so the skb can be
     * recovered from the bare buffer address in frame_skb_from_fw(). */
    frame_info = FRAME_INFO_PTR_ALIGN (skb->data);
    frame_info->magic = FRAME_INFO_MAGIC;
    frame_info->skb = skb;
    /* Get length: real payload length for TX, full buffer size for an
     * empty buffer the firmware will fill. */
    if (data_dir == DMA_TO_DEVICE)
        len = skb->len;
    else
        len = PKT_BUF_SZ;
    /* Map in DMA zone, dma_map_single is always happy on our
     * architecture. */
    return dma_map_single (NULL, skb->data, len, data_dir);
}

/**
 * Retrieve sk buff from a buffer received from firmware.
 * \param buffer physical buffer address received from firmware
 * \param data_dir DMA_TO_DEVICE if buffer is empty, DMA_FROM_DEVICE if a
 * filled buffer is received with data
 * \return associated sk buff or NULL on invalid magic word
 */
static struct sk_buff *
frame_skb_from_fw (uint32_t buffer, enum dma_data_direction data_dir)
{
    struct sk_buff *skb;
    frame_info_t *frame_info;
    size_t len;
    /* Retrieve frame information stored in the skb headroom by
     * frame_skb_to_fw(). */
    frame_info = FRAME_INFO_PTR_ALIGN (dma_to_virt (NULL, buffer));
    if (frame_info->magic != FRAME_INFO_MAGIC)
        return NULL;
    skb = frame_info->skb;
    /* Clear magic so a stale or replayed buffer address cannot be matched
     * twice. */
    frame_info->magic = 0;
    /* Get length, mirroring the mapping length used in frame_skb_to_fw(). */
    if (data_dir == DMA_TO_DEVICE)
        len = skb->len;
    else
        len = PKT_BUF_SZ;
    /* Unmap from DMA zone.
 */
    dma_unmap_single (NULL, buffer, len, data_dir);
    return skb;
}

/**
 * Refill the RX pool and hand the new empty buffers to the firmware.
 * \param priv PLC device private context
 * \return true if the RX pool is full after the refill, false otherwise
 * (e.g. an allocation failed mid-way; the buffers allocated so far are
 * still handed to the firmware).
 */
bool
frame_buffer_alloc (plcdrv_t *priv)
{
    struct sk_buff *skb;
    uint32_t skb_data_addr[PLCDRV_RX_POOL];
    unsigned int i;
    unsigned int new_skb_nb = PLCDRV_RX_POOL - skb_queue_len (&priv->rx_pool);
    for (i = 0; i < new_skb_nb; i++)
    {
        /* Allocate an sk_buff. */
        skb = dev_alloc_skb (PKT_BUF_SZ);
        if (!skb)
            break;
        /* Add it to the RX pool. */
        __skb_queue_head (&priv->rx_pool, skb);
        /* Store sk buff physical data address to send it to firmware. */
        skb_data_addr[i] = frame_skb_to_fw (priv, skb, DMA_FROM_DEVICE);
    }
    /* Send whatever was successfully allocated to firmware. */
    if (i)
        ipmbox_send_empty_buf (&priv->ipmbox, skb_data_addr, i);
    if (skb_queue_len (&priv->rx_pool) != PLCDRV_RX_POOL)
        return false;
    return true;
}

/**
 * Release a TX buffer returned by the firmware.
 * \param priv PLC device private context
 * \param buffer physical buffer address received from firmware
 *
 * The owning TX pool pointer was stored in skb->cb by the TX path
 * (frame_tx_data / frame_tx_mbx_mme_priv).
 */
void
frame_buffer_free (plcdrv_t *priv, uint32_t buffer)
{
    struct sk_buff *skb;
    struct sk_buff_head *pool;
    /* Get skb back from the buffer address. */
    skb = frame_skb_from_fw (buffer, DMA_TO_DEVICE);
    if (!skb)
    {
        printk (KERN_CRIT "bad buffer in frame_buffer_free\n");
        return;
    }
    /* Remove it from the right TX pool. */
    pool = *(struct sk_buff_head **) skb->cb;
    __skb_unlink (skb, pool);
    /* Free it. */
    kfree_skb (skb);
}

/**
 * Receive a data frame from firmware and pass it to the network stack.
 * \param priv PLC device private context
 * \param data_addr physical buffer address received from firmware
 * \param data_length length of the received data
 */
void
frame_rx_data (plcdrv_t *priv, uint32_t data_addr, uint32_t data_length)
{
    struct sk_buff *skb;
    /* Retrieve skb. */
    skb = frame_skb_from_fw (data_addr, DMA_FROM_DEVICE);
    if (!skb)
    {
        priv->stats.rx_errors++;
        printk (KERN_CRIT "bad buffer in frame_rx_data\n");
        return;
    }
    /* Remove it from the RX pool, this is not our buffer anymore. */
    __skb_unlink (skb, &priv->rx_pool);
    /* Prepare skb for linux receive level. */
    skb->dev = priv->dev;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    skb_put (skb, data_length);
    /* Apply QOS mark. */
    qos_frame_set_mark (&priv->qos, skb);
    skb->protocol = eth_type_trans (skb, priv->dev);
    /* Check sequence number on receive. */
    seq_check_rx (&priv->seq_check_ctx, skb);
    priv->stats.rx_packets++;
    priv->stats.rx_bytes += skb->len;
    /* Pass data to the Linux internal receive level.
*/ netif_receive_skb (skb); } void frame_rx_mme_priv (plcdrv_t *priv, uint32_t data_addr, uint32_t data_length) { struct sk_buff *skb, *nlskb; struct nlmsghdr *nlh; netlink_t *nl; /* Retrieve skb. */ skb = frame_skb_from_fw (data_addr, DMA_FROM_DEVICE); if (!skb) { printk (KERN_CRIT "bad buffer in frame_rx_mme_priv\n"); return; } /* Remove it from the RX pool, this is not our buffer anymore. */ __skb_unlink (skb, &priv->rx_pool); /* Prepare sk buff for linux receive level. */ skb->dev = priv->dev; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_put (skb, data_length); /* Allocate a new sk_buff to add netlink header. */ nlskb = alloc_skb (NLMSG_LENGTH (skb->len), GFP_ATOMIC); if (!nlskb) goto msg_failure; /* Fill netlink header. */ nlh = NLMSG_PUT (nlskb, 0, 0, NLMSG_DONE, NLMSG_LENGTH (skb->len) - sizeof(*nlh)); NETLINK_CB (nlskb).pid = 0; /* From kernel. */ NETLINK_CB (nlskb).dst_group = 0; /* Unicast. */ /* Fill this new sk_buff with the old one after netlink header. */ memcpy (NLMSG_DATA (nlh), skb->data, skb->len); /* Suppress old sk_buff. */ kfree_skb (skb); /* Get destination netlink. */ BUG_ON (data_length <= ETH_MME_TYPE_OFFSET); if (MMTYPE_IS_DRV (frame_get_eth_mme_type (NLMSG_DATA (nlh)))) nl = &priv->nl_plcd; else nl = &priv->nl_managerd; /* Send to netlink. */ netlink_unicast (nl->sock, nlskb, nl->pid, MSG_DONTWAIT); return; /* nlmsg_failure is used by NLMSG_PUT (yeark!). */ nlmsg_failure: kfree_skb (nlskb); msg_failure: kfree_skb (skb); } void frame_rx_debug_dump (plcdrv_t *priv, uint32_t data_addr, uint32_t data_length) { /* Check parameter. */ BUG_ON (!priv); BUG_ON (data_length > DEBUG_DUMP_BUFFER_LENGTH); if (priv->debug_dump.started) { /* Sanity check. */ BUG_ON (priv->debug_dump.waiting_for_buffer == false); /* We do not use data_length here because we have allocated a fixed size, * without regards of the actual size of the data. */ dma_unmap_single (NULL, data_addr, DEBUG_DUMP_BUFFER_LENGTH, DMA_FROM_DEVICE); /* Copy received length. 
 */
        priv->debug_dump.buffer_received_length = data_length;
        /* Wake up our process. */
        wake_up_interruptible (&priv->debug_dump.wait_queue);
    }
}

/**
 * Transmit a data frame to the firmware (netdev transmit hook).
 * \param skb frame to transmit
 * \param dev network device
 * \return NETDEV_TX_OK in all cases (the skb is always consumed)
 */
int
frame_tx_data (struct sk_buff *skb, struct net_device *dev)
{
    unsigned short vlan_prio;
    uint32_t phy_addr;
    plcdrv_t *priv = netdev_priv (dev);
    /* Check there is an Ethernet header. */
    if (skb->len < sizeof (struct ethhdr))
    {
        /* Packet is too small to be transmitted. Free skb because we have
         * handled it (ret is ok). */
        kfree_skb (skb);
        priv->stats.tx_errors++;
        priv->stats.tx_fifo_errors++;
        return NETDEV_TX_OK;
    }
    /* Sequence check. */
    seq_check_tx (&priv->seq_check_ctx, skb);
    /* Get VLAN priority. */
    vlan_prio = qos_frame_prio_get (&priv->qos, skb);
    /* TX pool full? This can not happen because netif queue would have been
     * stopped before data queue is full. */
    BUG_ON (skb_queue_len (&priv->tx_pool_data) == PLCDRV_TX_POOL_DATA);
    /* Update TX pool; remember the owning pool in skb->cb so
     * frame_buffer_free() can unlink it when the firmware returns it. */
    __skb_queue_head (&priv->tx_pool_data, skb);
    *(struct sk_buff_head **) skb->cb = &priv->tx_pool_data;
    /* Map it to DMA. */
    phy_addr = frame_skb_to_fw (priv, skb, DMA_TO_DEVICE);
    /* Send it to firmware. */
    ipmbox_send_data (&priv->ipmbox, phy_addr,
                      ipmbox_msg_create_header_data (skb->len, vlan_prio));
    priv->stats.tx_packets++;
    priv->stats.tx_bytes += skb->len;
    /* Update trans start to jiffies. */
    dev->trans_start = jiffies;
    /* Queue is now full? */
    if (skb_queue_len (&priv->tx_pool_data) == PLCDRV_TX_POOL_DATA)
    {
        /* Stop queue until frame_buffer_free() drains the pool. */
        netif_stop_queue (dev);
    }
    return NETDEV_TX_OK;
}

/**
 * Send a private MME to the firmware through the mailbox.
 * \param priv PLC device private context
 * \param skb MME to send; ownership is transferred (freed later by
 * frame_buffer_free() when the firmware returns the buffer)
 */
void
frame_tx_mbx_mme_priv (plcdrv_t *priv, struct sk_buff *skb)
{
    uint32_t phy_addr, header;
    /* Check parameters. */
    BUG_ON (!priv);
    BUG_ON (!skb);
    /* Update TX pool, with lock (skb_queue_head takes the queue lock,
     * unlike the __skb_queue_head used on the data path). */
    skb_queue_head (&priv->tx_pool_mme, skb);
    *(struct sk_buff_head **) skb->cb = &priv->tx_pool_mme;
    /* Get physical address. */
    phy_addr = frame_skb_to_fw (priv, skb, DMA_TO_DEVICE);
    /* Build message header. */
    header = ipmbox_msg_create_header_mme_priv (skb->len);
    /* Send to firmware.
*/ ipmbox_send_mbx (&priv->ipmbox, phy_addr, header); } void frame_tx_mbx_debug_dump (plcdrv_t *priv, uint32_t *buffer, unsigned int length) { uint32_t phy_addr, header; /* Check parameters. */ BUG_ON (!priv); BUG_ON (!buffer); BUG_ON (!length); /* Get physical address. */ phy_addr = dma_map_single (NULL, buffer, length, DMA_FROM_DEVICE); /* Build message header. */ header = ipmbox_msg_create_header_debug_dump (length); /* Send to firmware. */ ipmbox_send_mbx (&priv->ipmbox, phy_addr, header); }