2015-04-01

This introduces the Freescale Data Path Acceleration Architecture

(DPAA) Ethernet driver (dpaa_eth) that builds upon the DPAA QMan,

BMan, PAMU and FMan drivers to deliver Ethernet connectivity on

the Freescale DPAA QorIQ platforms.

Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>

---
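Note for reviewers (not part of the patch to be applied): the egress path in
dpa_xmit() below retries qman_enqueue() up to 100000 times while the portal
returns -EBUSY, rather than stopping the Tx queue. The stand-alone C sketch
below only illustrates that retry policy against a mocked enqueue;
mock_enqueue(), xmit_with_retries() and the DPA_ENQUEUE_RETRIES name are
illustrative stand-ins, not part of the QMan API.

	#include <stdio.h>
	#include <errno.h>

	/* Same retry bound as the dpa_xmit() loop in dpaa_eth.h */
	#define DPA_ENQUEUE_RETRIES 100000

	/* Hypothetical stand-in for qman_enqueue(): stays busy for a number
	 * of polls, then accepts the frame.
	 */
	static int mock_enqueue(int *busy_polls_left)
	{
		if (*busy_polls_left > 0) {
			(*busy_polls_left)--;
			return -EBUSY;
		}
		return 0;
	}

	/* The driver's policy: spin on -EBUSY up to a fixed bound; a frame
	 * still rejected after that is accounted as a Tx FIFO error.
	 */
	static int xmit_with_retries(int *busy_polls_left)
	{
		int i, err = -EBUSY;

		for (i = 0; i < DPA_ENQUEUE_RETRIES; i++) {
			err = mock_enqueue(busy_polls_left);
			if (err != -EBUSY)
				break;
		}
		return err;
	}

	int main(void)
	{
		/* pretend the enqueue ring stays full for a while */
		int busy_polls = 1234;

		printf("enqueue result: %d\n", xmit_with_retries(&busy_polls));
		return 0;
	}

The retry bound works together with the egress congestion thresholds in the
Kconfig below: the 1G default of 0x06000000 bytes (~100 MB) drains at 1 Gb/s
(~125 MB/s) in roughly 0.8 s, i.e. below the 1000 ms tx_timeout default, so
the netdev watchdog should not fire while the CGR is merely congested.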

drivers/net/ethernet/freescale/Kconfig | 2 +

drivers/net/ethernet/freescale/Makefile | 1 +

drivers/net/ethernet/freescale/dpaa/Kconfig | 49 +

drivers/net/ethernet/freescale/dpaa/Makefile | 13 +

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 835 +++++++++++++

drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 446 +++++++

.../net/ethernet/freescale/dpaa/dpaa_eth_common.c | 1288 ++++++++++++++++++++

.../net/ethernet/freescale/dpaa/dpaa_eth_common.h | 119 ++

drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c | 428 +++++++

9 files changed, 3181 insertions(+)

create mode 100644 drivers/net/ethernet/freescale/dpaa/Kconfig

create mode 100644 drivers/net/ethernet/freescale/dpaa/Makefile

create mode 100644 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

create mode 100644 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

create mode 100644 drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c

create mode 100644 drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h

create mode 100644 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c

diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig

index ba84c4a..dd485f1 100644

--- a/drivers/net/ethernet/freescale/Kconfig

+++ b/drivers/net/ethernet/freescale/Kconfig

@@ -95,4 +95,6 @@ config GIANFAR

This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,

and MPC86xx family of chips, and the FEC on the 8540.

+source "drivers/net/ethernet/freescale/dpaa/Kconfig"

+

endif # NET_VENDOR_FREESCALE

diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile

index 71debd1..2768178 100644

--- a/drivers/net/ethernet/freescale/Makefile

+++ b/drivers/net/ethernet/freescale/Makefile

@@ -12,6 +12,7 @@ obj-$(CONFIG_FS_ENET) += fs_enet/

obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o

obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o

obj-$(CONFIG_GIANFAR) += gianfar_driver.o

+obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/

obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o

gianfar_driver-objs := gianfar.o \

gianfar_ethtool.o

diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig

new file mode 100644

index 0000000..7ef703c

--- /dev/null

+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig

@@ -0,0 +1,49 @@

+menuconfig FSL_DPAA_ETH

+ tristate "DPAA Ethernet"

+ depends on FSL_SOC && FSL_BMAN && FSL_QMAN && FSL_FMAN

+ select PHYLIB

+ select FSL_FMAN_MAC

+ ---help---

+ Data Path Acceleration Architecture Ethernet driver,

+ supporting the Freescale QorIQ chips.

+ Depends on Freescale Buffer Manager and Queue Manager

+ driver and Frame Manager Driver.

+

+if FSL_DPAA_ETH

+

+config FSL_DPAA_CS_THRESHOLD_1G

+ hex "Egress congestion threshold on 1G ports"

+ depends on FSL_DPAA_ETH

+ range 0x1000 0x10000000

+ default "0x06000000"

+ ---help---

+ The size in bytes of the egress Congestion State notification threshold on 1G ports.

+ The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop

+ (e.g. by sending UDP datagrams at "while(1) speed"),

+ and the larger the frame size, the more acute the problem.

+ So we have to find a balance between these factors:

+ - avoiding the device staying congested for a prolonged time (risking

+ the netdev watchdog to fire - see also the tx_timeout module param);

+ - affecting performance of protocols such as TCP, which otherwise

+ behave well under the congestion notification mechanism;

+ - preventing the Tx cores from tightly-looping (as if the congestion

+ threshold was too low to be effective);

+ - running out of memory if the CS threshold is set too high.

+

+config FSL_DPAA_CS_THRESHOLD_10G

+ hex "Egress congestion threshold on 10G ports"

+ depends on FSL_DPAA_ETH

+ range 0x1000 0x20000000

+ default "0x10000000"

+ ---help---

+ The size in bytes of the egress Congestion State notification threshold on 10G ports.

+

+config FSL_DPAA_INGRESS_CS_THRESHOLD

+ hex "Ingress congestion threshold on FMan ports"

+ depends on FSL_DPAA_ETH

+ default "0x10000000"

+ ---help---

+ The size in bytes of the ingress tail-drop threshold on FMan ports.

+ Traffic piling up above this value will be rejected by QMan and discarded by FMan.

+

+endif # FSL_DPAA_ETH

diff --git a/drivers/net/ethernet/freescale/dpaa/Makefile b/drivers/net/ethernet/freescale/dpaa/Makefile

new file mode 100644

index 0000000..60f29db

--- /dev/null

+++ b/drivers/net/ethernet/freescale/dpaa/Makefile

@@ -0,0 +1,13 @@

+#

+# Makefile for the Freescale DPAA Ethernet controllers

+#

+

+# Include FMan headers

+FMAN = $(srctree)/drivers/soc/fsl/fman

+ccflags-y += -I$(FMAN)

+ccflags-y += -I$(FMAN)/inc

+ccflags-y += -I$(FMAN)/flib

+

+obj-$(CONFIG_FSL_DPAA_ETH) += fsl_dpa.o

+

+fsl_dpa-objs += dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

new file mode 100644

index 0000000..ee65ca2

--- /dev/null

+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

@@ -0,0 +1,835 @@

+/* Copyright 2008 - 2015 Freescale Semiconductor Inc.

+ *

+ * Redistribution and use in source and binary forms, with or without

+ * modification, are permitted provided that the following conditions are met:

+ * * Redistributions of source code must retain the above copyright

+ * notice, this list of conditions and the following disclaimer.

+ * * Redistributions in binary form must reproduce the above copyright

+ * notice, this list of conditions and the following disclaimer in the

+ * documentation and/or other materials provided with the distribution.

+ * * Neither the name of Freescale Semiconductor nor the

+ * names of its contributors may be used to endorse or promote products

+ * derived from this software without specific prior written permission.

+ *

+ * ALTERNATIVELY, this software may be distributed under the terms of the

+ * GNU General Public License ("GPL") as published by the Free Software

+ * Foundation, either version 2 of that License or (at your option) any

+ * later version.

+ *

+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY

+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED

+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE

+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY

+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES

+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;

+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND

+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT

+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS

+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ */

+

+#define pr_fmt(fmt) \

+ KBUILD_MODNAME ": " fmt

+

+#include <linux/init.h>

+#include <linux/module.h>

+#include <linux/of_mdio.h>

+#include <linux/of_net.h>

+#include <linux/kthread.h>

+#include <linux/io.h>

+#include <linux/if_arp.h> /* arp_hdr_len() */

+#include <linux/if_vlan.h> /* VLAN_HLEN */

+#include <linux/icmp.h> /* struct icmphdr */

+#include <linux/ip.h> /* struct iphdr */

+#include <linux/ipv6.h> /* struct ipv6hdr */

+#include <linux/udp.h> /* struct udphdr */

+#include <linux/tcp.h> /* struct tcphdr */

+#include <linux/net.h> /* net_ratelimit() */

+#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */

+#include <linux/highmem.h>

+#include <linux/percpu.h>

+#include <linux/dma-mapping.h>

+#include <soc/fsl/bman.h>

+

+#include "fsl_fman.h"

+#include "fm_ext.h"

+#include "fm_port_ext.h"

+

+#include "mac.h"

+#include "dpaa_eth.h"

+#include "dpaa_eth_common.h"

+

+#define DPA_NAPI_WEIGHT 64

+

+/* Valid checksum indication */

+#define DPA_CSUM_VALID 0xFFFF

+

+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"

+

+static u8 debug = -1;

+module_param(debug, byte, S_IRUGO);

+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");

+

+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */

+static u16 tx_timeout = 1000;

+module_param(tx_timeout, ushort, S_IRUGO);

+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

+

+/* BM */

+

+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)

+

+static u8 dpa_priv_common_bpid;

+

+static void _dpa_rx_error(struct net_device *net_dev,

+ const struct dpa_priv_s *priv,

+ struct dpa_percpu_priv_s *percpu_priv,

+ const struct qm_fd *fd,

+ u32 fqid)

+{

+ /* limit common, possibly innocuous Rx FIFO Overflow errors'

+ * interference with zero-loss convergence benchmark results.

+ */

+ if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))

+ pr_warn_once("non-zero error counters in fman statistics (sysfs)\n");

+ else

+ if (netif_msg_hw(priv) && net_ratelimit())

+ netdev_err(net_dev, "Err FD status = 0x%08x\n",

+ fd->status & FM_FD_STAT_RX_ERRORS);

+

+ percpu_priv->stats.rx_errors++;

+

+ dpa_fd_release(net_dev, fd);

+}

+

+static void _dpa_tx_error(struct net_device *net_dev,

+ const struct dpa_priv_s *priv,

+ struct dpa_percpu_priv_s *percpu_priv,

+ const struct qm_fd *fd,

+ u32 fqid)

+{

+ struct sk_buff *skb;

+

+ if (netif_msg_hw(priv) && net_ratelimit())

+ netdev_warn(net_dev, "FD status = 0x%08x\n",

+ fd->status & FM_FD_STAT_TX_ERRORS);

+

+ percpu_priv->stats.tx_errors++;

+

+ /* If we intended the buffers from this frame to go into the bpools

+ * when the FMan transmit was done, we need to put it in manually.

+ */

+ if (fd->cmd & FM_FD_CMD_FCO) {

+ dpa_fd_release(net_dev, fd);

+ return;

+ }

+

+ skb = _dpa_cleanup_tx_fd(priv, fd);

+ dev_kfree_skb(skb);

+}

+

+static int dpaa_eth_poll(struct napi_struct *napi, int budget)

+{

+ struct dpa_napi_portal *np =

+ container_of(napi, struct dpa_napi_portal, napi);

+

+ int cleaned = qman_p_poll_dqrr(np->p, budget);

+

+ if (cleaned < budget) {

+ int tmp;

+

+ napi_complete(napi);

+ tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);

+ DPA_ERR_ON(tmp);

+ }

+

+ return cleaned;

+}

+

+static void __hot _dpa_tx_conf(struct net_device *net_dev,

+ const struct dpa_priv_s *priv,

+ struct dpa_percpu_priv_s *percpu_priv,

+ const struct qm_fd *fd,

+ u32 fqid)

+{

+ struct sk_buff *skb;

+

+ if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {

+ if (netif_msg_hw(priv) && net_ratelimit())

+ netdev_warn(net_dev, "FD status = 0x%08x\n",

+ fd->status & FM_FD_STAT_TX_ERRORS);

+

+ percpu_priv->stats.tx_errors++;

+ }

+

+ skb = _dpa_cleanup_tx_fd(priv, fd);

+

+ dev_kfree_skb(skb);

+}

+

+static enum qman_cb_dqrr_result

+priv_rx_error_dqrr(struct qman_portal *portal,

+ struct qman_fq *fq,

+ const struct qm_dqrr_entry *dq)

+{

+ struct net_device *net_dev;

+ struct dpa_priv_s *priv;

+ struct dpa_percpu_priv_s *percpu_priv;

+ int *count_ptr;

+

+ net_dev = ((struct dpa_fq *)fq)->net_dev;

+ priv = netdev_priv(net_dev);

+

+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);

+ count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);

+

+ if (dpaa_eth_napi_schedule(percpu_priv, portal))

+ return qman_cb_dqrr_stop;

+

+ if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))

+ /* Unable to refill the buffer pool due to insufficient

+ * system memory. Just release the frame back into the pool,

+ * otherwise we'll soon end up with an empty buffer pool.

+ */

+ dpa_fd_release(net_dev, &dq->fd);

+ else

+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

+

+ return qman_cb_dqrr_consume;

+}

+

+static enum qman_cb_dqrr_result __hot

+priv_rx_default_dqrr(struct qman_portal *portal,

+ struct qman_fq *fq,

+ const struct qm_dqrr_entry *dq)

+{

+ struct net_device *net_dev;

+ struct dpa_priv_s *priv;

+ struct dpa_percpu_priv_s *percpu_priv;

+ int *count_ptr;

+ struct dpa_bp *dpa_bp;

+

+ net_dev = ((struct dpa_fq *)fq)->net_dev;

+ priv = netdev_priv(net_dev);

+ dpa_bp = priv->dpa_bp;

+

+ /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */

+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);

+ count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);

+

+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))

+ return qman_cb_dqrr_stop;

+

+ /* Vale of plenty: make sure we didn't run out of buffers */

+

+ if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))

+ /* Unable to refill the buffer pool due to insufficient

+ * system memory. Just release the frame back into the pool,

+ * otherwise we'll soon end up with an empty buffer pool.

+ */

+ dpa_fd_release(net_dev, &dq->fd);

+ else

+ _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,

+ count_ptr);

+

+ return qman_cb_dqrr_consume;

+}

+

+static enum qman_cb_dqrr_result

+priv_tx_conf_error_dqrr(struct qman_portal *portal,

+ struct qman_fq *fq,

+ const struct qm_dqrr_entry *dq)

+{

+ struct net_device *net_dev;

+ struct dpa_priv_s *priv;

+ struct dpa_percpu_priv_s *percpu_priv;

+

+ net_dev = ((struct dpa_fq *)fq)->net_dev;

+ priv = netdev_priv(net_dev);

+

+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);

+

+ if (dpaa_eth_napi_schedule(percpu_priv, portal))

+ return qman_cb_dqrr_stop;

+

+ _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

+

+ return qman_cb_dqrr_consume;

+}

+

+static enum qman_cb_dqrr_result __hot

+priv_tx_conf_default_dqrr(struct qman_portal *portal,

+ struct qman_fq *fq,

+ const struct qm_dqrr_entry *dq)

+{

+ struct net_device *net_dev;

+ struct dpa_priv_s *priv;

+ struct dpa_percpu_priv_s *percpu_priv;

+

+ net_dev = ((struct dpa_fq *)fq)->net_dev;

+ priv = netdev_priv(net_dev);

+

+ /* Non-migratable context, safe to use raw_cpu_ptr */

+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);

+

+ if (dpaa_eth_napi_schedule(percpu_priv, portal))

+ return qman_cb_dqrr_stop;

+

+ _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

+

+ return qman_cb_dqrr_consume;

+}

+

+static void priv_ern(struct qman_portal *portal,

+ struct qman_fq *fq,

+ const struct qm_mr_entry *msg)

+{

+ struct net_device *net_dev;

+ const struct dpa_priv_s *priv;

+ struct sk_buff *skb;

+ struct dpa_percpu_priv_s *percpu_priv;

+ struct qm_fd fd = msg->ern.fd;

+

+ net_dev = ((struct dpa_fq *)fq)->net_dev;

+ priv = netdev_priv(net_dev);

+ /* Non-migratable context, safe to use raw_cpu_ptr */

+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);

+

+ percpu_priv->stats.tx_dropped++;

+ percpu_priv->stats.tx_fifo_errors++;

+

+ /* If we intended this buffer to go into the pool

+ * when the FM was done, we need to put it in

+ * manually.

+ */

+ if (msg->ern.fd.cmd & FM_FD_CMD_FCO) {

+ dpa_fd_release(net_dev, &fd);

+ return;

+ }

+

+ skb = _dpa_cleanup_tx_fd(priv, &fd);

+ dev_kfree_skb_any(skb);

+}

+

+static const struct dpa_fq_cbs_t private_fq_cbs = {

+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },

+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },

+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },

+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },

+ .egress_ern = { .cb = { .ern = priv_ern } }

+};

+

+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)

+{

+ struct dpa_percpu_priv_s *percpu_priv;

+ int i, j;

+

+ for_each_possible_cpu(i) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

+

+ for (j = 0; j < qman_portal_max; j++)

+ napi_enable(&percpu_priv->np[j].napi);

+ }

+}

+

+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)

+{

+ struct dpa_percpu_priv_s *percpu_priv;

+ int i, j;

+

+ for_each_possible_cpu(i) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

+

+ for (j = 0; j < qman_portal_max; j++)

+ napi_disable(&percpu_priv->np[j].napi);

+ }

+}

+

+static int __cold dpa_eth_priv_start(struct net_device *net_dev)

+{

+ int err;

+ struct dpa_priv_s *priv;

+

+ priv = netdev_priv(net_dev);

+

+ dpaa_eth_napi_enable(priv);

+

+ err = dpa_start(net_dev);

+ if (err < 0)

+ dpaa_eth_napi_disable(priv);

+

+ return err;

+}

+

+static int __cold dpa_eth_priv_stop(struct net_device *net_dev)

+{

+ int _errno;

+ struct dpa_priv_s *priv;

+

+ _errno = dpa_stop(net_dev);

+ /* Allow NAPI to consume any frame still in the Rx/TxConfirm

+ * ingress queues. This is to avoid a race between the current

+ * context and ksoftirqd which could leave NAPI disabled while

+ * in fact there's still Rx traffic to be processed.

+ */

+ usleep_range(5000, 10000);

+

+ priv = netdev_priv(net_dev);

+ dpaa_eth_napi_disable(priv);

+

+ return _errno;

+}

+

+static const struct net_device_ops dpa_private_ops = {

+ .ndo_open = dpa_eth_priv_start,

+ .ndo_start_xmit = dpa_tx,

+ .ndo_stop = dpa_eth_priv_stop,

+ .ndo_tx_timeout = dpa_timeout,

+ .ndo_get_stats64 = dpa_get_stats64,

+ .ndo_set_mac_address = dpa_set_mac_address,

+ .ndo_validate_addr = eth_validate_addr,

+ .ndo_change_mtu = dpa_change_mtu,

+ .ndo_set_rx_mode = dpa_set_rx_mode,

+ .ndo_init = dpa_ndo_init,

+ .ndo_set_features = dpa_set_features,

+ .ndo_fix_features = dpa_fix_features,

+};

+

+static int dpa_private_napi_add(struct net_device *net_dev)

+{

+ struct dpa_priv_s *priv = netdev_priv(net_dev);

+ struct dpa_percpu_priv_s *percpu_priv;

+ int i, cpu;

+

+ for_each_possible_cpu(cpu) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

+

+ percpu_priv->np = devm_kzalloc(net_dev->dev.parent,

+ qman_portal_max * sizeof(struct dpa_napi_portal),

+ GFP_KERNEL);

+

+ if (unlikely(!percpu_priv->np)) {

+ dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");

+ return -ENOMEM;

+ }

+

+ for (i = 0; i < qman_portal_max; i++)

+ netif_napi_add(net_dev, &percpu_priv->np[i].napi,

+ dpaa_eth_poll, DPA_NAPI_WEIGHT);

+ }

+

+ return 0;

+}

+

+void dpa_private_napi_del(struct net_device *net_dev)

+{

+ struct dpa_priv_s *priv = netdev_priv(net_dev);

+ struct dpa_percpu_priv_s *percpu_priv;

+ int i, cpu;

+

+ for_each_possible_cpu(cpu) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

+

+ if (percpu_priv->np) {

+ for (i = 0; i < qman_portal_max; i++)

+ netif_napi_del(&percpu_priv->np[i].napi);

+

+ devm_kfree(net_dev->dev.parent, percpu_priv->np);

+ }

+ }

+}

+EXPORT_SYMBOL(dpa_private_napi_del);

+

+static int dpa_private_netdev_init(struct net_device *net_dev)

+{

+ int i;

+ struct dpa_priv_s *priv = netdev_priv(net_dev);

+ struct dpa_percpu_priv_s *percpu_priv;

+ const u8 *mac_addr;

+

+ /* Although we access another CPU's private data here

+ * we do it at initialization so it is safe

+ */

+ for_each_possible_cpu(i) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

+ percpu_priv->net_dev = net_dev;

+ }

+

+ net_dev->netdev_ops = &dpa_private_ops;

+ mac_addr = priv->mac_dev->addr;

+

+ net_dev->mem_start = priv->mac_dev->res->start;

+ net_dev->mem_end = priv->mac_dev->res->end;

+

+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |

+ NETIF_F_LLTX);

+

+ net_dev->features |= NETIF_F_GSO;

+

+ return dpa_netdev_init(net_dev, mac_addr, tx_timeout);

+}

+

+static struct dpa_bp * __cold

+dpa_priv_bp_probe(struct device *dev)

+{

+ struct dpa_bp *dpa_bp;

+

+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);

+ if (unlikely(!dpa_bp)) {

+ dev_err(dev, "devm_kzalloc() failed\n");

+ return ERR_PTR(-ENOMEM);

+ }

+

+ dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);

+ dpa_bp->target_count = FSL_DPAA_ETH_MAX_BUF_COUNT;

+

+ dpa_bp->seed_cb = dpa_bp_priv_seed;

+ dpa_bp->free_buf_cb = _dpa_bp_free_pf;

+

+ return dpa_bp;

+}

+

+/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.

+ * We won't be sending congestion notifications to FMan; for now, we just use

+ * this CGR to generate enqueue rejections to FMan in order to drop the frames

+ * before they reach our ingress queues and eat up memory.

+ */

+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)

+{

+ struct qm_mcc_initcgr initcgr;

+ u32 cs_th;

+ int err;

+

+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);

+ if (err < 0) {

+ pr_err("Error %d allocating CGR ID\n", err);

+ goto out_error;

+ }

+

+ /* Enable CS TD, but disable Congestion State Change Notifications. */

+ initcgr.we_mask = QM_CGR_WE_CS_THRES;

+ initcgr.cgr.cscn_en = QM_CGR_EN;

+ cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;

+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

+

+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;

+ initcgr.cgr.cstd_en = QM_CGR_EN;

+

+ /* This is actually a hack, because this CGR will be associated with

+ * our affine SWP. However, we'll place our ingress FQs in it.

+ */

+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,

+ &initcgr);

+ if (err < 0) {

+ pr_err("Error %d creating ingress CGR with ID %d\n", err,

+ priv->ingress_cgr.cgrid);

+ qman_release_cgrid(priv->ingress_cgr.cgrid);

+ goto out_error;

+ }

+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",

+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);

+

+ /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255

+ * range), but we have no common initialization path between the

+ * different variants of the DPAA Eth driver, so we do it here rather

+ * than modifying every other variant than "private Eth".

+ */

+ priv->use_ingress_cgr = true;

+

+out_error:

+ return err;

+}

+

+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,

+ size_t count)

+{

+ struct dpa_priv_s *priv = netdev_priv(net_dev);

+ int i;

+

+ if (netif_msg_probe(priv))

+ dev_dbg(net_dev->dev.parent,

+ "Using private BM buffer pools\n");

+

+ priv->bp_count = count;

+

+ for (i = 0; i < count; i++) {

+ int err;

+

+ err = dpa_bp_alloc(&dpa_bp[i]);

+ if (err < 0) {

+ dpa_bp_free(priv);

+ priv->dpa_bp = NULL;

+ return err;

+ }

+

+ priv->dpa_bp = &dpa_bp[i];

+ }

+

+ dpa_priv_common_bpid = priv->dpa_bp->bpid;

+ return 0;

+}

+

+static const struct of_device_id dpa_match[];

+

+static int

+dpaa_eth_priv_probe(struct platform_device *pdev)

+{

+ int err = 0, i, channel;

+ struct device *dev;

+ struct dpa_bp *dpa_bp;

+ struct dpa_fq *dpa_fq, *tmp;

+ size_t count = 1;

+ struct net_device *net_dev = NULL;

+ struct dpa_priv_s *priv = NULL;

+ struct dpa_percpu_priv_s *percpu_priv;

+ struct fm_port_fqs port_fqs;

+ struct dpa_buffer_layout_s *buf_layout = NULL;

+ struct mac_device *mac_dev;

+ struct task_struct *kth;

+

+ dev = &pdev->dev;

+

+ /* Get the buffer pool assigned to this interface;

+ * run only once the default pool probing code

+ */

+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :

+ dpa_priv_bp_probe(dev);

+ if (IS_ERR(dpa_bp))

+ return PTR_ERR(dpa_bp);

+

+ /* Allocate this early, so we can store relevant information in

+ * the private area

+ */

+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);

+ if (!net_dev) {

+ dev_err(dev, "alloc_etherdev_mq() failed\n");

+ err = -ENOMEM;

+ goto alloc_etherdev_mq_failed;

+ }

+

+ snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d",

+ dpa_mac_fman_index_get(pdev),

+ dpa_mac_hw_index_get(pdev));

+

+ /* Do this here, so we can be verbose early */

+ SET_NETDEV_DEV(net_dev, dev);

+ dev_set_drvdata(dev, net_dev);

+

+ priv = netdev_priv(net_dev);

+ priv->net_dev = net_dev;

+

+ priv->msg_enable = netif_msg_init(debug, -1);

+

+ mac_dev = dpa_mac_dev_get(pdev);

+ if (IS_ERR(mac_dev) || !mac_dev) {

+ err = PTR_ERR(mac_dev);

+ goto mac_probe_failed;

+ }

+

+ /* We have physical ports, so we need to establish

+ * the buffer layout.

+ */

+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),

+ GFP_KERNEL);

+ if (!buf_layout)

+ goto alloc_failed;

+

+ dpa_set_buffers_layout(mac_dev, buf_layout);

+

+ /* For private ports, need to compute the size of the default

+ * buffer pool, based on FMan port buffer layout; also update

+ * the maximum buffer size for private ports if necessary

+ */

+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);

+

+ INIT_LIST_HEAD(&priv->dpa_fq_list);

+

+ memset(&port_fqs, 0, sizeof(port_fqs));

+

+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);

+ if (!err)

+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,

+ &port_fqs, true, TX);

+

+ if (err < 0)

+ goto fq_probe_failed;

+

+ /* bp init */

+

+ err = dpa_priv_bp_create(net_dev, dpa_bp, count);

+

+ if (err < 0)

+ goto bp_create_failed;

+

+ priv->mac_dev = mac_dev;

+

+ channel = dpa_get_channel();

+

+ if (channel < 0) {

+ err = channel;

+ goto get_channel_failed;

+ }

+

+ priv->channel = (u16)channel;

+

+ /* Start a thread that will walk the cpus with affine portals

+ * and add this pool channel to each's dequeue mask.

+ */

+ kth = kthread_run(dpaa_eth_add_channel,

+ (void *)(unsigned long)priv->channel,

+ "dpaa_%p:%d", net_dev, priv->channel);

+ if (IS_ERR(kth)) {

+ err = -ENOMEM;

+ goto add_channel_failed;

+ }

+

+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);

+

+ /* Create a congestion group for this netdev, with

+ * dynamically-allocated CGR ID.

+ * Must be executed after probing the MAC, but before

+ * assigning the egress FQs to the CGRs.

+ */

+ err = dpaa_eth_cgr_init(priv);

+ if (err < 0) {

+ dev_err(dev, "Error initializing CGR\n");

+ goto tx_cgr_init_failed;

+ }

+ err = dpaa_eth_priv_ingress_cgr_init(priv);

+ if (err < 0) {

+ dev_err(dev, "Error initializing ingress CGR\n");

+ goto rx_cgr_init_failed;

+ }

+

+ /* Add the FQs to the interface, and make them active */

+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {

+ err = dpa_fq_init(dpa_fq, false);

+ if (err < 0)

+ goto fq_alloc_failed;

+ }

+

+ priv->buf_layout = buf_layout;

+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);

+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);

+

+ /* All real interfaces need their ports initialized */

+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,

+ buf_layout, dev);

+

+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);

+

+ if (!priv->percpu_priv) {

+ dev_err(dev, "devm_alloc_percpu() failed\n");

+ err = -ENOMEM;

+ goto alloc_percpu_failed;

+ }

+ for_each_possible_cpu(i) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

+ memset(percpu_priv, 0, sizeof(*percpu_priv));

+ }

+

+ /* Initialize NAPI */

+ err = dpa_private_napi_add(net_dev);

+

+ if (err < 0)

+ goto napi_add_failed;

+

+ err = dpa_private_netdev_init(net_dev);

+

+ if (err < 0)

+ goto netdev_init_failed;

+

+ pr_info("Probed interface %s\n", net_dev->name);

+

+ return 0;

+

+netdev_init_failed:

+napi_add_failed:

+ dpa_private_napi_del(net_dev);

+alloc_percpu_failed:

+ dpa_fq_free(dev, &priv->dpa_fq_list);

+fq_alloc_failed:

+ qman_delete_cgr_safe(&priv->ingress_cgr);

+ qman_release_cgrid(priv->ingress_cgr.cgrid);

+rx_cgr_init_failed:

+ qman_delete_cgr_safe(&priv->cgr_data.cgr);

+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);

+tx_cgr_init_failed:

+add_channel_failed:

+get_channel_failed:

+ dpa_bp_free(priv);

+bp_create_failed:

+fq_probe_failed:

+alloc_failed:

+mac_probe_failed:

+ dev_set_drvdata(dev, NULL);

+ free_netdev(net_dev);

+alloc_etherdev_mq_failed:

+ if (atomic_read(&dpa_bp->refs) == 0)

+ devm_kfree(dev, dpa_bp);

+

+ return err;

+}

+

+static struct platform_device_id dpa_devtype[] = {

+ {

+ .name = "dpaa-ethernet",

+ .driver_data = 0,

+ }, {

+ }

+};

+MODULE_DEVICE_TABLE(platform, dpa_devtype);

+

+static struct platform_driver dpa_driver = {

+ .driver = {

+ .name = KBUILD_MODNAME,

+ .owner = THIS_MODULE,

+ },

+ .id_table = dpa_devtype,

+ .probe = dpaa_eth_priv_probe,

+ .remove = dpa_remove

+};

+

+static int __init __cold dpa_load(void)

+{

+ int _errno;

+

+ pr_info(DPA_DESCRIPTION "\n");

+

+ /* initialise dpaa_eth mirror values */

+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();

+ dpa_max_frm = fm_get_max_frm();

+

+ _errno = platform_driver_register(&dpa_driver);

+ if (unlikely(_errno < 0)) {

+ pr_err(KBUILD_MODNAME

+ ": %s:%hu:%s(): platform_driver_register() = %d\n",

+ KBUILD_BASENAME ".c", __LINE__, __func__, _errno);

+ }

+

+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",

+ KBUILD_BASENAME ".c", __func__);

+

+ return _errno;

+}

+module_init(dpa_load);

+

+static void __exit __cold dpa_unload(void)

+{

+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",

+ KBUILD_BASENAME ".c", __func__);

+

+ platform_driver_unregister(&dpa_driver);

+

+ /* Only one channel is used and needs to be released after all

+ * interfaces are removed

+ */

+ dpa_release_channel();

+

+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",

+ KBUILD_BASENAME ".c", __func__);

+}

+module_exit(dpa_unload);

+

+MODULE_LICENSE("Dual BSD/GPL");

+MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");

+MODULE_DESCRIPTION(DPA_DESCRIPTION);

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

new file mode 100644

index 0000000..b643c52

--- /dev/null

+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

@@ -0,0 +1,446 @@

+/* Copyright 2008 - 2015 Freescale Semiconductor Inc.

+ *

+ * Redistribution and use in source and binary forms, with or without

+ * modification, are permitted provided that the following conditions are met:

+ * * Redistributions of source code must retain the above copyright

+ * notice, this list of conditions and the following disclaimer.

+ * * Redistributions in binary form must reproduce the above copyright

+ * notice, this list of conditions and the following disclaimer in the

+ * documentation and/or other materials provided with the distribution.

+ * * Neither the name of Freescale Semiconductor nor the

+ * names of its contributors may be used to endorse or promote products

+ * derived from this software without specific prior written permission.

+ *

+ * ALTERNATIVELY, this software may be distributed under the terms of the

+ * GNU General Public License ("GPL") as published by the Free Software

+ * Foundation, either version 2 of that License or (at your option) any

+ * later version.

+ *

+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY

+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED

+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE

+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY

+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES

+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;

+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND

+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT

+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS

+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ */

+

+#ifndef __DPA_H

+#define __DPA_H

+

+#include <linux/netdevice.h>

+#include <soc/fsl/qman.h> /* struct qman_fq */

+

+#include "fm_ext.h"

+#include "mac.h"

+

+extern int dpa_rx_extra_headroom;

+extern int dpa_max_frm;

+

+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom

+#define dpa_get_max_frm() dpa_max_frm

+

+#define dpa_get_max_mtu() \

+ (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))

+

+#define __hot

+

+/* Simple enum of FQ types - used for array indexing */

+enum port_type {RX, TX};

+

+struct dpa_buffer_layout_s {

+ u16 priv_data_size;

+ bool parse_results;

+ bool time_stamp;

+ bool hash_results;

+ u16 data_align;

+};

+

+#define DPA_ERR_ON(cond)

+

+#define DPA_TX_PRIV_DATA_SIZE 16

+#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result)

+#define DPA_TIME_STAMP_SIZE 8

+#define DPA_HASH_RESULTS_SIZE 8

+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \

+ dpa_get_rx_extra_headroom())

+

+#define FM_FD_STAT_RX_ERRORS \

+ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \

+ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \

+ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \

+ FM_PORT_FRM_ERR_PRS_TIMEOUT | FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \

+ FM_PORT_FRM_ERR_PRS_HDR_ERR)

+

+#define FM_FD_STAT_TX_ERRORS \

+ (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \

+ FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)

+

+/* The raw buffer size must be cacheline aligned.

+ * Normally we use 2K buffers.

+ */

+#define DPA_BP_RAW_SIZE 2048

+

+/* This is what FMan is ever allowed to use.

+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is

+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,

+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us

+ * half-page-aligned buffers (can we?), so we reserve some more space

+ * for start-of-buffer alignment.

+ */

+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \

+ SMP_CACHE_BYTES)

+/* We must ensure that skb_shinfo is always cacheline-aligned. */

+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))

+

+/* Largest value that the FQD's OAL field can hold.

+ * This is DPAA-1.x specific.

+ */

+#define FSL_QMAN_MAX_OAL 127

+

+/* Default alignment for start of data in an Rx FD */

+#define DPA_FD_DATA_ALIGNMENT 16

+

+/* Values for the L3R field of the FM Parse Results

+ */

+/* L3 Type field: First IP Present IPv4 */

+#define FM_L3_PARSE_RESULT_IPV4 0x8000

+/* L3 Type field: First IP Present IPv6 */

+#define FM_L3_PARSE_RESULT_IPV6 0x4000

+

+/* Values for the L4R field of the FM Parse Results

+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.

+ */

+/* L4 Type field: UDP */

+#define FM_L4_PARSE_RESULT_UDP 0x40

+/* L4 Type field: TCP */

+#define FM_L4_PARSE_RESULT_TCP 0x20

+

+#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL

+

+/* number of Tx queues to FMan */

+#define DPAA_ETH_TX_QUEUES NR_CPUS

+

+#define DPAA_ETH_RX_QUEUES 128

+

+#define FSL_DPAA_ETH_MAX_BUF_COUNT 128

+#define FSL_DPAA_ETH_REFILL_THRESHOLD 80

+

+/* More detailed FQ types - used for fine-grained WQ assignments */

+enum dpa_fq_type {

+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */

+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */

+ FQ_TYPE_RX_PCD, /* User-defined PCDs */

+ FQ_TYPE_TX, /* "Real" Tx FQs */

+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */

+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */

+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */

+};

+

+struct dpa_fq {

+ struct qman_fq fq_base;

+ struct list_head list;

+ struct net_device *net_dev;

+ bool init;

+ u32 fqid;

+ u32 flags;

+ u16 channel;

+ u8 wq;

+ enum dpa_fq_type fq_type;

+};

+

+struct dpa_fq_cbs_t {

+ struct qman_fq rx_defq;

+ struct qman_fq tx_defq;

+ struct qman_fq rx_errq;

+ struct qman_fq tx_errq;

+ struct qman_fq egress_ern;

+};

+

+struct fqid_cell {

+ u32 start;

+ u32 count;

+};

+

+struct dpa_bp {

+ struct bman_pool *pool;

+ u8 bpid;

+ struct device *dev;

+ union {

+ /* The buffer pools used for the private ports are initialized

+ * with target_count buffers for each CPU; at runtime the

+ * number of buffers per CPU is constantly brought back to this

+ * level

+ */

+ int target_count;

+ /* The configured value for the number of buffers in the pool,

+ * used for shared port buffer pools

+ */

+ int config_count;

+ };

+ size_t size;

+ bool seed_pool;

+ /* physical address of the contiguous memory used by the pool to store

+ * the buffers

+ */

+ dma_addr_t paddr;

+ /* virtual address of the contiguous memory used by the pool to store

+ * the buffers

+ */

+ void __iomem *vaddr;

+ /* current number of buffers in the bpool alloted to this CPU */

+ int __percpu *percpu_count;

+ atomic_t refs;

+ /* some bpools need to be seeded before use by this cb */

+ int (*seed_cb)(struct dpa_bp *);

+ /* some bpools need to be emptied before freeing; this cb is used

+ * for freeing of individual buffers taken from the pool

+ */

+ void (*free_buf_cb)(void *addr);

+};

+

+struct dpa_napi_portal {

+ struct napi_struct napi;

+ struct qman_portal *p;

+};

+

+struct dpa_percpu_priv_s {

+ struct net_device *net_dev;

+ struct dpa_napi_portal *np;

+ struct rtnl_link_stats64 stats;

+};

+

+struct dpa_priv_s {

+ struct dpa_percpu_priv_s __percpu *percpu_priv;

+ struct dpa_bp *dpa_bp;

+ /* Store here the needed Tx headroom for convenience and speed

+ * (even though it can be computed based on the fields of buf_layout)

+ */

+ u16 tx_headroom;

+ struct net_device *net_dev;

+ struct mac_device *mac_dev;

+ struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];

+ struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];

+

+ size_t bp_count;

+

+ u16 channel; /* "fsl,qman-channel-id" */

+ struct list_head dpa_fq_list;

+

+ u32 msg_enable; /* net_device message level */

+

+ struct {

+ /**

+ * All egress queues to a given net device belong to one

+ * (and the same) congestion group.

+ */

+ struct qman_cgr cgr;

+ } cgr_data;

+ /* Use a per-port CGR for ingress traffic. */

+ bool use_ingress_cgr;

+ struct qman_cgr ingress_cgr;

+

+ struct dpa_buffer_layout_s *buf_layout;

+ u16 rx_headroom;

+};

+

+struct fm_port_fqs {

+ struct dpa_fq *tx_defq;

+ struct dpa_fq *tx_errq;

+ struct dpa_fq *rx_defq;

+ struct dpa_fq *rx_errq;

+};

+

+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);

+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);

+void __hot _dpa_rx(struct net_device *net_dev,

+ struct qman_portal *portal,

+ const struct dpa_priv_s *priv,

+ struct dpa_percpu_priv_s *percpu_priv,

+ const struct qm_fd *fd,

+ u32 fqid,

+ int *count_ptr);

+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);

+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,

+ const struct qm_fd *fd);

+

+/* Turn on HW checksum computation for this outgoing frame.

+ * If the current protocol is not something we support in this regard

+ * (or if the stack has already computed the SW checksum), we do nothing.

+ *

+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value

+ * otherwise.

+ *

+ * Note that this function may modify the fd->cmd field and the skb data buffer

+ * (the Parse Results area).

+ */

+int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb,

+ struct qm_fd *fd, char *parse_results);

+

+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,

+ struct qman_portal *portal)

+{

+ /* In case of threaded ISR for RT enable kernel,

+ * in_irq() does not return appropriate value, so use

+ * in_serving_softirq to distinguish softirq or irq context.

+ */

+ if (unlikely(in_irq() || !in_serving_softirq())) {

+ /* Disable QMan IRQ and invoke NAPI */

+ int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);

+

+ if (likely(!ret)) {

+ const struct qman_portal_config *pc =

+ qman_p_get_portal_config(portal);

+ struct dpa_napi_portal *np =

+ &percpu_priv->np[pc->channel];

+

+ np->p = portal;

+ napi_schedule(&np->napi);

+ return 1;

+ }

+ }

+ return 0;

+}

+

+static inline ssize_t __const __must_check __attribute__((nonnull))

+dpa_fd_length(const struct qm_fd *fd)

+{

+ return fd->length20;

+}

+

+static inline ssize_t __const __must_check __attribute__((nonnull))

+dpa_fd_offset(const struct qm_fd *fd)

+{

+ return fd->offset;

+}

+

+/* Verifies if the skb length is below the interface MTU */

+static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)

+{

+ if (unlikely(skb->len > mtu))

+ if ((skb->protocol != htons(ETH_P_8021Q)) || (skb->len > mtu + 4))

+ return -1;

+

+ return 0;

+}

+

+static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl)

+{

+ u16 headroom;

+ /* The frame headroom must accommodate:

+ * - the driver private data area

+ * - parse results, hash results, timestamp if selected

+ * If either hash results or time stamp are selected, both will

+ * be copied to/from the frame headroom, as TS is located between PR and

+ * HR in the IC and IC copy size has a granularity of 16 bytes

+ * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)

+ *

+ * Also make sure the headroom is a multiple of data_align bytes

+ */

+ headroom = (u16)(bl->priv_data_size +

+ (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +

+ (bl->hash_results || bl->time_stamp ?

+ DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0));

+

+ return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;

+}

+

+void dpa_private_napi_del(struct net_device *net_dev);

+

+static inline void clear_fd(struct qm_fd *fd)

+{

+ fd->opaque_addr = 0;

+ fd->opaque = 0;

+ fd->cmd = 0;

+}

+

+static inline struct qman_fq *_dpa_get_tx_conf_queue(

+ const struct dpa_priv_s *priv,

+ struct qman_fq *tx_fq)

+{

+ int i;

+

+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)

+ if (priv->egress_fqs[i] == tx_fq)

+ return priv->conf_fqs[i];

+

+ return NULL;

+}

+

+static inline int __hot dpa_xmit(struct dpa_priv_s *priv,

+ struct rtnl_link_stats64 *percpu_stats,

+ int queue,

+ struct qm_fd *fd)

+{

+ int err, i;

+ struct qman_fq *egress_fq;

+

+ egress_fq = priv->egress_fqs[queue];

+ if (fd->bpid == 0xff)

+ fd->cmd |= qman_fq_fqid(

+ _dpa_get_tx_conf_queue(priv, egress_fq)

+ );

+

+ for (i = 0; i < 100000; i++) {

+ err = qman_enqueue(egress_fq, fd, 0);

+ if (err != -EBUSY)

+ break;

+ }

+

+ if (unlikely(err < 0)) {

+ percpu_stats->tx_errors++;

+ percpu_stats->tx_fifo_errors++;

+ return err;

+ }

+

+ percpu_stats->tx_packets++;

+ percpu_stats->tx_bytes += dpa_fd_length(fd);

+

+ return 0;

+}

+

+/* Use multiple WQs for FQ assignment:

+ * - Tx Confirmation queues go to WQ1.

+ * - Rx Default and Tx queues go to WQ3 (no differentiation between

+ * Rx and Tx traffic).

+ * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance

+ * to be scheduled, in case there are many more FQs in WQ3).

+ * This ensures that Tx-confirmed buffers are timely released. In particular,

+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they

+ * are greatly outnumbered by other FQs in the system, while

+ * dequeue scheduling is round-robin.

+ */

+static inline void _dpa_assign_wq(struct dpa_fq *fq)

+{

+ switch (fq->fq_type) {

+ case FQ_TYPE_TX_CONFIRM:

+ case FQ_TYPE_TX_CONF_MQ:

+ fq->wq = 1;

+ break;

+ case FQ_TYPE_RX_DEFAULT:

+ case FQ_TYPE_TX:

+ fq->wq = 3;

+ break;

+ case FQ_TYPE_RX_ERROR:

+ case FQ_TYPE_TX_ERROR:

+ fq->wq = 2;

+ break;

+ default:

+ WARN(1, "Invalid FQ type %d for FQID %d!\n",

+ fq->fq_type, fq->fqid);

+ }

+}

+

+/* Use the queue selected by XPS */

+#define dpa_get_queue_mapping(skb) \

+ skb_get_queue_mapping(skb)

+

+static inline void _dpa_bp_free_pf(void *addr)

+{

+ put_page(virt_to_head_page(addr));

+}

+

+#endif /* __DPA_H */

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c

new file mode 100644

index 0000000..7094a45

--- /dev/null

+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c

@@ -0,0 +1,1288 @@

+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.

+ *

+ * Redistribution and use in source and binary forms, with or without

+ * modification, are permitted provided that the following conditions are met:

+ * * Redistributions of source code must retain the above copyright

+ * notice, this list of conditions and the following disclaimer.

+ * * Redistributions in binary form must reproduce the above copyright

+ * notice, this list of conditions and the following disclaimer in the

+ * documentation and/or other materials provided with the distribution.

+ * * Neither the name of Freescale Semiconductor nor the

+ * names of its contributors may be used to endorse or promote products

+ * derived from this software without specific prior written permission.

+ *

+ * ALTERNATIVELY, this software may be distributed under the terms of the

+ * GNU General Public License ("GPL") as published by the Free Software

+ * Foundation, either version 2 of that License or (at your option) any

+ * later version.

+ *

+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY

+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED

+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE

+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY

+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES

+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;

+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND

+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT

+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS

+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ */

+

+#include <linux/init.h>

+#include <linux/module.h>

+#include <linux/of_platform.h>

+#include <linux/of_net.h>

+#include <linux/etherdevice.h>

+#include <linux/kthread.h>

+#include <linux/percpu.h>

+#include <linux/highmem.h>

+#include <linux/sort.h>

+#include <soc/fsl/qman.h>

+#include <linux/ip.h>

+#include <linux/ipv6.h>

+#include <linux/if_vlan.h> /* vlan_eth_hdr */

+#include "dpaa_eth.h"

+#include "dpaa_eth_common.h"

+#include "mac.h"

+

+/* Size in bytes of the FQ taildrop threshold */

+#define DPA_FQ_TD 0x200000

+

+static struct dpa_bp *dpa_bp_array[64];

+

+int dpa_max_frm;

+EXPORT_SYMBOL(dpa_max_frm);

+

+int dpa_rx_extra_headroom;

+EXPORT_SYMBOL(dpa_rx_extra_headroom);

+

+static const struct fqid_cell tx_confirm_fqids[] = {

+ {0, DPAA_ETH_TX_QUEUES}

+};

+

+static const struct fqid_cell default_fqids[][3] = {

+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },

+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }

+};

+

+int dpa_netdev_init(struct net_device *net_dev,

+ const u8 *mac_addr,

+ u16 tx_timeout)

+{

+ int err;

+ struct dpa_priv_s *priv = netdev_priv(net_dev);

+ struct device *dev = net_dev->dev.parent;

+

+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

+

+ net_dev->features |= net_dev->hw_features;

+ net_dev->vlan_features = net_dev->features;

+

+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);

+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

+

+ net_dev->needed_headroom = priv->tx_headroom;

+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

+

+ err = register_netdev(net_dev);

+ if (err < 0) {

+ dev_err(dev, "register_netdev() = %d\n", err);

+ return err;

+ }

+

+ return 0;

+}

+EXPORT_SYMBOL(dpa_netdev_init);

+

+int __cold dpa_start(struct net_device *net_dev)

+{

+ int err, i;

+ struct dpa_priv_s *priv;

+ struct mac_device *mac_dev;

+

+ priv = netdev_priv(net_dev);

+ mac_dev = priv->mac_dev;

+

+ err = mac_dev->init_phy(net_dev, priv->mac_dev);

+ if (err < 0) {

+ if (netif_msg_ifup(priv))

+ netdev_err(net_dev, "init_phy() = %d\n", err);

+ return err;

+ }

+

+ for_each_port_device(i, mac_dev->port_dev) {

+ err = fm_port_enable(mac_dev->port_dev[i]);

+ if (err)

+ goto mac_start_failed;

+ }

+

+ err = priv->mac_dev->start(mac_dev);

+ if (err < 0) {

+ if (netif_msg_ifup(priv))

+ netdev_err(net_dev, "mac_dev->start() = %d\n", err);

+ goto mac_start_failed;

+ }

+

+ netif_tx_start_all_queues(net_dev);

+

+ return 0;

+

+mac_start_failed:

+ for_each_port_device(i, mac_dev->port_dev)

+ fm_port_disable(mac_dev->port_dev[i]);

+

+ return err;

+}

+EXPORT_SYMBOL(dpa_start);

+

+int __cold dpa_stop(struct net_device *net_dev)

+{

+ int _errno, i, err;

+ struct dpa_priv_s *priv;

+ struct mac_device *mac_dev;

+

+ priv = netdev_priv(net_dev);

+ mac_dev = priv->mac_dev;

+

+ netif_tx_stop_all_queues(net_dev);

+ /* Allow the Fman (Tx) port to process in-flight frames before we

+ * try switching it off.

+ */

+ usleep_range(5000, 10000);

+

+ _errno = mac_dev->stop(mac_dev);

+ if (unlikely(_errno < 0))

+ if (netif_msg_ifdown(priv))

+ netdev_err(net_dev, "mac_dev->stop() = %d\n",

+ _errno);

+

+ for_each_port_device(i, mac_dev->port_dev) {

+ err = fm_port_disable(mac_dev->port_dev[i]);

+ _errno = err ? err : _errno;

+ }

+

+ if (mac_dev->phy_dev)

+ phy_disconnect(mac_dev->phy_dev);

+ mac_dev->phy_dev = NULL;

+

+ return _errno;

+}

+EXPORT_SYMBOL(dpa_stop);

+

+void __cold dpa_timeout(struct net_device *net_dev)

+{

+ const struct dpa_priv_s *priv;

+ struct dpa_percpu_priv_s *percpu_priv;

+

+ priv = netdev_priv(net_dev);

+ percpu_priv = raw_cpu_ptr(priv->percpu_priv);

+

+ if (netif_msg_timer(priv))

+ netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",

+ jiffies_to_msecs(jiffies - net_dev->trans_start));

+

+ percpu_priv->stats.tx_errors++;

+}

+EXPORT_SYMBOL(dpa_timeout);

+

+/* net_device */

+

+/**

+ * @param net_dev the device for which statistics are calculated

+ * @param stats the function fills this structure with the device's statistics

+ * @return the address of the structure containing the statistics

+ *

+ * Calculates the statistics for the given device by adding the statistics

+ * collected by each CPU.

+ */

+struct rtnl_link_stats64 * __cold

+dpa_get_stats64(struct net_device *net_dev,

+ struct rtnl_link_stats64 *stats)

+{

+ struct dpa_priv_s *priv = netdev_priv(net_dev);

+ u64 *cpustats;

+ u64 *netstats = (u64 *)stats;

+ int i, j;

+ struct dpa_percpu_priv_s *percpu_priv;

+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);

+

+ for_each_possible_cpu(i) {

+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

+

+ cpustats = (u64 *)&percpu_priv->stats;

+

+ for (j = 0; j < numstats; j++)

+ netstats[j] += cpustats[j];

+ }

+

+ return stats;

+}

+EXPORT_SYMBOL(dpa_get_stats64);

+

+int dpa_change_mtu(struct net_device *net_dev, int new_mtu)

+{

+ const int max_mtu = dpa_get_max_mtu();

+

+ /* Make sure we don't exceed the Ethernet controller's MAXFRM */

+ if (new_mtu < 68 || new_mtu > max_mtu) {

+ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",

+ new_mtu, 68, max_mtu);

+ return -EINVAL;

+ }

+ net_dev->mtu = new_mtu;

+

+ return 0;

+}

+EXPORT_SYMBOL(dpa_change_mtu);

+

+/* .ndo_init callback */

+int dpa_ndo_init(struct net_device *net_dev)

+{

+ /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,

+ * we choose conservatively and let the user explicitly set a higher

+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs

+ * in the same LAN.

+ * If on the other hand fsl_fm_max_frm has been chosen below 1500,

+ * start with the maximum allowed.

+ */

+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);

+

+ pr_debug("Setting initial MTU on net device: %d\n", init_mtu);

+ net_dev->mtu = init_mtu;

+

+ return 0;

+}

+EXPORT_SYMBOL(dpa_ndo_init);

+

+int dpa_set_features(struct net_device *dev, netdev_features_t features)

+{

+ /* Not much to do here for now */

+ dev->features = features;

+ return 0;

+}

+EXPORT_SYMBOL(dpa_set_features);

+

+netdev_features_t dpa_fix_features(struct net_device *dev,

+ netdev_features_t features)

+{

+ netdev_features_t unsupported_features = 0;

+

+ /* In theory we should never be requested to enable features that

+ * we didn't set in netdev->features and netdev->hw_features at probe

+ * time, but double check just to be on the safe side.

+ * We don't support enabling Rx csum through ethtool yet

+ */

+ unsupported_features |= NETIF_F_RXCSUM;

+

+ features &= ~unsupported_features;

+

+ return features;

+}

+EXPORT_SYMBOL(dpa_fix_features);

+

+int __cold dpa_remove(struct platform_device *pdev)

+{

+ int err;

+ struct device *dev;

+ struct net_device *net_dev;

+ struct dpa_priv_s *priv;

+

+ dev = &pdev->dev;

+ net_dev = dev_get_drvdata(dev);

+

+ priv = netdev_priv(net_dev);

+

+ dev_set_drvdata(dev, NULL);

+ unregister_netdev(net_dev);

+

+ err = dpa_fq_free(dev, &priv->dpa_fq_list);

+

+ qman_delete_cgr_safe(&priv->ingress_cgr);

+ qman_release_cgrid(priv->ingress_cgr.cgrid);

+ qman_delete_cgr_safe(&priv->cgr_data.cgr);

+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);

+

+ dpa_private_napi_del(net_dev);

+

+ dpa_bp_free(priv);

+

+ if (priv->buf_layout)

+ devm_kfree(dev, priv->buf_layout);

+

+ free_netdev(net_dev);

+

+ return err;

+}

+EXPORT_SYMBOL(dpa_remove);

+

+struct mac_device * __cold __must_check

+__attribute__((nonnull))

+dpa_mac_dev_get(struct platform_device *pdev)

+{

+ struct device *dpa_dev, *dev;

+ struct device_node *mac_node;

+ struct platform_device *of_dev;

+ struct mac_device *mac_dev;

+ struct dpaa_eth_data *eth_data;

+

+ dpa_dev = &pdev->dev;

+ eth_data = dpa_dev->platform_data;

+ if (!eth_data)

+ return ERR_PTR(-ENODEV);

+

+ mac_node = eth_data->mac_node;

+

+ of_dev = of_find_device_by_node(mac_node);

+ if (unlikely(!of_dev)) {

+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",

+ mac_node->full_name);

+ of_node_put(mac_node);

+ return ERR_PTR(-EINVAL);

+ }

+ of_node_put(mac_node);

+

+ dev = &of_dev->dev;

+

+ mac_dev = dev_get_drvdata(dev);

+ if (unlikely(!mac_dev)) {

+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",

+ dev_name(dev));

+ return ERR_PTR(-EINVAL);

+ }

+

+ return mac_dev;

+}

+EXPORT_SYMBOL(dpa_mac_dev_get);

+

+int dpa_mac_hw_index_get(struct platform_device *pdev)

+{

+ struct device *dpa_dev;

+ struct dpaa_eth_data *eth_data;

+

+ dpa_dev = &pdev->dev;

+ eth_data = dpa_dev->platform_data;

+

+ return eth_data->mac_hw_id;

+}

+

+int dpa_mac_fman_index_get(struct platform_device *pdev)

+{

+ struct device *dpa_dev;

+ struct dpaa_eth_data *eth_data;

+

+ dpa_dev = &pdev->dev;

+ eth_data = dpa_dev->platform_data;

+

+ return eth_data->fman_hw_id;

+}

+

+int dpa_set_mac_address(struct net_device *net_dev, void *addr)

+{

+ const struct dpa_priv_s *priv;

+ int _errno;

+ struct mac_device *mac_dev;

+

+ priv = netdev_priv(net_dev);

+

+ _errno = eth_mac_addr(net_dev, addr);

+ if (_errno < 0) {

+ if (netif_msg_drv(priv))

+ netdev_err(net_dev,

+ "eth_mac_addr() = %d\n",

+ _errno);

+ return _errno;

+ }

+

+ mac_dev = priv->mac_dev;

+

+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),

+ (enet_addr_t *)net_dev->dev_addr);

+ if (_errno < 0) {

+ if (netif_msg_drv(priv))

+ netdev_err(net_dev,

+ "mac_dev->change_addr() = %d\n",

+ _errno);

+ return _errno;

+ }

+

+ return 0;

+}

+EXPORT_SYMBOL(dpa_set_mac_address);

+

+void dpa_set_rx_mode(struct net_device *net_dev)

+{

+ int _errno;

+ const struct dpa_priv_s *priv;

+

+ priv = netdev_priv(net_dev);

+

+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {

+ priv->mac_dev->promisc = !priv->mac_dev->promisc;

+ _errno = priv->mac_dev->set_promisc(

+ priv->mac_dev->get_mac_handle(priv->mac_dev),

+ priv->mac_dev->promisc);

+ if (unlikely(_errno < 0) && netif_msg_drv(priv))

+ netdev_err(net_dev,

+ "mac_dev->set_promisc() = %d\n",

+ _errno);

+ }

+

+ _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);

+ if (unlikely(_errno < 0) && netif_msg_drv(priv))

+ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);

+}

+EXPORT_SYMBOL(dpa_set_rx_mode);

+

+void dpa_set_buffers_layout(struct mac_device *mac_dev,

+ struct dpa_buffer_layout_s *layout)

+{

+ struct fm_port_params params;

+

+ /* Rx */

+ layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE;

+ layout[RX].parse_results = true;

+ layout[RX].hash_results = true;

+

+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);

+ /* a value of zero for data alignment means "don't care", so align to

+ * a non-zero value to prevent FMD from using its own default

+ */

+ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;

+

+ /* Tx */

+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;

+ layout[TX].parse_results = true;

+ layout[TX].hash_results = true;

+

+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);

+ layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;

+}

+EXPORT_SYMBOL(dpa_set_buffers_layout);

+

+int __attribute__((nonnull))

+dpa_bp_alloc(struct dpa_bp *dpa_bp)

+{

+ int err;

+ struct bman_pool_params bp_params;

+ struct platform_device *pdev;

+

+ if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {

+ pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");

+ return -EINVAL;

+ }

+

+ memset(&bp_params, 0, sizeof(struct bman_pool_params));

+

+ /* If the pool is already specified, we only create one per bpid */

+ if (dpa_bpid2pool_use(dpa_bp->bpid))

+ return 0;

+

+ if (dpa_bp->bpid == 0)

+ bp_params.flags |= BMAN_POOL_FLAG_DYNA
