Enable IEEE1588 support code.
config FSL_DPAA_ETH_MAX_BUF_COUNT
- int "Maximum nuber of buffers in private bpool"
+ int "Maximum number of buffers in the private bpool"
depends on FSL_SDK_DPAA_ETH
range 64 2048
default "128"
help
- The maximum number of buffers to be by default allocated in the DPAA-Ethernet private port's
- buffer pool. One needn't normally modify this, as it has probably been tuned for performance
+ The maximum number of buffers allocated by default in the DPAA-Ethernet
+ private port's buffer pool, per CPU and per user of the pool (i.e. per
+ port sharing the same pool).
+ One needn't normally modify this, as it has probably been tuned for performance
already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
config FSL_DPAA_ETH_REFILL_THRESHOLD
default "80"
help
The DPAA-Ethernet driver will start replenishing buffer pools whose count
- falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
- modify this value unless one has very specific performance reasons.
+ falls below this threshold, per CPU. This cannot exceed DPAA_ETH_MAX_BUF_COUNT.
+ One needn't normally modify this value unless one has very specific performance reasons.
config FSL_DPAA_CS_THRESHOLD_1G
hex "Egress congestion threshold on 1G ports"
priv = netdev_priv(net_dev);
percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+ count_ptr = raw_cpu_ptr(priv->percpu_count);
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
/* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
percpu_priv = raw_cpu_ptr(priv->percpu_priv);
- count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
+ count_ptr = raw_cpu_ptr(priv->percpu_count);
if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
return qman_cb_dqrr_stop;
return ERR_PTR(-ENOMEM);
}
- dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
- dpa_bp->seed_cb = dpa_bp_priv_seed;
dpa_bp->free_buf_cb = _dpa_bp_free_pf;
return dpa_bp;
return 0;
}
+static void dpa_priv_bp_seed(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ int i;
+
+ /* Give each CPU an allotment of buffers */
+ for_each_possible_cpu(i) {
+ /* Although we access another CPU's counters here,
+ * we do it at boot time, so it is safe
+ */
+ int *count_ptr = per_cpu_ptr(priv->percpu_count, i);
+
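+ /* The counter starts at zero, so this refill seeds the CPU's full buffer allotment */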
+ dpaa_eth_refill_bpools(dpa_bp, count_ptr);
+ }
+}
+
static const struct of_device_id dpa_match[];
#ifdef CONFIG_FSL_DPAA_DBG_LOOP
err = -ENOMEM;
goto alloc_percpu_failed;
}
+
for_each_possible_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
memset(percpu_priv, 0, sizeof(*percpu_priv));
}
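+
+ /* The per-CPU buffer counters now live in the interface's private data,
+ * not in the (possibly shared) buffer pool
+ */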
+ priv->percpu_count = devm_alloc_percpu(dev, *priv->percpu_count);
+ if (!priv->percpu_count) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+
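+ /* Start with empty per-CPU counters; dpa_priv_bp_seed() fills them later */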
+ for_each_possible_cpu(i) {
+ int *percpu_count = per_cpu_ptr(priv->percpu_count, i);
+ *percpu_count = 0;
+ }
+
/* Initialize NAPI */
err = dpa_private_napi_add(net_dev);
err = dpa_private_netdev_init(net_dev);
if (err < 0)
goto netdev_init_failed;
+
+ dpa_priv_bp_seed(net_dev);
* the buffers
*/
void __iomem *vaddr;
- /* current number of buffers in the bpool alloted to this CPU */
- int __percpu *percpu_count;
atomic_t refs;
- /* some bpools need to be seeded before use by this cb */
- int (*seed_cb)(struct dpa_bp *);
/* some bpools need to be emptied before freeing; this cb is used
* for freeing of individual buffers taken from the pool
*/
struct dpa_priv_s {
struct dpa_percpu_priv_s __percpu *percpu_priv;
struct dpa_bp *dpa_bp;
+ /* current number of buffers in the bpool allotted to this CPU */
+ int __percpu *percpu_count;
/* Store here the needed Tx headroom for convenience and speed
* (even though it can be computed based on the fields of buf_layout)
*/
extern struct net_device *dpa_loop_netdevs[20];
#endif
-/* functions with different implementation for SG and non-SG: */
-int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
void __hot _dpa_rx(struct net_device *net_dev,
struct qman_portal *portal,
dpa_bp->dev = dev;
- if (dpa_bp->seed_cb) {
- err = dpa_bp->seed_cb(dpa_bp);
- if (err)
- goto bman_free_pool;
- }
-
dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
return 0;
build_skb_failed:
netdev_alloc_failed:
- net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
+ net_err_ratelimited("%s failed\n", __func__);
WARN_ONCE(1, "Memory allocation failure on Rx\n");
bm_buffer_set64(&bmb[i], 0);
return 0;
}
-/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
-static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
-{
- int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
- *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
-}
-
-int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
-{
- int i;
-
- /* Give each CPU an allotment of "config_count" buffers */
- for_each_possible_cpu(i) {
- int j;
-
- /* Although we access another CPU's counters here
- * we do it at boot time so it is safe
- */
- for (j = 0; j < dpa_bp->config_count; j += 8)
- dpa_bp_add_8_bufs(dpa_bp, i);
- }
- return 0;
-}
-EXPORT_SYMBOL(dpa_bp_priv_seed);
-
/* Add buffers/(pages) for Rx processing whenever bpool count falls below
* REFILL_THRESHOLD.
*/
/* Non-migratable context, safe to use raw_cpu_ptr */
percpu_priv = raw_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
- countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+ countptr = raw_cpu_ptr(priv->percpu_count);
clear_fd(&fd);
struct dpa_ern_cnt ern_cnt;
struct dpa_priv_s *priv;
unsigned int num_cpus, offset;
- struct dpa_bp *dpa_bp;
int total_stats, i;
total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
priv = netdev_priv(net_dev);
- dpa_bp = priv->dpa_bp;
num_cpus = num_online_cpus();
bp_count = 0;
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- if (dpa_bp->percpu_count)
- bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+ if (priv->percpu_count)
+ bp_count = *(per_cpu_ptr(priv->percpu_count, i));
rx_errors.dme += percpu_priv->rx_errors.dme;
rx_errors.fpe += percpu_priv->rx_errors.fpe;