Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"The irq department provides:
- a major update to the auto affinity management code, which is used
by multi-queue devices
- move of the microblaze irq chip driver into the common driver code
so it can be shared between microblaze, powerpc and MIPS
- a series of updates to the ARM GICv3 interrupt controller
- the usual pile of fixes and small improvements all over the place"
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
powerpc/virtex: Use generic xilinx irqchip driver
irqchip/xilinx: Try to fall back if xlnx,kind-of-intr not provided
irqchip/xilinx: Add support for parent intc
irqchip/xilinx: Rename get_irq to xintc_get_irq
irqchip/xilinx: Restructure and use jump label api
irqchip/xilinx: Clean up print messages
microblaze/irqchip: Move intc driver to irqchip
ARM: virt: Select ARM_GIC_V3_ITS
ARM: gic-v3-its: Add 32bit support to GICv3 ITS
irqchip/gic-v3-its: Specialise readq and writeq accesses
irqchip/gic-v3-its: Specialise flush_dcache operation
irqchip/gic-v3-its: Narrow down Entry Size when used as a divider
irqchip/gic-v3-its: Change unsigned types for AArch32 compatibility
irqchip/gic-v3: Use nops macro for Cavium ThunderX erratum 23154
irqchip/gic-v3: Convert arm64 GIC accessors to {read,write}_sysreg_s
genirq/msi: Drop artificial PCI dependency
irqchip/bcm7038-l1: Implement irq_cpu_offline() callback
genirq/affinity: Use default affinity mask for reserved vectors
genirq/affinity: Take reserved vectors into account when spreading irqs
PCI: Remove the irq_affinity mask from struct pci_dev
...
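
The driver-visible piece of the affinity rework in this merge is the new struct irq_affinity: a multi-queue driver can mark a few "pre" and "post" vectors that keep the default affinity (e.g. an admin or error interrupt) while the remaining vectors are spread across the online CPUs. A minimal sketch of how a driver might use it through pci_alloc_irq_vectors_affinity(); the device name, function name and queue count are illustrative, not taken from this merge:

/*
 * Illustrative only: reserve one pre-vector for an admin interrupt and
 * spread the rest across CPUs, using the post-merge style of the API.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static int foo_setup_irqs(struct pci_dev *pdev, unsigned int max_queues)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* vector 0: admin queue, default affinity */
		.post_vectors	= 0,
	};
	int nvecs;

	/* Ask for the admin vector plus up to max_queues I/O vectors */
	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_queues + 1,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0)
		return nvecs;

	/* pci_irq_vector(pdev, 0) is the admin IRQ; 1..nvecs-1 are spread */
	return nvecs;
}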
kernel/irq/affinity.c: +37 -35
@@ -51,16 +51,17 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @affinity:		The affinity mask to spread. If NULL cpu_online_mask
- *			is used
- * @nvecs:		The number of vectors
+ * @nvecs:	The total number of vectors
+ * @affd:	Description of the affinity requirements
  *
  * Returns the masks pointer or NULL if allocation failed.
  */
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
-					  int nvec)
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
+	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;
@@ -68,46 +69,47 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
-	masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		goto out;
 
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, irq_default_affinity);
+
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	nodes = get_nodes_in_cpumask(affinity, &nodemsk);
+	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
 
 	/*
 	 * If the number of nodes in the mask is less than or equal the
 	 * number of vectors we just spread the vectors across the nodes.
 	 */
-	if (nvec <= nodes) {
+	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == nvec)
+			if (++curvec == last_affv)
 				break;
 		}
-		goto outonl;
+		goto done;
 	}
 
 	/* Spread the vectors per node */
-	vecs_per_node = nvec / nodes;
+	vecs_per_node = affv / nodes;
 	/* Account for rounding errors */
-	extra_vecs = nvec - (nodes * vecs_per_node);
+	extra_vecs = affv - (nodes * vecs_per_node);
 
 	for_each_node_mask(n, nodemsk) {
 		int ncpus, v, vecs_to_assign = vecs_per_node;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, affinity, cpumask_of_node(n));
+		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < last_affv && v < vecs_to_assign;
+		     curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,36 +121,36 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= nvec)
+		if (curvec >= last_affv)
 			break;
 	}
 
-outonl:
+done:
 	put_online_cpus();
+
+	/* Fill out vectors at the end that don't need affinity */
+	for (; curvec < nvecs; curvec++)
+		cpumask_copy(masks + curvec, irq_default_affinity);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
 }
 
 /**
- * irq_calc_affinity_vectors - Calculate to optimal number of vectors for a given affinity mask
- * @affinity:	The affinity mask to spread. If NULL cpu_online_mask
- *		is used
- * @maxvec:	The maximum number of vectors available
+ * irq_calc_affinity_vectors - Calculate the optimal number of vectors
+ * @maxvec:	The maximum number of vectors available
+ * @affd:	Description of the affinity requirements
  */
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
-	int cpus, ret;
+	int resv = affd->pre_vectors + affd->post_vectors;
+	int vecs = maxvec - resv;
+	int cpus;
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	cpus = cpumask_weight(affinity);
-	ret = (cpus < maxvec) ? cpus : maxvec;
-
+	cpus = cpumask_weight(cpu_online_mask);
 	put_online_cpus();
-	return ret;
+
+	return min(cpus, vecs) + resv;
 }
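
To make the arithmetic in the hunks above concrete: the new code first subtracts the reserved pre/post vectors and then spreads only the remainder across the nodes. A standalone sketch with made-up example numbers (not kernel code):

/* Example: 16 vectors, 1 pre and 1 post reserved, 3 NUMA nodes online. */
#include <stdio.h>

int main(void)
{
	int nvecs = 16, pre_vectors = 1, post_vectors = 1;
	int nodes = 3;

	int affv = nvecs - pre_vectors - post_vectors;	/* 14 vectors are spread */
	int last_affv = affv + pre_vectors;		/* spreading stops at index 15 */
	int vecs_per_node = affv / nodes;		/* 4 */
	int extra_vecs = affv - nodes * vecs_per_node;	/* 2 left over after even division */

	printf("affv=%d last_affv=%d vecs_per_node=%d extra_vecs=%d\n",
	       affv, last_affv, vecs_per_node, extra_vecs);
	return 0;
}

With these numbers, vectors 0 (pre) and 15 (post) keep irq_default_affinity, while vectors 1..14 get the NUMA-aware spreading.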
kernel/irq/msi.c: +1 -3
@@ -14,9 +14,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
-
-/* Temparory solution for building, will be removed later */
-#include <linux/pci.h>
+#include <linux/slab.h>
 
 /**
  * alloc_msi_entry - Allocate an initialize msi_entry