Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
kernel/bpf/arraymap.c (+20 -13)

@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
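Two things happen in these arraymap hunks: cond_resched() gives the scheduler a chance to run while per-CPU storage for huge arrays is freed or allocated (max_entries iterations can otherwise stall a CPU), and the sizing path now computes the full cost, including the per-CPU portion, in u64, rejects anything near the u32 limit, and precharges the memlock budget via bpf_map_precharge_memlock() before allocating anything. A minimal runnable userspace sketch of that sizing discipline; PAGE_SZ and the sample sizes are illustrative stand-ins, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096ULL  /* stand-in for the kernel's PAGE_SIZE */

    int main(void)
    {
        uint32_t max_entries = 1U << 30;  /* user-supplied map size */
        uint32_t elem_size = 64;
        uint64_t cost = (uint64_t)max_entries * elem_size;

        /* same guard as the hunk: stay inside the u32 domain that
         * round_up() and the page counter use later */
        if (cost >= UINT32_MAX - PAGE_SZ) {
            puts("rejected with -ENOMEM before any allocation");
            return 1;
        }
        printf("pages to precharge: %llu\n",
               (unsigned long long)((cost + PAGE_SZ - 1) / PAGE_SZ));
        return 0;
    }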
kernel/bpf/core.c (+1 -1)

@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	 * so always copy 'cnt' prog_ids to the user.
 	 * In a rare race the user will see zero prog_ids
 	 */
-	ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
 	if (!ids)
 		return -ENOMEM;
 	rcu_read_lock();
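cnt here derives from a user-visible program count, so an oversized allocation attempt is possible and its failure is already handled; __GFP_NOWARN just stops the allocator from dumping a backtrace into the log for that expected failure. A runnable userspace sketch of the same contract; checked_calloc is an illustrative helper, not a kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* overflow-checked multiply plus quiet failure, like
     * kcalloc(cnt, size, ... | __GFP_NOWARN) */
    static void *checked_calloc(size_t cnt, size_t size)
    {
        if (size && cnt > SIZE_MAX / size)
            return NULL;
        return calloc(cnt, size);
    }

    int main(void)
    {
        uint32_t *ids = checked_calloc(SIZE_MAX / sizeof(uint32_t),
                                       sizeof(uint32_t));

        if (!ids) {
            fprintf(stderr, "expected failure: caller returns -ENOMEM\n");
            return 1;
        }
        free(ids);
        return 0;
    }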
kernel/bpf/cpumap.c (+1 -1)

@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 						       int map_id)
 {
-	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	int numa, err;
 
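__cpu_map_entry_alloc() runs from the map-update syscall path, i.e. process context with no spinlocks held, so GFP_KERNEL is legal there; it may sleep and reclaim memory, which makes it far less likely to fail than GFP_ATOMIC. A sketch of the allocation this flag feeds; the call shape is assumed from the locals above, not quoted from the diff:

    /* process context, may sleep: GFP_KERNEL is allowed and more robust;
     * GFP_ATOMIC is only required in IRQ/spinlock contexts */
    rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);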
kernel/bpf/lpm_trie.c

@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
 		slot = &trie->root;
 
 		for (;;) {
-			node = rcu_dereference_protected(*slot,
-					lockdep_is_held(&trie->lock));
+			node = rcu_dereference_protected(*slot, 1);
 			if (!node)
-				goto unlock;
+				goto out;
 
 			if (rcu_access_pointer(node->child[0])) {
 				slot = &node->child[0];
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
+	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
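trie_free() now waits in synchronize_rcu() instead of taking the trie lock: once every outstanding RCU reader has finished, the trie is private to this function, so rcu_dereference_protected(..., 1) (condition unconditionally true) is safe. The walk itself is an iterative leaf-first teardown: descend to a childless node, free it, clear the parent's slot, restart. A runnable userspace sketch of that loop; struct node is an illustrative stand-in for lpm_trie_node:

    #include <stdlib.h>

    struct node {
        struct node *child[2];
    };

    /* constant stack depth; frees exactly one leaf per outer pass */
    static void free_trie(struct node **root)
    {
        for (;;) {
            struct node **slot = root;

            for (;;) {
                struct node *n = *slot;

                if (!n)
                    return;          /* tree fully freed */
                if (n->child[0]) {
                    slot = &n->child[0];
                } else if (n->child[1]) {
                    slot = &n->child[1];
                } else {
                    free(n);         /* leaf: release, restart at root */
                    *slot = NULL;
                    break;
                }
            }
        }
    }

    int main(void)
    {
        struct node *root = calloc(1, sizeof(*root));

        if (root)
            root->child[1] = calloc(1, sizeof(*root));
        free_trie(&root);
        return 0;
    }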
kernel/bpf/sockmap.c

@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+	err = -EINVAL;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_stab;
 
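The sockmap change replaces a blanket int err = -EINVAL initialization with an assignment placed immediately before the check that can fail, so every path reaching free_stab carries the code for the failure that actually happened. A sketch of the idiom; both helpers are hypothetical placeholders:

    /* bad_input() and alloc_table() are illustrative, not kernel APIs */
    static int setup_map(void)
    {
        int err;

        err = -EINVAL;
        if (bad_input())
            goto fail;

        err = -ENOMEM;
        if (!alloc_table())
            goto fail;

        return 0;
    fail:
        return err;
    }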
kernel/fork.c (+2 -13)

@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-static void __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
 	put_user_ns(mm->user_ns);
 	free_mm(mm);
 }
-
-void mmdrop(struct mm_struct *mm)
-{
-	/*
-	 * The implicit full barrier implied by atomic_dec_and_test() is
-	 * required by the membarrier system call before returning to
-	 * user-space, after storing to rq->curr.
-	 */
-	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-		__mmdrop(mm);
-}
-EXPORT_SYMBOL_GPL(mmdrop);
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 static void mmdrop_async_fn(struct work_struct *work)
 {
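__mmdrop() loses its static and gains an export while the out-of-line mmdrop() wrapper disappears; presumably the wrapper is re-added as a static inline in a header (likely include/linux/sched/mm.h, which this diff does not show) so the common "count not yet zero" path avoids a function call. A sketch of that header-side counterpart, rebuilt from the removed lines:

    extern void __mmdrop(struct mm_struct *mm);

    static inline void mmdrop(struct mm_struct *mm)
    {
        /*
         * The implicit full barrier implied by atomic_dec_and_test() is
         * required by the membarrier system call before returning to
         * user-space, after storing to rq->curr.
         */
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
            __mmdrop(mm);
    }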
kernel/relay.c (+1 -1)

@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
 	struct rchan_buf *buf;
 
-	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
 		return NULL;
 
 	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
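The old UINT_MAX bound only kept the multiplication from overflowing; an n_subbufs anywhere near it would still ask kmalloc for an array far larger than KMALLOC_MAX_SIZE, a request that can only fail (noisily). Checking against the allocator's real limit rejects such input up front. A runnable userspace sketch; MAX_ALLOC is an illustrative stand-in for KMALLOC_MAX_SIZE:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ALLOC (1UL << 22)  /* hypothetical allocator cap */

    static size_t *alloc_padding(size_t n_subbufs)
    {
        /* bound by what the allocator can actually deliver, not just
         * by integer-overflow limits */
        if (n_subbufs > MAX_ALLOC / sizeof(size_t))
            return NULL;
        return malloc(n_subbufs * sizeof(size_t));
    }

    int main(void)
    {
        if (!alloc_padding(MAX_ALLOC))
            puts("oversized request rejected up front");
        return 0;
    }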
kernel/seccomp.c (+4 -2)

@@ -1076,14 +1076,16 @@ long seccomp_get_metadata(struct task_struct *task,
 
 	size = min_t(unsigned long, size, sizeof(kmd));
 
-	if (copy_from_user(&kmd, data, size))
-		return -EFAULT;
+	if (size < sizeof(kmd.filter_off))
+		return -EINVAL;
+	if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
+		return -EFAULT;
 
 	filter = get_nth_filter(task, kmd.filter_off);
 	if (IS_ERR(filter))
 		return PTR_ERR(filter);
 
 	memset(&kmd, 0, sizeof(kmd));
 	if (filter->log)
 		kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
 
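Instead of copying size bytes over the whole struct, the fixed code validates that the caller passed at least the one input field, copies exactly that field, and zeroes the struct before filling in output, so no stale user input leaks back out. A userspace sketch of the pattern; memcpy stands in for copy_from_user and the struct is an illustration mirroring seccomp_metadata:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct metadata {
        uint64_t filter_off;  /* input: which filter to query */
        uint64_t flags;       /* output */
    };

    static long get_metadata(const void *data, unsigned long size)
    {
        struct metadata kmd;

        if (size < sizeof(kmd.filter_off))
            return -EINVAL;  /* too short to even name a filter */
        memcpy(&kmd.filter_off, data, sizeof(kmd.filter_off));

        /* ... look up the filter by kmd.filter_off here ... */

        memset(&kmd, 0, sizeof(kmd));  /* no stale input leaks back out */
        /* ... set real output flags, then copy kmd back to the caller ... */
        return 0;
    }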
kernel/trace/bpf_trace.c

@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 		return -EINVAL;
 	if (copy_from_user(&query, uquery, sizeof(query)))
 		return -EFAULT;
+	if (query.ids_len > BPF_TRACE_MAX_PROGS)
+		return -E2BIG;
 
 	mutex_lock(&bpf_event_mutex);
 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
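query.ids_len arrives straight from userspace and later sizes a kcalloc() (see the core.c hunk above), so it is capped before use. A trivial sketch of the guard; the cap value here is illustrative, BPF_TRACE_MAX_PROGS is defined in the kernel's own headers:

    #include <errno.h>
    #include <stdint.h>

    #define TRACE_MAX_PROGS 64  /* illustrative cap */

    static int check_query_len(uint32_t ids_len)
    {
        return ids_len > TRACE_MAX_PROGS ? -E2BIG : 0;
    }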
kernel/user.c

@@ -101,6 +101,7 @@ struct user_struct root_user = {
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm     = 0,
 	.uid		= GLOBAL_ROOT_UID,
+	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
 };
 
 /*
@@ -191,6 +192,8 @@ struct user_struct *alloc_uid(kuid_t uid)
 
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
+		ratelimit_state_init(&new->ratelimit, HZ, 100);
+		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
 
 		/*
 		 * Before adding this, check whether we raced
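Every user_struct now carries a ratelimit_state: alloc_uid() arms it at 100 messages per HZ jiffies (one second), with RATELIMIT_MSG_ON_RELEASE deferring the "callbacks suppressed" notice, while root_user's (0, 0) state never throttles. A sketch of how a per-user state like this is typically consumed at a printing site; the call site is an assumption, not part of this diff:

    /* hypothetical consumer: gate a warning on the per-user state */
    if (__ratelimit(&user->ratelimit))
        pr_warn("example per-user message, at most 100 per second\n");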
kernel/workqueue.c

@@ -4179,6 +4179,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
+/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+	struct worker *worker = current_wq_worker();
+
+	return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
 /**
  * current_is_workqueue_rescuer - is %current workqueue rescuer?
  *
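current_work() lets code ask whether it is currently executing on a workqueue and, if so, which work item it is servicing. One hedged usage sketch, not taken from this diff; struct my_dev and its reset_work member are hypothetical:

    static void my_dev_quiesce(struct my_dev *dev)
    {
        /* flushing the work item we are running on would deadlock */
        if (current_work() != &dev->reset_work)
            flush_work(&dev->reset_work);
    }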