mirror of
https://github.com/fail0verflow/sony-psvr-linux.git
psvr 3.00
parent 6bf1031e0f
commit ec3a7a2e7c
7 changed files with 76 additions and 21 deletions
@@ -127,7 +127,7 @@ extern int ion_buffer_put(struct ion_buffer *buffer);
 extern struct ion_handle *ion_handle_lookup(struct ion_client *client,
 					    struct ion_buffer *buffer);
 extern void ion_handle_get(struct ion_handle *handle);
-extern int ion_handle_put(struct ion_handle *handle);
+extern int ion_handle_put_nolock(struct ion_handle *handle);
 extern int ion_handle_add(struct ion_client *client, struct ion_handle *handle);
 extern struct ion_handle *ion_handle_create(struct ion_client *client,
 					    struct ion_buffer *buffer);
@@ -1422,7 +1422,7 @@ struct ion_handle *ion_gethandle(struct ion_client *client, unsigned int gid)
 	}
 	ret = ion_handle_add(client, handle);
 	if (ret) {
-		ion_handle_put(handle);
+		ion_handle_put_nolock(handle);
 		handle = NULL;
 	}
 end:
@@ -412,13 +412,18 @@ void ion_handle_get(struct ion_handle *handle)
 	kref_get(&handle->ref);
 }
 
-int ion_handle_put(struct ion_handle *handle)
+int ion_handle_put_nolock(struct ion_handle *handle)
+{
+	return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
 {
 	struct ion_client *client = handle->client;
 	int ret;
 
 	mutex_lock(&client->lock);
-	ret = kref_put(&handle->ref, ion_handle_destroy);
+	ret = ion_handle_put_nolock(handle);
 	mutex_unlock(&client->lock);
 
 	return ret;
@@ -441,18 +446,28 @@ struct ion_handle *ion_handle_lookup(struct ion_client *client,
 	return ERR_PTR(-EINVAL);
 }
 
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+						int id)
+{
+	struct ion_handle *handle;
+
+	handle = idr_find(&client->idr, id);
+	if (handle)
+		ion_handle_get(handle);
+
+	return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
 						int id)
 {
 	struct ion_handle *handle;
 
 	mutex_lock(&client->lock);
-	handle = idr_find(&client->idr, id);
-	if (handle)
-		ion_handle_get(handle);
+	handle = ion_handle_get_by_id_nolock(client, id);
 	mutex_unlock(&client->lock);
 
-	return handle ? handle : ERR_PTR(-EINVAL);
+	return handle;
 }
 
 static bool ion_handle_validate(struct ion_client *client,
@@ -639,22 +654,28 @@ retry:
 }
 EXPORT_SYMBOL(ion_alloc);
 
-void ion_free(struct ion_client *client, struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
 {
 	bool valid_handle;
 
 	BUG_ON(client != handle->client);
 
-	mutex_lock(&client->lock);
 	valid_handle = ion_handle_validate(client, handle);
 
 	if (!valid_handle) {
 		WARN(1, "%s: invalid handle passed to free.\n", __func__);
-		mutex_unlock(&client->lock);
 		return;
 	}
+	ion_handle_put_nolock(handle);
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	ion_free_nolock(client, handle);
 	mutex_unlock(&client->lock);
-	ion_handle_put(handle);
 }
 EXPORT_SYMBOL(ion_free);
 
@@ -941,11 +962,13 @@ void ion_client_destroy(struct ion_client *client)
 	struct rb_node *n;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&client->lock);
 	while ((n = rb_first(&client->handles))) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
 		ion_handle_destroy(&handle->ref);
 	}
+	mutex_unlock(&client->lock);
 
 	idr_destroy(&client->idr);
 
@@ -1406,11 +1429,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	{
 		struct ion_handle *handle;
 
-		handle = ion_handle_get_by_id(client, data.handle.handle);
-		if (IS_ERR(handle))
+		mutex_lock(&client->lock);
+		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+		if (IS_ERR(handle)) {
+			mutex_unlock(&client->lock);
 			return PTR_ERR(handle);
-		ion_free(client, handle);
-		ion_handle_put(handle);
+		}
+		ion_free_nolock(client, handle);
+		ion_handle_put_nolock(handle);
+		mutex_unlock(&client->lock);
 		break;
 	}
 	case ION_IOC_SHARE:
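Read together, the ion hunks above follow the usual lock/_nolock split: each helper that used to take client->lock internally gets a *_nolock variant that assumes the caller already holds it, and the ioctl free path then does the lookup, free and put under a single hold of that lock instead of dropping and re-taking it between steps. Below is a minimal userspace sketch of that pattern, not driver code; the names (client_t, handle_release, live_handles) are invented, and a plain counter stands in for the client's handle table.

#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;
	int live_handles;          /* stand-in for the client's handle table */
} client_t;

/* Caller must already hold client->lock. */
static void handle_release_nolock(client_t *client)
{
	client->live_handles--;    /* the lookup + refcount drop would go here */
}

/* Public entry point: takes the lock once for the whole operation, so a
 * concurrent free path cannot slip in between "look the handle up" and
 * "drop the reference" -- the window the patch closes. */
void handle_release(client_t *client)
{
	pthread_mutex_lock(&client->lock);
	handle_release_nolock(client);
	pthread_mutex_unlock(&client->lock);
}

int main(void)
{
	client_t c = { PTHREAD_MUTEX_INITIALIZER, 1 };
	handle_release(&c);
	printf("live handles: %d\n", c.live_handles);
	return 0;
}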
@@ -126,7 +126,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 {
	struct ion_carveout_heap *carveout_heap;
 
-	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+	carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
 	if (!carveout_heap)
 		return ERR_PTR(-ENOMEM);
 
@@ -55,7 +55,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
 		return -ENOMEM;
 
-	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
 		return -ENOMEM;
 	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
@@ -3723,6 +3723,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
 		   EXT4_DESC_PER_BLOCK(sb);
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG)) {
+		if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+			ext4_msg(sb, KERN_WARNING,
+				 "first meta block group too large: %u "
+				 "(group descriptor block count %u)",
+				 le32_to_cpu(es->s_first_meta_bg), db_count);
+			goto failed_mount;
+		}
+	}
 	sbi->s_group_desc = ext4_kvmalloc(db_count *
 					  sizeof(struct buffer_head *),
 					  GFP_KERNEL);
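The ext4 hunk adds a mount-time sanity check: s_first_meta_bg is read straight from the on-disk superblock, and with META_BG enabled it helps decide where group-descriptor blocks live, so a value at or beyond db_count would later reach past the db_count-entry s_group_desc array. A tiny standalone sketch of the same validate-before-use idea, with made-up numbers and names (mount_check is not an ext4 function):

#include <stdio.h>
#include <stdint.h>

/* Reject an untrusted on-disk index before it is ever used to address the
 * db_count group-descriptor blocks, mirroring the check added above. */
static int mount_check(uint32_t first_meta_bg, unsigned int db_count)
{
	if (first_meta_bg >= db_count) {
		fprintf(stderr,
			"first meta block group too large: %u (group descriptor block count %u)\n",
			first_meta_bg, db_count);
		return -1;   /* refuse the mount instead of indexing past the array */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", mount_check(3, 8));     /* in range: accepted */
	printf("%d\n", mount_check(100, 8));   /* out of range: rejected */
	return 0;
}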
@@ -1527,6 +1527,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		goto stat;
 
 #ifdef CONFIG_SMP
+	/*
+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+	 * possible to, falsely, observe p->on_cpu == 0.
+	 *
+	 * One must be running (->on_cpu == 1) in order to remove oneself
+	 * from the runqueue.
+	 *
+	 *  [S] ->on_cpu = 1;	[L] ->on_rq
+	 *      UNLOCK rq->lock
+	 *			RMB
+	 *	    LOCK rq->lock
+	 *  [S] ->on_rq = 0;	[L] ->on_cpu
+	 *
+	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+	 * from the consecutive calls to schedule(); the first switching to our
+	 * task, the second putting it to sleep.
+	 */
+	smp_rmb();
+
 	/*
 	 * If the owning (remote) cpu is still in the middle of schedule() with
 	 * this task as prev, wait until its done referencing the task.
@@ -727,7 +727,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		val = min_t(u32, val, sysctl_wmem_max);
 set_sndbuf:
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 		/* Wake up sending tasks if we upped the value. */
 		sk->sk_write_space(sk);
 		break;
@@ -763,7 +763,7 @@ set_rcvbuf:
 		 * returning the value we actually used in getsockopt
 		 * is the most desirable behavior.
 		 */
-		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
+		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
 		break;
 
 	case SO_RCVBUFFORCE:
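The two sock.c hunks change only the type argument of max_t(). With max_t(u32, ...), a negative val * 2 (reachable through the SO_SNDBUFFORCE / SO_RCVBUFFORCE paths, which jump to set_sndbuf/set_rcvbuf without the sysctl clamp) converts to a huge unsigned value, wins the comparison against the minimum, and then wraps back to a negative number when stored in the int-sized buffer field; with max_t(int, ...) the minimum floor actually applies. A small standalone demo of that difference, using a simplified stand-in for the kernel's max_t() and a made-up MIN_SNDBUF in place of SOCK_MIN_SNDBUF:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's max_t(): compare after casting both
 * operands to the given type. MIN_SNDBUF is a hypothetical floor value. */
#define max_t(type, x, y) ((type)(x) > (type)(y) ? (type)(x) : (type)(y))
#define MIN_SNDBUF 2048

int main(void)
{
	int val = -1;   /* e.g. a negative size forced in via SO_SNDBUFFORCE */
	int sndbuf_old, sndbuf_new;

	/* Old expression: (u32)(-2) is huge, beats the floor, and wraps back
	 * to a negative value when stored in an int field (on the usual
	 * two's-complement platforms). */
	sndbuf_old = max_t(uint32_t, val * 2, MIN_SNDBUF);

	/* New expression: the comparison happens as int, so the floor wins. */
	sndbuf_new = max_t(int, val * 2, MIN_SNDBUF);

	printf("max_t(u32, ...) -> %d\n", sndbuf_old);  /* typically -2 */
	printf("max_t(int, ...) -> %d\n", sndbuf_new);  /* 2048 */
	return 0;
}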