POWERSUCKER
 
 
XD)
Currently Offline
Recent Activity
649 hrs on record
last played on 27 Dec, 2024
362 hrs on record
last played on 26 Dec, 2024
48 hrs on record
last played on 22 Dec, 2024
^:D 5 Jul, 2024 @ 12:24pm 
I'm writing from my 2nd account)
i wn to die 1 Feb, 2024 @ 10:32am 
سام 31 Oct, 2023 @ 11:01am 
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks * sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	/* Small sets fit in the embedded block; larger ones get one page per block. */
	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;

			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* Free the pages allocated so far, then the struct itself. */
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;
}