POWERSUCKER
 
 
XD)
Currently offline
Recent Activity
32 hrs on record
last played on Jan 19
649 hrs on record
last played on Jan 19
362 hrs on record
last played on Dec 26, 2024
^:D Jul 5, 2024 @ 12:24
I'm from my 2nd account)
i wn to die Feb 1, 2024 @ 10:32
This comment has not yet been analyzed by our automatic content check system. It will be temporarily hidden until we verify that it does not contain harmful content (for example, links to websites that attempt to steal information).
سام Oct 31, 2023 @ 11:01
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
    struct group_info *group_info;
    int nblocks;
    int i;

    nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
    /* Make sure we always allocate at least one indirect block pointer */
    nblocks = nblocks ? : 1;
    group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
    if (!group_info)
        return NULL;
    group_info->ngroups = gidsetsize;
    group_info->nblocks = nblocks;
    atomic_set(&group_info->usage, 1);

    /* Small group sets fit in the embedded small_block array;
     * larger ones get one freshly allocated page per block. */
    if (gidsetsize <= NGROUPS_SMALL)
        group_info->blocks[0] = group_info->small_block;
    else {
        for (i = 0; i < nblocks; i++) {
            gid_t *b;
            b = (void *)__get_free_page(GFP_USER);
            if (!b)
                goto out_undo_partial_alloc;
            group_info->blocks[i] = b;
        }
    }
    return group_info;

out_undo_partial_alloc:
    /* Free the pages allocated before the failure, then the struct itself. */
    while (--i >= 0)
        free_page((unsigned long)group_info->blocks[i]);
    kfree(group_info);
    return NULL;
}