Bug Summary

File: obj-scan-build/../i386/i386/user_ldt.c
Location: line 83, column 13
Description: Dereference of null pointer

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1994,1993,1992,1991 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * User LDT management.
28 * Each thread in a task may have its own LDT.
29 */
30
31#include <string.h>
32
33#include <kern/kalloc.h>
34#include <kern/thread.h>
35
36#include <vm/vm_kern.h>
37
38#include <i386/pcb.h>
39#include <i386/seg.h>
40#include <i386/thread.h>
41#include <i386/user_ldt.h>
42#include <stddef.h>
43#include "ldt.h"
44#include "vm_param.h"
45
46char acc_type[8][3] = {
47 /* code stack data */
48 { 0, 0, 1 }, /* data */
49 { 0, 1, 1 }, /* data, writable */
50 { 0, 0, 1 }, /* data, expand-down */
51 { 0, 1, 1 }, /* data, writable, expand-down */
52 { 1, 0, 0 }, /* code */
53 { 1, 0, 1 }, /* code, readable */
54 { 1, 0, 0 }, /* code, conforming */
55 { 1, 0, 1 }, /* code, readable, conforming */
56};
57
58boolean_t selector_check(thread, sel, type)
59 thread_t thread;
60 int sel;
61 int type; /* code, stack, data */
62{
63 struct user_ldt *ldt;
64 int access;
65
66 ldt = thread->pcb->ims.ldt;
67 if (ldt == 0) {
1
Assuming 'ldt' is equal to null
2
Taking true branch
68 switch (type) {
3
'Default' branch taken. Execution continues on line 80
69 case S_CODE0:
70 return sel == USER_CS0x17;
71 case S_STACK1:
72 return sel == USER_DS0x1f;
73 case S_DATA2:
74 return sel == 0 ||
75 sel == USER_CS0x17 ||
76 sel == USER_DS0x1f;
77 }
78 }
79
80 if (type != S_DATA2 && sel == 0)
4
Assuming 'sel' is not equal to 0
5
Taking false branch
81 return FALSE((boolean_t) 0);
82 if ((sel & (SEL_LDT0x04|SEL_PL0x03)) != (SEL_LDT0x04|SEL_PL_U0x03)
83 || sel > ldt->desc.limit_low)
6
Dereference of null pointer
84 return FALSE((boolean_t) 0);
85
86 access = ldt->ldt[sel_idx(sel)((sel)>>3)].access;
87
88 if ((access & (ACC_P0x80|ACC_PL0x60|ACC_TYPE_USER0x10|SZ_640x2))
89 != (ACC_P0x80|ACC_PL_U0x60|ACC_TYPE_USER0x10))
90 return FALSE((boolean_t) 0);
91 /* present, pl == pl.user, not system, not 64bits */
92
93 return acc_type[(access & 0xe)>>1][type];
94}
95
96/*
97 * Add the descriptors to the LDT, starting with
98 * the descriptor for 'first_selector'.
99 */
100kern_return_t
101i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
102 thread_t thread;
103 int first_selector;
104 struct real_descriptor *desc_list;
105 unsigned int count;
106 boolean_t desc_list_inline;
107{
108 user_ldt_t new_ldt, old_ldt, temp;
109 struct real_descriptor *dp;
110 int i;
111 int min_selector = 0;
112 pcb_t pcb;
113 vm_size_t ldt_size_needed;
114 int first_desc = sel_idx(first_selector)((first_selector)>>3);
115 vm_map_copy_t old_copy_object = NULL((void *) 0); /* Suppress gcc warning */
116
117 if (thread == THREAD_NULL((thread_t) 0))
118 return KERN_INVALID_ARGUMENT4;
119 if (thread == current_thread()(active_threads[(0)]))
120 min_selector = LDTSZ4;
121 if (first_desc < min_selector || first_desc > 8191)
122 return KERN_INVALID_ARGUMENT4;
123 if (first_desc + count >= 8192)
124 return KERN_INVALID_ARGUMENT4;
125
126 /*
127 * If desc_list is not inline, it is in copyin form.
128 * We must copy it out to the kernel map, and wire
129 * it down (we touch it while the PCB is locked).
130 *
131 * We make a copy of the copyin object, and clear
132 * out the old one, so that returning KERN_INVALID_ARGUMENT
133 * will not try to deallocate the data twice.
134 */
135 if (!desc_list_inline) {
136 kern_return_t kr;
137 vm_offset_t dst_addr;
138
139 old_copy_object = (vm_map_copy_t) desc_list;
140
141 kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
142 vm_map_copy_copy(old_copy_object));
143 if (kr != KERN_SUCCESS0)
144 return kr;
145
146 (void) vm_map_pageable(ipc_kernel_map,vm_map_pageable_common(ipc_kernel_map, dst_addr, dst_addr + count
* sizeof(struct real_descriptor), ((vm_prot_t) 0x01)|((vm_prot_t
) 0x02), ((boolean_t) 0))
147 dst_addr,vm_map_pageable_common(ipc_kernel_map, dst_addr, dst_addr + count
* sizeof(struct real_descriptor), ((vm_prot_t) 0x01)|((vm_prot_t
) 0x02), ((boolean_t) 0))
148 dst_addr + count * sizeof(struct real_descriptor),vm_map_pageable_common(ipc_kernel_map, dst_addr, dst_addr + count
* sizeof(struct real_descriptor), ((vm_prot_t) 0x01)|((vm_prot_t
) 0x02), ((boolean_t) 0))
149 VM_PROT_READ|VM_PROT_WRITE)vm_map_pageable_common(ipc_kernel_map, dst_addr, dst_addr + count
* sizeof(struct real_descriptor), ((vm_prot_t) 0x01)|((vm_prot_t
) 0x02), ((boolean_t) 0))
;
150 desc_list = (struct real_descriptor *)dst_addr;
151 }
152
153 for (i = 0, dp = desc_list;
154 i < count;
155 i++, dp++)
156 {
157 switch (dp->access & ~ACC_A0x01) {
158 case 0:
159 case ACC_P0x80:
160 /* valid empty descriptor */
161 break;
162 case ACC_P0x80 | ACC_CALL_GATE0x0c:
163 /* Mach kernel call */
164 *dp = *(struct real_descriptor *)
165 &ldt[sel_idx(USER_SCALL)((0x07)>>3)];
166 break;
167 case ACC_P0x80 | ACC_PL_U0x60 | ACC_DATA0x10:
168 case ACC_P0x80 | ACC_PL_U0x60 | ACC_DATA_W0x12:
169 case ACC_P0x80 | ACC_PL_U0x60 | ACC_DATA_E0x14:
170 case ACC_P0x80 | ACC_PL_U0x60 | ACC_DATA_EW0x16:
171 case ACC_P0x80 | ACC_PL_U0x60 | ACC_CODE0x18:
172 case ACC_P0x80 | ACC_PL_U0x60 | ACC_CODE_R0x1a:
173 case ACC_P0x80 | ACC_PL_U0x60 | ACC_CODE_C0x1c:
174 case ACC_P0x80 | ACC_PL_U0x60 | ACC_CODE_CR0x1e:
175 case ACC_P0x80 | ACC_PL_U0x60 | ACC_CALL_GATE_160x04:
176 case ACC_P0x80 | ACC_PL_U0x60 | ACC_CALL_GATE0x0c:
177 break;
178 default:
179 return KERN_INVALID_ARGUMENT4;
180 }
181 }
182 ldt_size_needed = sizeof(struct real_descriptor)
183 * (first_desc + count);
184
185 pcb = thread->pcb;
186 new_ldt = 0;
187 Retry:
188 simple_lock(&pcb->lock);
189 old_ldt = pcb->ims.ldt;
190 if (old_ldt == 0 ||
191 old_ldt->desc.limit_low + 1 < ldt_size_needed)
192 {
193 /*
194 * No old LDT, or not big enough
195 */
196 if (new_ldt == 0) {
197 simple_unlock(&pcb->lock);
198
199#ifdef MACH_PV_DESCRIPTORS
200 /* LDT needs to be aligned on a page */
201 vm_offset_t alloc = kalloc(ldt_size_needed + PAGE_SIZE(1 << 12) + offsetof(struct user_ldt, ldt)__builtin_offsetof (struct user_ldt, ldt));
202 new_ldt = (user_ldt_t) (round_page((alloc + offsetof(struct user_ldt, ldt)))((vm_offset_t)((((vm_offset_t)((alloc + __builtin_offsetof (struct
user_ldt, ldt)))) + ((1 << 12)-1)) & ~((1 <<
12)-1)))
- offsetof(struct user_ldt, ldt)__builtin_offsetof (struct user_ldt, ldt));
203 new_ldt->alloc = alloc;
204
205#else /* MACH_PV_DESCRIPTORS */
206 new_ldt = (user_ldt_t)
207 kalloc(ldt_size_needed
208 + sizeof(struct real_descriptor));
209#endif /* MACH_PV_DESCRIPTORS */
210 /*
211 * Build a descriptor that describes the
212 * LDT itself
213 */
214 {
215 vm_offset_t ldt_base;
216
217 ldt_base = kvtolin(&new_ldt->ldt[0])((vm_offset_t)(&new_ldt->ldt[0]) - 0xC0000000UL + ((0xc0000000UL
)))
;
218
219 new_ldt->desc.limit_low = ldt_size_needed - 1;
220 new_ldt->desc.limit_high = 0;
221 new_ldt->desc.base_low = ldt_base & 0xffff;
222 new_ldt->desc.base_med = (ldt_base >> 16) & 0xff;
223 new_ldt->desc.base_high = ldt_base >> 24;
224 new_ldt->desc.access = ACC_P0x80 | ACC_LDT0x02;
225 new_ldt->desc.granularity = 0;
226 }
227
228 goto Retry;
229 }
230
231 /*
232 * Have new LDT. If there was a an old ldt, copy descriptors
233 * from old to new. Otherwise copy the default ldt.
234 */
235 if (old_ldt) {
236 memcpy(&new_ldt->ldt[0],
237 &old_ldt->ldt[0],
238 old_ldt->desc.limit_low + 1);
239 }
240 else {
241 struct real_descriptor template = {0, 0, 0, ACC_P0x80, 0, 0 ,0};
242
243 for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) {
244 if (i < LDTSZ4)
245 *dp = *(struct real_descriptor *) &ldt[i];
246 else
247 *dp = template;
248 }
249 }
250
251 temp = old_ldt;
252 old_ldt = new_ldt; /* use new LDT from now on */
253 new_ldt = temp; /* discard old LDT */
254
255 pcb->ims.ldt = old_ldt; /* set LDT for thread */
256
257 /*
258 * If we are modifying the LDT for the current thread,
259 * make sure it is properly set.
260 */
261 if (thread == current_thread()(active_threads[(0)]))
262 switch_ktss(pcb);
263 }
264
265 /*
266 * Install new descriptors.
267 */
268 memcpy(&old_ldt->ldt[first_desc],
269 desc_list,
270 count * sizeof(struct real_descriptor));
271
272 simple_unlock(&pcb->lock);
273
274 if (new_ldt)
275#ifdef MACH_PV_DESCRIPTORS
276 {
277 int i;
278#ifdef MACH_PV_PAGETABLES
279 for (i=0; i<(new_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE(1 << 12)/sizeof(struct real_descriptor))
280 pmap_set_page_readwrite(&new_ldt->ldt[i]);
281#endif /* MACH_PV_PAGETABLES*/
282 kfree(new_ldt->alloc, new_ldt->desc.limit_low + 1
283 + PAGE_SIZE(1 << 12) + offsetof(struct user_ldt, ldt)__builtin_offsetof (struct user_ldt, ldt));
284 }
285#else /* MACH_PV_DESCRIPTORS */
286 kfree((vm_offset_t)new_ldt,
287 new_ldt->desc.limit_low + 1
288 + sizeof(struct real_descriptor));
289#endif /* MACH_PV_DESCRIPTORS */
290
291 /*
292 * Free the descriptor list, if it was
293 * out-of-line. Also discard the original
294 * copy object for it.
295 */
296 if (!desc_list_inline) {
297 (void) kmem_free(ipc_kernel_map,
298 (vm_offset_t) desc_list,
299 count * sizeof(struct real_descriptor));
300 vm_map_copy_discard(old_copy_object);
301 }
302
303 return KERN_SUCCESS0;
304}
305
306kern_return_t
307i386_get_ldt(thread, first_selector, selector_count, desc_list, count)
308 thread_t thread;
309 int first_selector;
310 int selector_count; /* number wanted */
311 struct real_descriptor **desc_list; /* in/out */
312 unsigned int *count; /* in/out */
313{
314 struct user_ldt *user_ldt;
315 pcb_t pcb = thread->pcb;
316 int first_desc = sel_idx(first_selector)((first_selector)>>3);
317 unsigned int ldt_count;
318 vm_size_t ldt_size;
319 vm_size_t size, size_needed;
320 vm_offset_t addr;
321
322 if (thread == THREAD_NULL((thread_t) 0))
323 return KERN_INVALID_ARGUMENT4;
324 if (first_desc < 0 || first_desc > 8191)
325 return KERN_INVALID_ARGUMENT4;
326 if (first_desc + selector_count >= 8192)
327 return KERN_INVALID_ARGUMENT4;
328
329 addr = 0;
330 size = 0;
331
332 for (;;) {
333 simple_lock(&pcb->lock);
334 user_ldt = pcb->ims.ldt;
335 if (user_ldt == 0) {
336 simple_unlock(&pcb->lock);
337 if (addr)
338 kmem_free(ipc_kernel_map, addr, size);
339 *count = 0;
340 return KERN_SUCCESS0;
341 }
342
343 /*
344 * Find how many descriptors we should return.
345 */
346 ldt_count = (user_ldt->desc.limit_low + 1) /
347 sizeof (struct real_descriptor);
348 ldt_count -= first_desc;
349 if (ldt_count > selector_count)
350 ldt_count = selector_count;
351
352 ldt_size = ldt_count * sizeof(struct real_descriptor);
353
354 /*
355 * Do we have the memory we need?
356 */
357 if (ldt_count <= *count)
358 break; /* fits in-line */
359
360 size_needed = round_page(ldt_size)((vm_offset_t)((((vm_offset_t)(ldt_size)) + ((1 << 12)-
1)) & ~((1 << 12)-1)))
;
361 if (size_needed <= size)
362 break;
363
364 /*
365 * Unlock the pcb and allocate more memory
366 */
367 simple_unlock(&pcb->lock);
368
369 if (size != 0)
370 kmem_free(ipc_kernel_map, addr, size);
371
372 size = size_needed;
373
374 if (kmem_alloc(ipc_kernel_map, &addr, size)
375 != KERN_SUCCESS0)
376 return KERN_RESOURCE_SHORTAGE6;
377 }
378
379 /*
380 * copy out the descriptors
381 */
382 memcpy(*desc_list,
383 &user_ldt->ldt[first_desc],
384 ldt_size);
385 *count = ldt_count;
386 simple_unlock(&pcb->lock);
387
388 if (addr) {
389 vm_size_t size_used, size_left;
390 vm_map_copy_t memory;
391
392 /*
393 * Free any unused memory beyond the end of the last page used
394 */
395 size_used = round_page(ldt_size)((vm_offset_t)((((vm_offset_t)(ldt_size)) + ((1 << 12)-
1)) & ~((1 << 12)-1)))
;
396 if (size_used != size)
397 kmem_free(ipc_kernel_map,
398 addr + size_used, size - size_used);
399
400 /*
401 * Zero the remainder of the page being returned.
402 */
403 size_left = size_used - ldt_size;
404 if (size_left > 0)
405 memset((char *)addr + ldt_size, 0, size_left);
406
407 /*
408 * Make memory into copyin form - this unwires it.
409 */
410 (void) vm_map_copyin(ipc_kernel_map, addr, size_used,
411 TRUE((boolean_t) 1), &memory);
412 *desc_list = (struct real_descriptor *)memory;
413 }
414
415 return KERN_SUCCESS0;
416}
417
418void
419user_ldt_free(user_ldt)
420 user_ldt_t user_ldt;
421{
422#ifdef MACH_PV_DESCRIPTORS
423 int i;
424#ifdef MACH_PV_PAGETABLES
425 for (i=0; i<(user_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE(1 << 12)/sizeof(struct real_descriptor))
426 pmap_set_page_readwrite(&user_ldt->ldt[i]);
427#endif /* MACH_PV_PAGETABLES */
428 kfree(user_ldt->alloc, user_ldt->desc.limit_low + 1
429 + PAGE_SIZE(1 << 12) + offsetof(struct user_ldt, ldt)__builtin_offsetof (struct user_ldt, ldt));
430#else /* MACH_PV_DESCRIPTORS */
431 kfree((vm_offset_t)user_ldt,
432 user_ldt->desc.limit_low + 1
433 + sizeof(struct real_descriptor));
434#endif /* MACH_PV_DESCRIPTORS */
435}
436
437
438kern_return_t
439i386_set_gdt (thread_t thread, int *selector, struct real_descriptor desc)
440{
441 int idx;
442
443 if (thread == THREAD_NULL((thread_t) 0))
444 return KERN_INVALID_ARGUMENT4;
445
446 if (*selector == -1)
447 {
448 for (idx = 0; idx < USER_GDT_SLOTS2; ++idx)
449 if ((thread->pcb->ims.user_gdt[idx].access & ACC_P0x80) == 0)
450 {
451 *selector = ((idx + sel_idx(USER_GDT)((0x48)>>3)) << 3) | SEL_PL_U0x03;
452 break;
453 }
454 if (idx == USER_GDT_SLOTS2)
455 return KERN_NO_SPACE3; /* ? */
456 }
457 else if ((*selector & (SEL_LDT0x04|SEL_PL0x03)) != SEL_PL_U0x03
458 || sel_idx (*selector)((*selector)>>3) < sel_idx(USER_GDT)((0x48)>>3)
459 || sel_idx (*selector)((*selector)>>3) >= sel_idx(USER_GDT)((0x48)>>3) + USER_GDT_SLOTS2)
460 return KERN_INVALID_ARGUMENT4;
461 else
462 idx = sel_idx (*selector)((*selector)>>3) - sel_idx(USER_GDT)((0x48)>>3);
463
464 if ((desc.access & ACC_P0x80) == 0)
465 memset (&thread->pcb->ims.user_gdt[idx], 0,
466 sizeof thread->pcb->ims.user_gdt[idx]);
467 else if ((desc.access & (ACC_TYPE_USER0x10|ACC_PL0x60)) != (ACC_TYPE_USER0x10|ACC_PL_U0x60) || (desc.granularity & SZ_640x2))
468
469 return KERN_INVALID_ARGUMENT4;
470 else
471 thread->pcb->ims.user_gdt[idx] = desc;
472
473 /*
474 * If we are modifying the GDT for the current thread,
475 * make sure it is properly set.
476 */
477 if (thread == current_thread()(active_threads[(0)]))
478 switch_ktss(thread->pcb);
479
480 return KERN_SUCCESS0;
481}
482
483kern_return_t
484i386_get_gdt (thread_t thread, int selector, struct real_descriptor *desc)
485{
486 if (thread == THREAD_NULL((thread_t) 0))
487 return KERN_INVALID_ARGUMENT4;
488
489 if ((selector & (SEL_LDT0x04|SEL_PL0x03)) != SEL_PL_U0x03
490 || sel_idx (selector)((selector)>>3) < sel_idx(USER_GDT)((0x48)>>3)
491 || sel_idx (selector)((selector)>>3) >= sel_idx(USER_GDT)((0x48)>>3) + USER_GDT_SLOTS2)
492 return KERN_INVALID_ARGUMENT4;
493
494 *desc = thread->pcb->ims.user_gdt[sel_idx (selector)((selector)>>3) - sel_idx(USER_GDT)((0x48)>>3)];
495
496 return KERN_SUCCESS0;
497}