Bug Summary

File: obj-scan-build/../i386/i386/fpu.c
Location: line 643, column 6
Description: Access to field 'fp_valid' results in a dereference of a null pointer (loaded from field 'ifps')

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1992-1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27/*
28 * Copyright (C) 1994 Linus Torvalds
29 *
30 * Pentium III FXSR, SSE support
31 * General FPU state handling cleanups
32 * Gareth Hughes <gareth@valinux.com>, May 2000
33 */
34
35/*
36 * Support for 80387 floating point or FP emulator.
37 */
38
39#include <string.h>
40
41#include <mach/exception.h>
42#include <mach/machine/thread_status.h>
43#include <mach/machine/fp_reg.h>
44
45#include <kern/debug.h>
46#include <machine/machspl.h> /* spls */
47#include <kern/printf.h>
48#include <kern/thread.h>
49#include <kern/slab.h>
50
51#include <i386/thread.h>
52#include <i386/fpu.h>
53#include <i386/pio.h>
54#include <i386/pic.h>
55#include <i386/locore.h>
56#include "cpu_number.h"
57
58#if 0
59#include <i386/ipl.h>
60#define ASSERT_IPL(L) \
61{ \
62 if (curr_ipl != L) { \
63 printf("IPL is %d, expected %d\n", curr_ipl, L); \
64 panic("fpu: wrong ipl"); \
65 } \
66}
67#else
68#define ASSERT_IPL(L)
69#endif
70
71extern void i386_exception();
72
73 int fp_kind = FP_387; /* 80387 present */
74struct kmem_cache ifps_cache; /* cache for FPU save area */
75static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
76
77void fp_save(thread_t thread);
78void fp_load(thread_t thread);
79
80#if NCPUS == 1
81volatile thread_t fp_thread = THREAD_NULL;
82 /* thread whose state is in FPU */
83 /* always THREAD_NULL if emulating
84 FPU */
85volatile thread_t fp_intr_thread = THREAD_NULL;
86
87
88#define clear_fpu() \
89 { \
90 set_ts(); \
91 fp_thread = THREAD_NULL; \
92 }
93
94#else /* NCPUS > 1 */
95#define clear_fpu() \
96 { \
97 set_ts(); \
98 }
99
100#endif
101
102
103/*
104 * Look for FPU and initialize it.
105 * Called on each CPU.
106 */
107void
108init_fpu()
109{
110 unsigned short status, control;
111
112#ifdef MACH_RING1
113 clear_ts();
114#else /* MACH_RING1 */
115 unsigned int native = 0;
116
117 if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
118 native = CR0_NE;
119
120 /*
121 * Check for FPU by initializing it,
122 * then trying to read the correct bit patterns from
123 * the control and status registers.
124 */
125 set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native); /* allow use of FPU */
126#endif /* MACH_RING1 */
127
128 fninit();
129 status = fnstsw();
130 fnstcw(&control);
131
132 if ((status & 0xff) == 0 &&
133 (control & 0x103f) == 0x3f)
134 {
135 /*
136 * We have a FPU of some sort.
137 * Compare -infinity against +infinity
138 * to check whether we have a 287 or a 387.
139 */
140 volatile double fp_infinity, fp_one, fp_zero;
141 fp_one = 1.0;
142 fp_zero = 0.0;
143 fp_infinity = fp_one / fp_zero;
144 if (fp_infinity == -fp_infinity) {
145 /*
146 * We have an 80287.
147 */
148 fp_kind = FP_287;
149 asm volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */
150 }
151 else {
152 /*
153 * We have a 387.
154 */
155 if (CPU_HAS_FEATURE(CPU_FEATURE_FXSR)) {
156 static /* because we _need_ alignment */
157 struct i386_xfp_save save;
158 unsigned long mask;
159 fp_kind = FP_387X;
160#ifndef MACH_RING1
161 set_cr4(get_cr4() | CR4_OSFXSR);
162#endif /* MACH_RING1 */
163 fxsave(&save);
164 mask = save.fp_mxcsr_mask;
165 if (!mask)
166 mask = 0x0000ffbf;
167 mxcsr_feature_mask &= mask;
168 } else
169 fp_kind = FP_387;
170 }
171#ifdef MACH_RING1
172 set_ts();
173#else /* MACH_RING1 */
174 /*
175 * Trap wait instructions. Turn off FPU for now.
176 */
177 set_cr0(get_cr0() | CR0_TS | CR0_MP);
178#endif /* MACH_RING1 */
179 }
180 else {
181 /*
182 * NO FPU.
183 */
184 panic("No FPU!");
185 }
186}
187
188/*
189 * Initialize FP handling.
190 */
191void
192fpu_module_init()
193{
194 kmem_cache_init(&ifps_cache, "i386_fpsave_state",
195 sizeof(struct i386_fpsave_state), 16,
196 NULL, NULL, NULL, 0);
197}
198
199/*
200 * Free a FPU save area.
201 * Called only when thread terminating - no locking necessary.
202 */
203void
204fp_free(fps)
205 struct i386_fpsave_state *fps;
206{
207ASSERT_IPL(SPL0);
208#if NCPUS == 1
209 if ((fp_thread != THREAD_NULL) && (fp_thread->pcb->ims.ifps == fps)) {
210 /*
211 * Make sure we don't get FPU interrupts later for
212 * this thread
213 */
214 clear_ts();
215 fwait();
216
217 /* Mark it free and disable access */
218 clear_fpu();
219 }
220#endif /* NCPUS == 1 */
221 kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
222}
223
224/* The two following functions were stolen from Linux's i387.c */
225static inline unsigned short
226twd_i387_to_fxsr (unsigned short twd)
227{
228 unsigned int tmp; /* to avoid 16 bit prefixes in the code */
229
230 /* Transform each pair of bits into 01 (valid) or 00 (empty) */
231 tmp = ~twd;
232 tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
233 /* and move the valid bits to the lower byte. */
234 tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
235 tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
236 tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
237 return tmp;
238}
239
240static inline unsigned long
241twd_fxsr_to_i387 (struct i386_xfp_save *fxsave)
242{
243 struct {
244 unsigned short significand[4];
245 unsigned short exponent;
246 unsigned short padding[3];
247 } *st = NULL;
248 unsigned long tos = (fxsave->fp_status >> 11) & 7;
249 unsigned long twd = (unsigned long) fxsave->fp_tag;
250 unsigned long tag;
251 unsigned long ret = 0xffff0000u;
252 int i;
253
254#define FPREG_ADDR(f, n) ((void *)&(f)->fp_reg_word + (n) * 16);
255
256 for (i = 0 ; i < 8 ; i++) {
257 if (twd & 0x1) {
258 st = FPREG_ADDR (fxsave, (i - tos) & 7);
259
260 switch (st->exponent & 0x7fff) {
261 case 0x7fff:
262 tag = 2; /* Special */
263 break;
264 case 0x0000:
265 if (!st->significand[0] &&
266 !st->significand[1] &&
267 !st->significand[2] &&
268 !st->significand[3] ) {
269 tag = 1; /* Zero */
270 } else {
271 tag = 2; /* Special */
272 }
273 break;
274 default:
275 if (st->significand[3] & 0x8000) {
276 tag = 0; /* Valid */
277 } else {
278 tag = 2; /* Special */
279 }
280 break;
281 }
282 } else {
283 tag = 3; /* Empty */
284 }
285 ret |= (tag << (2 * i));
286 twd = twd >> 1;
287 }
288 return ret;
289}
290
291/*
292 * Set the floating-point state for a thread.
293 * If the thread is not the current thread, it is
294 * not running (held). Locking needed against
295 * concurrent fpu_set_state or fpu_get_state.
296 */
297kern_return_t
298fpu_set_state(thread, state)
299 thread_t thread;
300 struct i386_float_state *state;
301{
302 register pcb_t pcb = thread->pcb;
303 register struct i386_fpsave_state *ifps;
304 register struct i386_fpsave_state *new_ifps;
305
306ASSERT_IPL(SPL0);
307 if (fp_kind == FP_NO)
308 return KERN_FAILURE;
309
310#if NCPUS == 1
311
312 /*
313 * If this thread`s state is in the FPU,
314 * discard it; we are replacing the entire
315 * FPU state.
316 */
317 if (fp_thread == thread) {
318 clear_ts();
319 fwait(); /* wait for possible interrupt */
320 clear_fpu(); /* no state in FPU */
321 }
322#endif
323
324 if (state->initialized == 0) {
325 /*
326 * new FPU state is 'invalid'.
327 * Deallocate the fp state if it exists.
328 */
329 simple_lock(&pcb->lock);
330 ifps = pcb->ims.ifps;
331 pcb->ims.ifps = 0;
332 simple_unlock(&pcb->lock);
333
334 if (ifps != 0) {
335 kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
336 }
337 }
338 else {
339 /*
340 * Valid state. Allocate the fp state if there is none.
341 */
342 register struct i386_fp_save *user_fp_state;
343 register struct i386_fp_regs *user_fp_regs;
344
345 user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
346 user_fp_regs = (struct i386_fp_regs *)
347 &state->hw_state[sizeof(struct i386_fp_save)];
348
349 new_ifps = 0;
350 Retry:
351 simple_lock(&pcb->lock);
352 ifps = pcb->ims.ifps;
353 if (ifps == 0) {
354 if (new_ifps == 0) {
355 simple_unlock(&pcb->lock);
356 new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
357 goto Retry;
358 }
359 ifps = new_ifps;
360 new_ifps = 0;
361 pcb->ims.ifps = ifps;
362 }
363
364 /*
365 * Ensure that reserved parts of the environment are 0.
366 */
367 memset(&ifps->fp_save_state, 0, sizeof(struct i386_fp_save));
368
369 if (fp_kind == FP_387X) {
370 int i;
371
372 ifps->xfp_save_state.fp_control = user_fp_state->fp_control;
373 ifps->xfp_save_state.fp_status = user_fp_state->fp_status;
374 ifps->xfp_save_state.fp_tag = twd_i387_to_fxsr(user_fp_state->fp_tag);
375 ifps->xfp_save_state.fp_eip = user_fp_state->fp_eip;
376 ifps->xfp_save_state.fp_cs = user_fp_state->fp_cs;
377 ifps->xfp_save_state.fp_opcode = user_fp_state->fp_opcode;
378 ifps->xfp_save_state.fp_dp = user_fp_state->fp_dp;
379 ifps->xfp_save_state.fp_ds = user_fp_state->fp_ds;
380 for (i=0; i<8; i++)
381 memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs[i], sizeof(user_fp_regs[i]));
382 } else {
383 ifps->fp_save_state.fp_control = user_fp_state->fp_control;
384 ifps->fp_save_state.fp_status = user_fp_state->fp_status;
385 ifps->fp_save_state.fp_tag = user_fp_state->fp_tag;
386 ifps->fp_save_state.fp_eip = user_fp_state->fp_eip;
387 ifps->fp_save_state.fp_cs = user_fp_state->fp_cs;
388 ifps->fp_save_state.fp_opcode = user_fp_state->fp_opcode;
389 ifps->fp_save_state.fp_dp = user_fp_state->fp_dp;
390 ifps->fp_save_state.fp_ds = user_fp_state->fp_ds;
391 ifps->fp_regs = *user_fp_regs;
392 }
393
394 simple_unlock(&pcb->lock);
395 if (new_ifps != 0)
396 kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps);
397 }
398
399 return KERN_SUCCESS;
400}
401
402/*
403 * Get the floating-point state for a thread.
404 * If the thread is not the current thread, it is
405 * not running (held). Locking needed against
406 * concurrent fpu_set_state or fpu_get_state.
407 */
408kern_return_t
409fpu_get_state(thread, state)
410 thread_t thread;
411 register struct i386_float_state *state;
412{
413 register pcb_t pcb = thread->pcb;
414 register struct i386_fpsave_state *ifps;
415
416ASSERT_IPL(SPL0);
417 if (fp_kind == FP_NO)
418 return KERN_FAILURE;
419
420 simple_lock(&pcb->lock);
421 ifps = pcb->ims.ifps;
422 if (ifps == 0) {
423 /*
424 * No valid floating-point state.
425 */
426 simple_unlock(&pcb->lock);
427 memset(state, 0, sizeof(struct i386_float_state));
428 return KERN_SUCCESS;
429 }
430
431 /* Make sure we`ve got the latest fp state info */
432 /* If the live fpu state belongs to our target */
433#if NCPUS == 1
434 if (thread == fp_thread)
435#else
436 if (thread == current_thread())
437#endif
438 {
439 clear_ts();
440 fp_save(thread);
441 clear_fpu();
442 }
443
444 state->fpkind = fp_kind;
445 state->exc_status = 0;
446
447 {
448 register struct i386_fp_save *user_fp_state;
449 register struct i386_fp_regs *user_fp_regs;
450
451 state->initialized = ifps->fp_valid;
452
453 user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
454 user_fp_regs = (struct i386_fp_regs *)
455 &state->hw_state[sizeof(struct i386_fp_save)];
456
457 /*
458 * Ensure that reserved parts of the environment are 0.
459 */
460 memset(user_fp_state, 0, sizeof(struct i386_fp_save));
461
462 if (fp_kind == FP_387X) {
463 int i;
464
465 user_fp_state->fp_control = ifps->xfp_save_state.fp_control;
466 user_fp_state->fp_status = ifps->xfp_save_state.fp_status;
467 user_fp_state->fp_tag = twd_fxsr_to_i387(&ifps->xfp_save_state);
468 user_fp_state->fp_eip = ifps->xfp_save_state.fp_eip;
469 user_fp_state->fp_cs = ifps->xfp_save_state.fp_cs;
470 user_fp_state->fp_opcode = ifps->xfp_save_state.fp_opcode;
471 user_fp_state->fp_dp = ifps->xfp_save_state.fp_dp;
472 user_fp_state->fp_ds = ifps->xfp_save_state.fp_ds;
473 for (i=0; i<8; i++)
474 memcpy(&user_fp_regs[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs[i]));
475 } else {
476 user_fp_state->fp_control = ifps->fp_save_state.fp_control;
477 user_fp_state->fp_status = ifps->fp_save_state.fp_status;
478 user_fp_state->fp_tag = ifps->fp_save_state.fp_tag;
479 user_fp_state->fp_eip = ifps->fp_save_state.fp_eip;
480 user_fp_state->fp_cs = ifps->fp_save_state.fp_cs;
481 user_fp_state->fp_opcode = ifps->fp_save_state.fp_opcode;
482 user_fp_state->fp_dp = ifps->fp_save_state.fp_dp;
483 user_fp_state->fp_ds = ifps->fp_save_state.fp_ds;
484 *user_fp_regs = ifps->fp_regs;
485 }
486 }
487 simple_unlock(&pcb->lock);
488
489 return KERN_SUCCESS;
490}
491
492/*
493 * Initialize FPU.
494 *
495 * Raise exceptions for:
496 * invalid operation
497 * divide by zero
498 * overflow
499 *
500 * Use 53-bit precision.
501 */
502void fpinit()
503{
504 unsigned short control;
505
506ASSERT_IPL(SPL0);
507 clear_ts();
508 fninit();
509 fnstcw(&control);
510 control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
511 control |= (FPC_PC_53 | /* Set precision */
512 FPC_RC_RN | /* round-to-nearest */
513 FPC_ZE | /* Suppress zero-divide */
514 FPC_OE | /* and overflow */
515 FPC_UE | /* underflow */
516 FPC_IE | /* Allow NaNQs and +-INF */
517 FPC_DE | /* Allow denorms as operands */
518 FPC_PE); /* No trap for precision loss */
519 fldcw(control);
520}
521
522/*
523 * Coprocessor not present.
524 */
525void
526fpnoextflt()
527{
528 /*
529 * Enable FPU use.
530 */
531ASSERT_IPL(SPL0);
532 clear_ts();
533#if NCPUS == 1
534
535 /*
536 * If this thread`s state is in the FPU, we are done.
537 */
538 if (fp_thread == current_thread())
539 return;
540
541 /* Make sure we don't do fpsave() in fp_intr while doing fpsave()
542 * here if the current fpu instruction generates an error.
543 */
544 fwait();
545 /*
546 * If another thread`s state is in the FPU, save it.
547 */
548 if (fp_thread != THREAD_NULL) {
549 fp_save(fp_thread);
550 }
551
552 /*
553 * Give this thread the FPU.
554 */
555 fp_thread = current_thread();
556
557#endif /* NCPUS == 1 */
558
559 /*
560 * Load this thread`s state into the FPU.
561 */
562 fp_load(current_thread());
563}
564
565/*
566 * FPU overran end of segment.
567 * Re-initialize FPU. Floating point state is not valid.
568 */
569void
570fpextovrflt()
571{
572 register thread_t thread = current_thread();
573 register pcb_t pcb;
574 register struct i386_fpsave_state *ifps;
575
576#if NCPUS == 1
577
578 /*
579 * Is exception for the currently running thread?
580 */
581 if (fp_thread != thread) {
582 /* Uh oh... */
583 panic("fpextovrflt");
584 }
585#endif
586
587 /*
588 * This is a non-recoverable error.
589 * Invalidate the thread`s FPU state.
590 */
591 pcb = thread->pcb;
592 simple_lock(&pcb->lock);
593 ifps = pcb->ims.ifps;
594 pcb->ims.ifps = 0;
595 simple_unlock(&pcb->lock);
596
597 /*
598 * Re-initialize the FPU.
599 */
600 clear_ts();
601 fninit();
602
603 /*
604 * And disable access.
605 */
606 clear_fpu();
607
608 if (ifps)
609 kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
610
611 /*
612 * Raise exception.
613 */
614 i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
615 /*NOTREACHED*/
616}
617
618static int
619fphandleerr()
620{
621 register thread_t thread = current_thread();
622
623 /*
624 * Save the FPU context to the thread using it.
625 */
626#if NCPUS == 1
627 if (fp_thread == THREAD_NULL) {
 [2] Assuming 'fp_thread' is not equal to null
 [3] Taking false branch
628 printf("fphandleerr: FPU not belonging to anyone!\n");
629 clear_ts();
630 fninit();
631 clear_fpu();
632 return 1;
633 }
634
635 if (fp_thread != thread) {
 [4] Taking true branch
636 /*
637 * FPU exception is for a different thread.
638 * When that thread again uses the FPU an exception will be
639 * raised in fp_load. Remember the condition in fp_valid (== 2).
640 */
641 clear_ts();
642 fp_save(fp_thread);
 [5] Calling 'fp_save'
 [8] Returning from 'fp_save'
643 fp_thread->pcb->ims.ifps->fp_valid = 2;
 [9] Access to field 'fp_valid' results in a dereference of a null pointer (loaded from field 'ifps')
644 fninit();
645 clear_fpu();
646 /* leave fp_intr_thread THREAD_NULL */
647 return 1;
648 }
649#endif /* NCPUS == 1 */
650
651 /*
652 * Save the FPU state and turn off the FPU.
653 */
654 clear_ts();
655 fp_save(thread);
656 fninit();
657 clear_fpu();
658
659 return 0;
660}
661
662/*
663 * FPU error. Called by exception handler.
664 */
665void
666fpexterrflt()
667{
668 register thread_t thread = current_thread();
669
670 if (fphandleerr())
671 return;
672
673 /*
674 * Raise FPU exception.
675 * Locking not needed on pcb->ims.ifps,
676 * since thread is running.
677 */
678 i386_exception(EXC_ARITHMETIC,
679 EXC_I386_EXTERR,
680 fp_kind == FP_387X ?
681 thread->pcb->ims.ifps->xfp_save_state.fp_status :
682 thread->pcb->ims.ifps->fp_save_state.fp_status);
683 /*NOTREACHED*/
684}
685
686#ifndef MACH_RING1
687/*
688 * FPU error. Called by AST.
689 */
690void
691fpastintr()
692{
693 register thread_t thread = current_thread();
694
695ASSERT_IPL(SPL0);
696#if NCPUS == 1
697 /*
698 * Since FPU errors only occur on ESC or WAIT instructions,
699 * the current thread should own the FPU. If it didn`t,
700 * we should have gotten the task-switched interrupt first.
701 */
702 if (fp_thread != THREAD_NULL) {
703 panic("fpexterrflt");
704 return;
705 }
706
707 /*
708 * Check if we got a context switch between the interrupt and the AST
709 * This can happen if the interrupt arrived after the FPU AST was
710 * checked. In this case, raise the exception in fp_load when this
711 * thread next time uses the FPU. Remember exception condition in
712 * fp_valid (extended boolean 2).
713 */
714 if (fp_intr_thread != thread) {
715 if (fp_intr_thread == THREAD_NULL) {
716 panic("fpexterrflt: fp_intr_thread == THREAD_NULL");
717 return;
718 }
719 fp_intr_thread->pcb->ims.ifps->fp_valid = 2;
720 fp_intr_thread = THREAD_NULL;
721 return;
722 }
723 fp_intr_thread = THREAD_NULL;
724#else /* NCPUS == 1 */
725 /*
726 * Save the FPU state and turn off the FPU.
727 */
728 fp_save(thread);
729#endif /* NCPUS == 1 */
730
731 /*
732 * Raise FPU exception.
733 * Locking not needed on pcb->ims.ifps,
734 * since thread is running.
735 */
736 i386_exception(EXC_ARITHMETIC,
737 EXC_I386_EXTERR,
738 fp_kind == FP_387X ?
739 thread->pcb->ims.ifps->xfp_save_state.fp_status :
740 thread->pcb->ims.ifps->fp_save_state.fp_status);
741 /*NOTREACHED*/
742}
743#endif /* MACH_RING1 */
744
745/*
746 * Save FPU state.
747 *
748 * Locking not needed:
749 * . if called from fpu_get_state, pcb already locked.
750 * . if called from fpnoextflt or fp_intr, we are single-cpu
751 * . otherwise, thread is running.
752 */
753void
754fp_save(thread)
755 register thread_t thread;
756{
757 register pcb_t pcb = thread->pcb;
758 register struct i386_fpsave_state *ifps = pcb->ims.ifps;
759
760 if (ifps != 0 && !ifps->fp_valid) {
 [6] Assuming 'ifps' is equal to null
 [7] Assuming pointer value is null
761 /* registers are in FPU */
762 ifps->fp_valid = TRUE;
763 if (fp_kind == FP_387X)
764 fxsave(&ifps->xfp_save_state);
765 else
766 fnsave(&ifps->fp_save_state);
767 }
768}
769
770/*
771 * Restore FPU state from PCB.
772 *
773 * Locking not needed; always called on the current thread.
774 */
775void
776fp_load(thread)
777 register thread_t thread;
778{
779 register pcb_t pcb = thread->pcb;
780 register struct i386_fpsave_state *ifps;
781
782ASSERT_IPL(SPL0);
783 ifps = pcb->ims.ifps;
784 if (ifps == 0) {
785 ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
786 memset(ifps, 0, sizeof *ifps);
787 pcb->ims.ifps = ifps;
788 fpinit();
789#if 1
790/*
791 * I'm not sure this is needed. Does the fpu regenerate the interrupt in
792 * frstor or not? Without this code we may miss some exceptions, with it
793 * we might send too many exceptions.
794 */
795 } else if (ifps->fp_valid == 2) {
796 /* delayed exception pending */
797
798 ifps->fp_valid = TRUE;
799 clear_fpu();
800 /*
801 * Raise FPU exception.
802 * Locking not needed on pcb->ims.ifps,
803 * since thread is running.
804 */
805 i386_exception(EXC_ARITHMETIC,
806 EXC_I386_EXTERR,
807 fp_kind == FP_387X ?
808 thread->pcb->ims.ifps->xfp_save_state.fp_status :
809 thread->pcb->ims.ifps->fp_save_state.fp_status);
810 /*NOTREACHED*/
811#endif
812 } else if (! ifps->fp_valid) {
813 printf("fp_load: invalid FPU state!\n");
814 fninit ();
815 } else {
816 if (fp_kind == FP_387X)
817 fxrstor(ifps->xfp_save_state);
818 else
819 frstor(ifps->fp_save_state);
820 }
821 ifps->fp_valid = FALSE; /* in FPU */
822}
823
824/*
825 * Allocate and initialize FP state for current thread.
826 * Don't load state.
827 *
828 * Locking not needed; always called on the current thread.
829 */
830void
831fp_state_alloc()
832{
833 pcb_t pcb = current_thread()->pcb;
834 struct i386_fpsave_state *ifps;
835
836 ifps = (struct i386_fpsave_state *)kmem_cache_alloc(&ifps_cache);
837 memset(ifps, 0, sizeof *ifps);
838 pcb->ims.ifps = ifps;
839
840 ifps->fp_valid = TRUE;
841
842 if (fp_kind == FP_387X) {
843 ifps->xfp_save_state.fp_control = (0x037f
844 & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
845 | (FPC_PC_53|FPC_IC_AFF);
846 ifps->xfp_save_state.fp_status = 0;
847 ifps->xfp_save_state.fp_tag = 0xffff; /* all empty */
848 if (CPU_HAS_FEATURE(CPU_FEATURE_SSE))
849 ifps->xfp_save_state.fp_mxcsr = 0x1f80;
850 } else {
851 ifps->fp_save_state.fp_control = (0x037f
852 & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
853 | (FPC_PC_53|FPC_IC_AFF);
854 ifps->fp_save_state.fp_status = 0;
855 ifps->fp_save_state.fp_tag = 0xffff; /* all empty */
856 }
857}
858
859#if AT386 && !defined(MACH_XEN)
860/*
861 * Handle a coprocessor error interrupt on the AT386.
862 * This comes in on line 5 of the slave PIC at SPL1.
863 */
864void
865fpintr(int unit)
866{
867 spl_t s;
868 thread_t thread = current_thread();
869
870ASSERT_IPL(SPL1);
871 /*
872 * Turn off the extended 'busy' line.
873 */
874 outb(0xf0, 0);
875
876 if (fphandleerr())
 [1] Calling 'fphandleerr'
877 return;
878
879#if NCPUS == 1
880 if (fp_intr_thread != THREAD_NULL && fp_intr_thread != thread)
881 panic("fp_intr: already caught intr");
882 fp_intr_thread = thread;
883#endif /* NCPUS == 1 */
884
885 /*
886 * Since we are running on the interrupt stack, we must
887 * signal the thread to take the exception when we return
888 * to user mode. Use an AST to do this.
889 *
890 * Don`t set the thread`s AST field. If the thread is
891 * descheduled before it takes the AST, it will notice
892 * the FPU error when it reloads its FPU state.
893 */
894 s = splsched();
895 ast_on(cpu_number(), AST_I386_FP);
896 splx(s);
897}
898#endif /* AT386 */