1*4882a593Smuzhiyun #ifndef __GLX_unpack_h__ 2*4882a593Smuzhiyun #define __GLX_unpack_h__ 3*4882a593Smuzhiyun 4*4882a593Smuzhiyun /* 5*4882a593Smuzhiyun * SGI FREE SOFTWARE LICENSE B (Version 2.0, Sept. 18, 2008) 6*4882a593Smuzhiyun * Copyright (C) 1991-2000 Silicon Graphics, Inc. All Rights Reserved. 7*4882a593Smuzhiyun * 8*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a 9*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"), 10*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation 11*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the 13*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions: 14*4882a593Smuzhiyun * 15*4882a593Smuzhiyun * The above copyright notice including the dates of first publication and 16*4882a593Smuzhiyun * either this permission notice or a reference to 17*4882a593Smuzhiyun * http://oss.sgi.com/projects/FreeB/ 18*4882a593Smuzhiyun * shall be included in all copies or substantial portions of the Software. 19*4882a593Smuzhiyun * 20*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21*4882a593Smuzhiyun * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23*4882a593Smuzhiyun * SILICON GRAPHICS, INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 24*4882a593Smuzhiyun * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF 25*4882a593Smuzhiyun * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 26*4882a593Smuzhiyun * SOFTWARE. 27*4882a593Smuzhiyun * 28*4882a593Smuzhiyun * Except as contained in this notice, the name of Silicon Graphics, Inc. 
29*4882a593Smuzhiyun * shall not be used in advertising or otherwise to promote the sale, use or 30*4882a593Smuzhiyun * other dealings in this Software without prior written authorization from 31*4882a593Smuzhiyun * Silicon Graphics, Inc. 32*4882a593Smuzhiyun */ 33*4882a593Smuzhiyun 34*4882a593Smuzhiyun #define __GLX_PAD(s) (((s)+3) & (GLuint)~3) 35*4882a593Smuzhiyun 36*4882a593Smuzhiyun /* 37*4882a593Smuzhiyun ** Fetch the context-id out of a SingleReq request pointed to by pc. 38*4882a593Smuzhiyun */ 39*4882a593Smuzhiyun #define __GLX_GET_SINGLE_CONTEXT_TAG(pc) (((xGLXSingleReq*)pc)->contextTag) 40*4882a593Smuzhiyun #define __GLX_GET_VENDPRIV_CONTEXT_TAG(pc) (((xGLXVendorPrivateReq*)pc)->contextTag) 41*4882a593Smuzhiyun 42*4882a593Smuzhiyun /* 43*4882a593Smuzhiyun ** Fetch a double from potentially unaligned memory. 44*4882a593Smuzhiyun */ 45*4882a593Smuzhiyun #ifdef __GLX_ALIGN64 46*4882a593Smuzhiyun #define __GLX_MEM_COPY(dst,src,n) memmove(dst,src,n) 47*4882a593Smuzhiyun #define __GLX_GET_DOUBLE(dst,src) __GLX_MEM_COPY(&dst,src,8) 48*4882a593Smuzhiyun #else 49*4882a593Smuzhiyun #define __GLX_GET_DOUBLE(dst,src) (dst) = *((GLdouble*)(src)) 50*4882a593Smuzhiyun #endif 51*4882a593Smuzhiyun 52*4882a593Smuzhiyun extern void __glXMemInit(void); 53*4882a593Smuzhiyun 54*4882a593Smuzhiyun extern xGLXSingleReply __glXReply; 55*4882a593Smuzhiyun 56*4882a593Smuzhiyun #define __GLX_BEGIN_REPLY(size) \ 57*4882a593Smuzhiyun __glXReply.length = __GLX_PAD(size) >> 2; \ 58*4882a593Smuzhiyun __glXReply.type = X_Reply; \ 59*4882a593Smuzhiyun __glXReply.sequenceNumber = client->sequence; 60*4882a593Smuzhiyun 61*4882a593Smuzhiyun #define __GLX_SEND_HEADER() \ 62*4882a593Smuzhiyun WriteToClient (client, sz_xGLXSingleReply, &__glXReply); 63*4882a593Smuzhiyun 64*4882a593Smuzhiyun #define __GLX_PUT_RETVAL(a) \ 65*4882a593Smuzhiyun __glXReply.retval = (a); 66*4882a593Smuzhiyun 67*4882a593Smuzhiyun #define __GLX_PUT_SIZE(a) \ 68*4882a593Smuzhiyun __glXReply.size = (a); 
/* The render-mode result travels in the pad3 field of the reply header. */
#define __GLX_PUT_RENDERMODE(m) \
	__glXReply.pad3 = (m)

/*
** Get a buffer to hold returned data, with the given alignment.  If we have
** to realloc, allocate size+align, in case the pointer has to be bumped for
** alignment.  The answerBuffer should already be aligned.
**
** Expects `answerBuffer' (a stack array) to be in scope; on allocation
** failure the expansion returns BadAlloc from the enclosing function,
** leaving the client's existing returnBuf intact (the previous code
** overwrote returnBuf with realloc's result directly, leaking the old
** buffer and leaving a NULL pointer paired with a stale returnBufSize).
**
** NOTE: the cast (long)res below assumes a long is large enough to hold a
** pointer.
*/
#define __GLX_GET_ANSWER_BUFFER(res,cl,size,align) \
    if ((size) > sizeof(answerBuffer)) { \
	int bump; \
	if ((cl)->returnBufSize < (size)+(align)) { \
	    GLbyte *newBuf = (GLbyte*)realloc((cl)->returnBuf, \
					      (size)+(align)); \
	    if (!newBuf) { \
		return BadAlloc; \
	    } \
	    (cl)->returnBuf = newBuf; \
	    (cl)->returnBufSize = (size)+(align); \
	} \
	res = (char*)cl->returnBuf; \
	bump = (long)(res) % (align); \
	if (bump) res += (align) - (bump); \
    } else { \
	res = (char *)answerBuffer; \
    }

/*
** Store a single scalar result in the reply header's pad3 field.
** `answer' must point at the result value.
*/
#define __GLX_PUT_BYTE() \
	*(GLbyte *)&__glXReply.pad3 = *(GLbyte *)answer

#define __GLX_PUT_SHORT() \
	*(GLshort *)&__glXReply.pad3 = *(GLshort *)answer

#define __GLX_PUT_INT() \
	*(GLint *)&__glXReply.pad3 = *(GLint *)answer

#define __GLX_PUT_FLOAT() \
	*(GLfloat *)&__glXReply.pad3 = *(GLfloat *)answer
110*4882a593Smuzhiyun 111*4882a593Smuzhiyun #define __GLX_PUT_DOUBLE() \ 112*4882a593Smuzhiyun *(GLdouble *)&__glXReply.pad3 = *(GLdouble *)answer 113*4882a593Smuzhiyun 114*4882a593Smuzhiyun #define __GLX_SEND_BYTE_ARRAY(len) \ 115*4882a593Smuzhiyun WriteToClient(client, __GLX_PAD((len)*__GLX_SIZE_INT8), answer) 116*4882a593Smuzhiyun 117*4882a593Smuzhiyun #define __GLX_SEND_SHORT_ARRAY(len) \ 118*4882a593Smuzhiyun WriteToClient(client, __GLX_PAD((len)*__GLX_SIZE_INT16), answer) 119*4882a593Smuzhiyun 120*4882a593Smuzhiyun #define __GLX_SEND_INT_ARRAY(len) \ 121*4882a593Smuzhiyun WriteToClient(client, (len)*__GLX_SIZE_INT32, answer) 122*4882a593Smuzhiyun 123*4882a593Smuzhiyun #define __GLX_SEND_FLOAT_ARRAY(len) \ 124*4882a593Smuzhiyun WriteToClient(client, (len)*__GLX_SIZE_FLOAT32, answer) 125*4882a593Smuzhiyun 126*4882a593Smuzhiyun #define __GLX_SEND_DOUBLE_ARRAY(len) \ 127*4882a593Smuzhiyun WriteToClient(client, (len)*__GLX_SIZE_FLOAT64, answer) 128*4882a593Smuzhiyun 129*4882a593Smuzhiyun #define __GLX_SEND_VOID_ARRAY(len) __GLX_SEND_BYTE_ARRAY(len) 130*4882a593Smuzhiyun #define __GLX_SEND_UBYTE_ARRAY(len) __GLX_SEND_BYTE_ARRAY(len) 131*4882a593Smuzhiyun #define __GLX_SEND_USHORT_ARRAY(len) __GLX_SEND_SHORT_ARRAY(len) 132*4882a593Smuzhiyun #define __GLX_SEND_UINT_ARRAY(len) __GLX_SEND_INT_ARRAY(len) 133*4882a593Smuzhiyun 134*4882a593Smuzhiyun /* 135*4882a593Smuzhiyun ** PERFORMANCE NOTE: 136*4882a593Smuzhiyun ** Machine dependent optimizations abound here; these swapping macros can 137*4882a593Smuzhiyun ** conceivably be replaced with routines that do the job faster. 
138*4882a593Smuzhiyun */ 139*4882a593Smuzhiyun #define __GLX_DECLARE_SWAP_VARIABLES \ 140*4882a593Smuzhiyun GLbyte sw 141*4882a593Smuzhiyun 142*4882a593Smuzhiyun #define __GLX_DECLARE_SWAP_ARRAY_VARIABLES \ 143*4882a593Smuzhiyun GLbyte *swapPC; \ 144*4882a593Smuzhiyun GLbyte *swapEnd 145*4882a593Smuzhiyun 146*4882a593Smuzhiyun #define __GLX_SWAP_INT(pc) \ 147*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[0]; \ 148*4882a593Smuzhiyun ((GLbyte *)(pc))[0] = ((GLbyte *)(pc))[3]; \ 149*4882a593Smuzhiyun ((GLbyte *)(pc))[3] = sw; \ 150*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[1]; \ 151*4882a593Smuzhiyun ((GLbyte *)(pc))[1] = ((GLbyte *)(pc))[2]; \ 152*4882a593Smuzhiyun ((GLbyte *)(pc))[2] = sw; 153*4882a593Smuzhiyun 154*4882a593Smuzhiyun #define __GLX_SWAP_SHORT(pc) \ 155*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[0]; \ 156*4882a593Smuzhiyun ((GLbyte *)(pc))[0] = ((GLbyte *)(pc))[1]; \ 157*4882a593Smuzhiyun ((GLbyte *)(pc))[1] = sw; 158*4882a593Smuzhiyun 159*4882a593Smuzhiyun #define __GLX_SWAP_DOUBLE(pc) \ 160*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[0]; \ 161*4882a593Smuzhiyun ((GLbyte *)(pc))[0] = ((GLbyte *)(pc))[7]; \ 162*4882a593Smuzhiyun ((GLbyte *)(pc))[7] = sw; \ 163*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[1]; \ 164*4882a593Smuzhiyun ((GLbyte *)(pc))[1] = ((GLbyte *)(pc))[6]; \ 165*4882a593Smuzhiyun ((GLbyte *)(pc))[6] = sw; \ 166*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[2]; \ 167*4882a593Smuzhiyun ((GLbyte *)(pc))[2] = ((GLbyte *)(pc))[5]; \ 168*4882a593Smuzhiyun ((GLbyte *)(pc))[5] = sw; \ 169*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[3]; \ 170*4882a593Smuzhiyun ((GLbyte *)(pc))[3] = ((GLbyte *)(pc))[4]; \ 171*4882a593Smuzhiyun ((GLbyte *)(pc))[4] = sw; 172*4882a593Smuzhiyun 173*4882a593Smuzhiyun #define __GLX_SWAP_FLOAT(pc) \ 174*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[0]; \ 175*4882a593Smuzhiyun ((GLbyte *)(pc))[0] = ((GLbyte *)(pc))[3]; \ 176*4882a593Smuzhiyun ((GLbyte *)(pc))[3] = sw; \ 177*4882a593Smuzhiyun sw = ((GLbyte *)(pc))[1]; \ 178*4882a593Smuzhiyun ((GLbyte 
*)(pc))[1] = ((GLbyte *)(pc))[2]; \ 179*4882a593Smuzhiyun ((GLbyte *)(pc))[2] = sw; 180*4882a593Smuzhiyun 181*4882a593Smuzhiyun #define __GLX_SWAP_INT_ARRAY(pc, count) \ 182*4882a593Smuzhiyun swapPC = ((GLbyte *)(pc)); \ 183*4882a593Smuzhiyun swapEnd = ((GLbyte *)(pc)) + (count)*__GLX_SIZE_INT32;\ 184*4882a593Smuzhiyun while (swapPC < swapEnd) { \ 185*4882a593Smuzhiyun __GLX_SWAP_INT(swapPC); \ 186*4882a593Smuzhiyun swapPC += __GLX_SIZE_INT32; \ 187*4882a593Smuzhiyun } 188*4882a593Smuzhiyun 189*4882a593Smuzhiyun #define __GLX_SWAP_SHORT_ARRAY(pc, count) \ 190*4882a593Smuzhiyun swapPC = ((GLbyte *)(pc)); \ 191*4882a593Smuzhiyun swapEnd = ((GLbyte *)(pc)) + (count)*__GLX_SIZE_INT16;\ 192*4882a593Smuzhiyun while (swapPC < swapEnd) { \ 193*4882a593Smuzhiyun __GLX_SWAP_SHORT(swapPC); \ 194*4882a593Smuzhiyun swapPC += __GLX_SIZE_INT16; \ 195*4882a593Smuzhiyun } 196*4882a593Smuzhiyun 197*4882a593Smuzhiyun #define __GLX_SWAP_DOUBLE_ARRAY(pc, count) \ 198*4882a593Smuzhiyun swapPC = ((GLbyte *)(pc)); \ 199*4882a593Smuzhiyun swapEnd = ((GLbyte *)(pc)) + (count)*__GLX_SIZE_FLOAT64;\ 200*4882a593Smuzhiyun while (swapPC < swapEnd) { \ 201*4882a593Smuzhiyun __GLX_SWAP_DOUBLE(swapPC); \ 202*4882a593Smuzhiyun swapPC += __GLX_SIZE_FLOAT64; \ 203*4882a593Smuzhiyun } 204*4882a593Smuzhiyun 205*4882a593Smuzhiyun #define __GLX_SWAP_FLOAT_ARRAY(pc, count) \ 206*4882a593Smuzhiyun swapPC = ((GLbyte *)(pc)); \ 207*4882a593Smuzhiyun swapEnd = ((GLbyte *)(pc)) + (count)*__GLX_SIZE_FLOAT32;\ 208*4882a593Smuzhiyun while (swapPC < swapEnd) { \ 209*4882a593Smuzhiyun __GLX_SWAP_FLOAT(swapPC); \ 210*4882a593Smuzhiyun swapPC += __GLX_SIZE_FLOAT32; \ 211*4882a593Smuzhiyun } 212*4882a593Smuzhiyun 213*4882a593Smuzhiyun #define __GLX_SWAP_REPLY_HEADER() \ 214*4882a593Smuzhiyun __GLX_SWAP_SHORT(&__glXReply.sequenceNumber); \ 215*4882a593Smuzhiyun __GLX_SWAP_INT(&__glXReply.length); 216*4882a593Smuzhiyun 217*4882a593Smuzhiyun #define __GLX_SWAP_REPLY_RETVAL() \ 218*4882a593Smuzhiyun 
__GLX_SWAP_INT(&__glXReply.retval) 219*4882a593Smuzhiyun 220*4882a593Smuzhiyun #define __GLX_SWAP_REPLY_SIZE() \ 221*4882a593Smuzhiyun __GLX_SWAP_INT(&__glXReply.size) 222*4882a593Smuzhiyun 223*4882a593Smuzhiyun #endif /* !__GLX_unpack_h__ */ 224