/* cvmx-fau.h revision 210284 */
/***********************license start***************
 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the hardware Fetch and Add Unit.
 *
 * <hr>$Revision: 41586 $<hr>
 */

#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__

#ifndef CVMX_DONT_INCLUDE_CONFIG
#include "cvmx-config.h"
#else
typedef int cvmx_fau_reg_64_t;
typedef int cvmx_fau_reg_32_t;
typedef int cvmx_fau_reg_16_t;
typedef int cvmx_fau_reg_8_t;
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Octeon Fetch and Add Unit (FAU)
 */

#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
#define CVMX_FAU_BITS_SCRADDR       63,56
#define CVMX_FAU_BITS_LEN           55,48
#define CVMX_FAU_BITS_INEVAL        35,14
#define CVMX_FAU_BITS_TAGWAIT       13,13
#define CVMX_FAU_BITS_NOADD         13,13
#define CVMX_FAU_BITS_SIZE          12,11
#define CVMX_FAU_BITS_REGISTER      10,0

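/*
 * Illustrative note (not part of the original header): the bit ranges above
 * describe how an FAU command is packed into a single 64 bit I/O address.
 * For example, an atomic load that adds 5 to 64 bit FAU register 8 would be
 * encoded roughly as
 *
 *     CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS)
 *         | cvmx_build_bits(CVMX_FAU_BITS_INEVAL, 5)
 *         | cvmx_build_bits(CVMX_FAU_BITS_REGISTER, 8)
 *
 * which matches what __cvmx_fau_atomic_address() below builds (with
 * tagwait = 0).
 */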

typedef enum {
   CVMX_FAU_OP_SIZE_8  = 0,
   CVMX_FAU_OP_SIZE_16 = 1,
   CVMX_FAU_OP_SIZE_32 = 2,
   CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t    error   : 1;
    int64_t     value   : 63;
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t    error   : 1;
    int32_t     value   : 31;
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t    error   : 1;
    int16_t     value   : 15;
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t    error   : 1;
    int8_t      value   : 7;
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
   uint64_t        u64;
   struct {
      uint64_t     invalid: 1;
      uint64_t     data   :63; // unpredictable if invalid is set
   } s;
} cvmx_fau_async_tagwait_result_t;


/**
 * @INTERNAL
 * Builds a store I/O address for writing to the FAU
 *
 * @param noadd  0 = Store value is atomically added to the current value
 *               1 = Store value is atomically written over the current value
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 2 for 16 bit access.
 *               - Step by 4 for 32 bit access.
 *               - Step by 8 for 64 bit access.
 * @return Address to store for atomic update
 */
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
{
    return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
            cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
            cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
}


/**
 * @INTERNAL
 * Builds an I/O address for accessing the FAU
 *
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @return Address to read from for atomic update
 */
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, int64_t value)
{
    return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
            cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
            cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
            cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
}


/**
 * Perform an atomic 64 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
    return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
}
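/*
 * Usage sketch (illustrative, not from the original header): keep a packet
 * counter in a 64 bit FAU register and read its previous value while
 * incrementing it.  Register 0 is only a placeholder; real code should use a
 * register allocated through cvmx-config.h.
 *
 *     cvmx_fau_reg_64_t counter = (cvmx_fau_reg_64_t)0;    // hypothetical register
 *     int64_t old = cvmx_fau_fetch_and_add64(counter, 1);  // value before the add
 */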


/**
 * Perform an atomic 32 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
    return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
}


/**
 * Perform an atomic 16 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Value of the register before the update
 */
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
    return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
}


/**
 * Perform an atomic 8 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Value of the register before the update
 */
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
    return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
}


/**
 * Perform an atomic 64 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 8 for 64 bit access.
 * @param value  Signed value to add.
 *               Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait64_t cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
    union
    {
        uint64_t i64;
        cvmx_fau_tagwait64_t t;
    } result;
    result.i64 = cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
    return result.t;
}
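/*
 * Usage sketch (illustrative): the tagwait variants return a small struct,
 * so callers must test the error bit before using the value.  "counter" is
 * the hypothetical register from the sketch above.
 *
 *     cvmx_fau_tagwait64_t r = cvmx_fau_tagwait_fetch_and_add64(counter, 1);
 *     if (r.error)
 *         ;   // timed out waiting for the tag switch; r.value is not meaningful
 *     else
 *         ;   // r.value holds the register contents before the add
 */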


/**
 * Perform an atomic 32 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 4 for 32 bit access.
 * @param value  Signed value to add.
 *               Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait32_t cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
    union
    {
        uint64_t i32;
        cvmx_fau_tagwait32_t t;
    } result;
    result.i32 = cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
    return result.t;
}


/**
 * Perform an atomic 16 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 2 for 16 bit access.
 * @param value  Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait16_t cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
    union
    {
        uint64_t i16;
        cvmx_fau_tagwait16_t t;
    } result;
    result.i16 = cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
    return result.t;
}


/**
 * Perform an atomic 8 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 * @param value  Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait8_t cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
    union
    {
        uint64_t i8;
        cvmx_fau_tagwait8_t t;
    } result;
    result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
    return result.t;
}


/**
 * @INTERNAL
 * Builds I/O data for async operations
 *
 * @param scraddr Scratch pad byte address to write to.  Must be 8 byte aligned.
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param size    The size of the operation:
 *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
 *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
 *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
 *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @return Data to write using cvmx_send_single
 */
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, uint64_t tagwait,
                                          cvmx_fau_op_size_t size, uint64_t reg)
{
    return (CVMX_FAU_LOAD_IO_ADDRESS |
                      cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr>>3) |
                      cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
                      cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
                      cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
                      cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
                      cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
}


/**
 * Perform an async atomic 64 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
}
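/*
 * Usage sketch (illustrative): an async add issues an IOBDMA command and
 * returns immediately; the old value shows up later in the core's scratchpad.
 * CVMX_SYNCIOBDMA and cvmx_scratch_read64() are assumed to come from other
 * cvmx headers (cvmx-asm.h / cvmx-scratch.h); the scratchpad offset is a
 * hypothetical value reserved by the application.
 *
 *     #define COUNTER_SCR 0   // hypothetical 8 byte aligned scratchpad offset
 *     cvmx_fau_async_fetch_and_add64(COUNTER_SCR, counter, 1);
 *     // ... overlap other work here ...
 *     CVMX_SYNCIOBDMA;        // wait for the IOBDMA response to land
 *     int64_t old = cvmx_scratch_read64(COUNTER_SCR);
 */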


/**
 * Perform an async atomic 32 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
}


/**
 * Perform an async atomic 16 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
}


/**
 * Perform an async atomic 8 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
}


/**
 * Perform an async atomic 64 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
}
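/*
 * Usage sketch (illustrative): an async tagwait result is best read back
 * through cvmx_fau_async_tagwait_result_t, whose invalid bit is set on
 * timeout.  COUNTER_SCR and cvmx_scratch_read64() are the assumptions noted
 * in the earlier sketch.
 *
 *     cvmx_fau_async_tagwait_fetch_and_add64(COUNTER_SCR, counter, 1);
 *     CVMX_SYNCIOBDMA;
 *     cvmx_fau_async_tagwait_result_t r;
 *     r.u64 = cvmx_scratch_read64(COUNTER_SCR);
 *     if (!r.s.invalid)
 *         ;   // r.s.data holds the register contents before the add
 */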


/**
 * Perform an async atomic 32 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
}


/**
 * Perform an async atomic 16 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
}


/**
 * Perform an async atomic 8 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
{
    cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
}




/**
 * Perform an atomic 64 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
    cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
}
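/*
 * Usage sketch (illustrative): when the previous value is not needed, the
 * store-based add does not wait for a load response, so it is the usual
 * choice for pure statistics updates.
 *
 *     cvmx_fau_atomic_add64(counter, 1);   // fire-and-forget increment
 */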


/**
 * Perform an atomic 32 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
    cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
}


/**
 * Perform an atomic 16 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
    cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
}


/**
 * Perform an atomic 8 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 */
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
    cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
}


/**
 * Perform an atomic 64 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
{
    cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
}
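/*
 * Usage sketch (illustrative): the write form replaces the register contents,
 * which is handy for (re)initializing a counter before use.
 *
 *     cvmx_fau_atomic_write64(counter, 0);   // reset the hypothetical counter
 */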


/**
 * Perform an atomic 32 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
    cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
}


/**
 * Perform an atomic 16 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
    cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
}


/**
 * Perform an atomic 8 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to write.
 */
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
    cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
}

#ifdef __cplusplus
}
#endif

#endif  /* __CVMX_FAU_H__ */