tensorflow-core-framework-bfloat16.h
2019-05-31 | tensorflow-core-framework
Permalink: http://codeeyes.net/archives/tensorflow-core-framework-bfloat16_h.html

```cpp
#ifndef TENSORFLOW_CORE_FRAMEWORK_BFLOAT16_H_
#define TENSORFLOW_CORE_FRAMEWORK_BFLOAT16_H_

#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/types.h"

#if defined(PLATFORM_WINDOWS)
#include "tensorflow/core/platform/windows/cpu_info.h"
#endif

// Compact 16-bit encoding of floating point numbers. This representation uses
// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa. It
// is assumed that floats are in IEEE 754 format so the representation is just
// bits 16-31 of a single precision float.
//
// NOTE: The IEEE floating point standard defines a float16 format that
// is different than this format (it has fewer bits of exponent and more
// bits of mantissa). We don't use that format here because conversion
// to/from 32-bit floats is more complex for that format, and the
// conversion for this format is very simple.
//
// Because of the existing IEEE float16 type, we do not name our representation
// "float16" but just use "uint16".
//
// <-----our 16bits float------->
// s e e e e e e e e f f f f f f f f f f f f f f f f f f f f f f f
// <------------------------------float-------------------------->
// 3 3             2 2             1 1                           0
// 1 0             3 2             5 4                           0
//
//
// This type only supports conversion back and forth with float.
//
// This file must be compilable by nvcc.
//
// The type is defined in framework/numeric_types.h.

namespace tensorflow {

// Conversion routines between an array of float and bfloat16 of
// "size".
void FloatToBFloat16(const float* src, bfloat16* dst, int64 size);
void BFloat16ToFloat(const bfloat16* src, float* dst, int64 size);

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_BFLOAT16_H_
```
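The header only declares the array conversion routines; the `bfloat16` type itself lives in `framework/numeric_types.h`. As a minimal, self-contained sketch of the bit-level scheme the comment describes (keep the upper 16 bits of an IEEE 754 float; widen back by zero-filling the low 16 mantissa bits), the following standalone program works on plain `uint16_t` rather than TensorFlow's `bfloat16` type. The names `FloatToBFloat16Bits` and `BFloat16BitsToFloat` are illustrative, not part of TensorFlow's API, and the plain truncation shown here is a sketch of the encoding, not necessarily TensorFlow's exact rounding behavior.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative only: truncate an IEEE 754 float to its upper 16 bits
// (sign, 8 exponent bits, top 7 mantissa bits), per the diagram above.
static uint16_t FloatToBFloat16Bits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // bit-cast without violating aliasing
  return static_cast<uint16_t>(bits >> 16);
}

// Widen back to float by placing the 16 bits in the high half-word and
// zero-filling the 16 low mantissa bits.
static float BFloat16BitsToFloat(uint16_t b) {
  uint32_t bits = static_cast<uint32_t>(b) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  float x = 3.14159265f;
  uint16_t b = FloatToBFloat16Bits(x);
  float back = BFloat16BitsToFloat(b);
  // With a 7-bit mantissa the round trip keeps only ~2-3 significant
  // decimal digits: 3.14159265 comes back as 3.140625.
  std::printf("x = %.8f  bits = 0x%04x  round-trip = %.8f\n", x, b, back);
  return 0;
}
```

Because the exponent field is identical to float's, this format keeps float's full dynamic range and pays for its compactness in precision alone, and converting in either direction is just a 16-bit shift, which is the simplicity the header comment contrasts against IEEE float16.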