/*
Open Asset Import Library (assimp)
----------------------------------------------------------------------

Copyright (c) 2006-2019, assimp team


All rights reserved.

Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the
following conditions are met:

* Redistributions of source code must retain the above
  copyright notice, this list of conditions and the
  following disclaimer.

* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the
  following disclaimer in the documentation and/or other
  materials provided with the distribution.

* Neither the name of the assimp team, nor the names of its
  contributors may be used to endorse or promote products
  derived from this software without specific prior
  written permission of the assimp team.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

----------------------------------------------------------------------
*/

/** @file FBXTokenizer.cpp
 *  @brief Implementation of the FBX broadphase lexer
 */

#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER

// tab width for logging columns
#define ASSIMP_FBX_TAB_WIDTH 4

#include <assimp/ParsingUtils.h>

#include "FBXTokenizer.h"
#include "FBXUtil.h"
#include <assimp/Exceptional.h>

namespace Assimp {
namespace FBX {

// ------------------------------------------------------------------------------------------------
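// Tokens do not own their text: [sbegin, send) points into the input buffer,
// which therefore has to outlive the token. DEBUG builds additionally keep a
// std::string copy ('contents') so tokens are easy to inspect in a debugger.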
Token::Token(const char* sbegin, const char* send, TokenType type, unsigned int line, unsigned int column)
    :
#ifdef DEBUG
    contents(sbegin, static_cast<size_t>(send-sbegin)),
#endif
    sbegin(sbegin)
    , send(send)
    , type(type)
    , line(line)
    , column(column)
{
    ai_assert(sbegin);
    ai_assert(send);

    // tokens must be of non-zero length
    ai_assert(static_cast<size_t>(send-sbegin) > 0);
}

// ------------------------------------------------------------------------------------------------
Token::~Token()
{
}

namespace {

// ------------------------------------------------------------------------------------------------
// signal tokenization error, this is always unrecoverable. Throws DeadlyImportError.
AI_WONT_RETURN void TokenizeError(const std::string& message, unsigned int line, unsigned int column) AI_WONT_RETURN_SUFFIX;
AI_WONT_RETURN void TokenizeError(const std::string& message, unsigned int line, unsigned int column)
{
    throw DeadlyImportError(Util::AddLineAndColumn("FBX-Tokenize", message, line, column));
}


// ------------------------------------------------------------------------------------------------
// process a potential data token up to 'cur', adding it to 'output_tokens'.
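// 'start' and 'end' are in-out parameters: they delimit the pending token
// (inclusive on both sides) and are reset to NULL once the token has been
// consumed. With 'must_have_token' set, a missing token becomes a hard error.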
void ProcessDataToken( TokenList& output_tokens, const char*& start, const char*& end,
    unsigned int line,
    unsigned int column,
    TokenType type = TokenType_DATA,
    bool must_have_token = false)
{
    if (start && end) {
        // sanity check:
        // tokens should have no whitespace outside quoted text and [start,end] should
        // properly delimit the valid range.
        bool in_double_quotes = false;
        for (const char* c = start; c != end + 1; ++c) {
            if (*c == '\"') {
                in_double_quotes = !in_double_quotes;
            }

            if (!in_double_quotes && IsSpaceOrNewLine(*c)) {
                TokenizeError("unexpected whitespace in token", line, column);
            }
        }

        if (in_double_quotes) {
            TokenizeError("non-terminated double quotes", line, column);
        }

        output_tokens.push_back(new_Token(start,end + 1,type,line,column));
    }
    else if (must_have_token) {
        TokenizeError("unexpected character, expected data token", line, column);
    }

    start = end = NULL;
}

}

// ------------------------------------------------------------------------------------------------
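// Entry point of the broadphase pass: split the NUL-terminated FBX ASCII input
// into a flat stream of tokens, appended to 'output_tokens'.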
void Tokenize(TokenList& output_tokens, const char* input)
{
    ai_assert(input);

    // line and column numbers are one-based
    unsigned int line = 1;
    unsigned int column = 1;

    bool comment = false;
    bool in_double_quotes = false;
    bool pending_data_token = false;

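    // [token_begin, token_end] delimits the current raw token (inclusive on
    // both sides); both are NULL while no token is pending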
    const char* token_begin = NULL, *token_end = NULL;
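    // tabs advance the reported column by ASSIMP_FBX_TAB_WIDTH so diagnostics
    // match a tab-expanded view of the file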
    for (const char* cur = input; *cur; column += (*cur == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1), ++cur) {
        const char c = *cur;

        if (IsLineEnd(c)) {
            comment = false;

            column = 0;
            ++line;
        }

        if(comment) {
            continue;
        }

        if(in_double_quotes) {
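            // everything between double quotes is taken verbatim; no escape
            // sequences are handled here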
| 163 | if (c == '\"') { |
| 164 | in_double_quotes = false; |
| 165 | token_end = cur; |
| 166 | |
| 167 | ProcessDataToken(output_tokens,start&: token_begin,end&: token_end,line,column); |
| 168 | pending_data_token = false; |
| 169 | } |
| 170 | continue; |
| 171 | } |
| 172 | |
| 173 | switch(c) |
| 174 | { |
| 175 | case '\"': |
| 176 | if (token_begin) { |
| 177 | TokenizeError(message: "unexpected double-quote" , line, column); |
| 178 | } |
| 179 | token_begin = cur; |
| 180 | in_double_quotes = true; |
| 181 | continue; |
| 182 | |
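        // ';' begins a comment that runs to the end of the current line;
        // flush any pending token first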
        case ';':
            ProcessDataToken(output_tokens,token_begin,token_end,line,column);
            comment = true;
            continue;

        case '{':
            ProcessDataToken(output_tokens,token_begin,token_end,line,column);
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_OPEN_BRACKET,line,column));
            continue;

        case '}':
            ProcessDataToken(output_tokens,token_begin,token_end,line,column);
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_CLOSE_BRACKET,line,column));
            continue;

        case ',':
            if (pending_data_token) {
                ProcessDataToken(output_tokens,token_begin,token_end,line,column,TokenType_DATA,true);
            }
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_COMMA,line,column));
            continue;

        case ':':
            if (pending_data_token) {
                ProcessDataToken(output_tokens,token_begin,token_end,line,column,TokenType_KEY,true);
            }
            else {
                TokenizeError("unexpected colon", line, column);
            }
            continue;
        }

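        // ordinary character: whitespace terminates the current token,
        // anything else starts or extends one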
        if (IsSpaceOrNewLine(c)) {

            if (token_begin) {
                // peek ahead and check if the next token is a colon in which
                // case this counts as KEY token.
                TokenType type = TokenType_DATA;
                const char* peek = cur;
                while (*peek && IsSpaceOrNewLine(*peek)) {
                    ++peek;
                }
                if (*peek == ':') {
                    // the pending token is a key; move 'cur' onto the colon
                    // so the loop update step consumes it
                    type = TokenType_KEY;
                    cur = peek;
                }

                ProcessDataToken(output_tokens,token_begin,token_end,line,column,type);
            }

            pending_data_token = false;
        }
        else {
            token_end = cur;
            if (!token_begin) {
                token_begin = cur;
            }

            pending_data_token = true;
        }
    }
}
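
// For illustration: the FBX ASCII line
//
//   Creator: "Blender", 100
//
// tokenizes to KEY("Creator"), DATA("\"Blender\""), COMMA, DATA("100");
// quoted strings keep their surrounding quotes at this stage.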

} // !FBX
} // !Assimp

#endif
