#
# This program source code file is part of KICAD, a free EDA CAD application.
#
# Copyright (C) 2010 Wayne Stambaugh <stambaughw@verizon.net>
# Copyright (C) 2010 Kicad Developers, see AUTHORS.txt for contributors.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you may find one here:
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# or you may search the http://www.gnu.org website for the version 2 license,
# or you may write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
#
# This script converts a plain text file with a line feed separated list
# of token names into the appropriate source and header files required by
# the DSN lexer.  See files "<base_source_path>/common/dsnlexer.cpp" and
# "<base_source_path>/include/dsnlexer.h" for more information about how
# the DSN lexer works.  The token list file format requires a single token
# per line.  Tokens can only contain lower case letters, numbers, and
# underscores.  The first letter of each token must be a lower case letter.
# Tokens must be unique.  If any of the above criteria are not met, the
# source and header files will not be generated and a build error will
# occur.
#
# Valid tokens:   a a1 foo_1 foo_bar2
# Invalid tokens: 1 A _foo bar_ foO
#
# Usage:
#
#    add_custom_command(
#        OUTPUT ${CMAKE_BINARY_DIR}/cmp_library_keywords.h
#               ${CMAKE_BINARY_DIR}/cmp_library_keywords.cpp
#        COMMAND ${CMAKE_COMMAND}
#                -Denum=YOURTOK_T
#                -DinputFile=${CMAKE_CURRENT_SOURCE_DIR}/cmp_library.keywords
#                -P ${CMAKE_MODULE_PATH}/TokenList2DsnLexer.cmake
#        DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/cmp_library.keywords
#    )
#
# Input parameters:
#
#    enum       - The name of the enum to generate, defaults to DSN_T, but
#                 you'll get collisions if you don't override it.
#    inputFile  - The name of the token list file.
#    outputPath - Optional output path to save the generated files.  If not defined,
#                 the output path is the same path as the token list file path.
|
|
|
|
# Accumulated list of validated token names and a running line counter used
# for error reporting while parsing the token list file.
set( tokens "" )
set( lineCount 0 )
set( dsnErrorMsg "DSN token file generator failure:" )

# The token list file is mandatory; abort the build if it is missing.
# Quote the expansion so an undefined/empty inputFile does not collapse
# the EXISTS test into a malformed if() expression.
if( NOT EXISTS "${inputFile}" )
    message( FATAL_ERROR "${dsnErrorMsg} file ${inputFile} cannot be found." )
endif()

# Default the output path to the directory containing the token list file
# when the caller did not supply (or supplied a nonexistent) outputPath.
if( NOT EXISTS "${outputPath}" )
    get_filename_component( outputPath "${inputFile}" PATH )
endif()

# Default enum name; callers should override with -Denum=... to avoid
# collisions between generated lexers.
if( NOT DEFINED enum )
    set( enum DSN_T )
endif()

#message( STATUS "enum: ${enum}" )
|
|
# Derive the base name (no directory, no extension) of the token list file;
# every generated artifact name is built from it.
get_filename_component( result "${inputFile}" NAME_WE )

message( STATUS "Extracted file name ${result} from path ${inputFile}" )

# Build the C header include-guard tag from the upper-cased base name.
string( TOUPPER "${result}" resultUpper )
set( headerTag "_${resultUpper}_H_" )

# Full paths of the header and source files this script generates.
set( includeFileName "${outputPath}/${result}_keywords.h" )
set( sourceFileName  "${outputPath}/${result}_keywords.cpp" )
|
|
|
|
# Boilerplate emitted at the top of the generated header file: a header
# guard built from headerTag, the ${enum} declaration, and the fixed
# negative special tokens inherited from DSNLEXER.  The per-keyword enum
# entries are appended after this string in the token loop below.
# NOTE(review): this string is generated C++ output — do not edit casually.
set( includeFileHeader
"
/*
 * Do not modify this file it was automatically generated by the TokenList2DsnLexer CMake
 * script.
 */

#ifndef ${headerTag}
#define ${headerTag}

#include \"dsnlexer.h\"

namespace DSN {

enum ${enum} {

    // these first few are negative special ones for syntax, and are
    // inherited from DSNLEXER.
    T_NONE = DSN_NONE,
    T_COMMENT = DSN_COMMENT,
    T_STRING_QUOTE = DSN_STRING_QUOTE,
    T_QUOTE_DEF = DSN_QUOTE_DEF,
    T_DASH = DSN_DASH,
    T_SYMBOL = DSN_SYMBOL,
    T_NUMBER = DSN_NUMBER,
    T_RIGHT = DSN_RIGHT,            // right bracket, ')'
    T_LEFT = DSN_LEFT,              // left bracket, '('
    T_STRING = DSN_STRING,          // a quoted string, stripped of the quotes
    T_EOF = DSN_EOF,                // special case for end of file

"
)
|
|
|
|
# Boilerplate emitted at the top of the generated source file: includes,
# the TOKDEF stringizing macro, and the opening of the keyword table that
# the per-token loop below fills in.
# Fix: corrected "for you DSN lexer" -> "for your DSN lexer" in the
# generated comment text.
set( sourceFileHeader
"
/*
 * Do not modify this file it was automatically generated by the TokenList2DsnLexer CMake
 * script.
 *
 * Include this file in your lexer class to provide the keywords for your DSN lexer.
 */

#include \"fctsys.h\"
#include \"macros.h\"

#include \"${result}_keywords.h\"


namespace DSN {

#define TOKDEF(x)    { #x, T_##x }

const KEYWORD ${result}_keywords[] = {
"
)
|
|
|
|
# Read the token list file one line per list element (no hex conversion so
# tokens that look like hex numbers are kept as text).
file( STRINGS "${inputFile}" tmpTokens NO_HEX_CONVERSION )

foreach( tmpToken ${tmpTokens} )
    math( EXPR lineCount "${lineCount} + 1" )

    # Strip surrounding whitespace from the line before validating it.
    # Fix: the original call had the arguments reversed — the signature is
    # string( STRIP <string> <output variable> ) — so nothing was ever
    # stripped and a stray variable named after each token was created.
    string( STRIP "${tmpToken}" tmpToken )

    # Ignore empty lines.
    if( tmpToken )
        # Make sure token is valid: a single lower case letter, or a lower
        # case letter followed by lower case letters, digits, or underscores,
        # not ending in an underscore.  The optional group fixes the original
        # pattern, which required at least two characters and rejected the
        # documented-valid single-letter token "a".
        string( REGEX MATCH "^[a-z]([_0-9a-z]*[0-9a-z])?$" validToken "${tmpToken}" )

        if( "${validToken}" STREQUAL "${tmpToken}" )
            list( APPEND tokens "${validToken}" )
        else()
            message( FATAL_ERROR
                     "Invalid token string \"${tmpToken}\" at line ${lineCount} in file "
                     "<${inputFile}>." )
        endif()
    endif()
endforeach()
|
|
|
|
# Sort so the generated enum and keyword table are in a stable, predictable
# order regardless of the order in the token list file.
list( SORT tokens )

# Check for duplicates: compare the list length before and after removing
# duplicates; any difference means the input file repeated a token.
list( LENGTH tokens tokensBefore )
list( REMOVE_DUPLICATES tokens )
list( LENGTH tokens tokensAfter )

if( NOT ( tokensBefore EQUAL tokensAfter ) )
    message( FATAL_ERROR "Duplicate tokens found in file <${inputFile}>." )
endif()
|
|
|
|
# Start both generated files from their boilerplate headers; the loop below
# appends one enum entry and one keyword-table entry per token.
file( WRITE "${includeFileName}" "${includeFileHeader}" )
file( WRITE "${sourceFileName}" "${sourceFileHeader}" )

# Reused as a 1-based position within the token list (not a file line number
# here) so the first and last entries can be formatted specially.
set( lineCount 1 )

foreach( token ${tokens} )
    # Anchor the first generated enum value at 0; the rest follow implicitly.
    if( lineCount EQUAL 1 )
        file( APPEND "${includeFileName}" " T_${token} = 0" )
    else()
        file( APPEND "${includeFileName}" " T_${token}" )
    endif()

    file( APPEND "${sourceFileName}" " TOKDEF( ${token} )" )

    # Every entry but the last is followed by a comma.
    if( lineCount EQUAL tokensAfter )
        file( APPEND "${includeFileName}" "\n" )
        file( APPEND "${sourceFileName}" "\n" )
    else()
        file( APPEND "${includeFileName}" ",\n" )
        file( APPEND "${sourceFileName}" ",\n" )
    endif()

    math( EXPR lineCount "${lineCount} + 1" )
endforeach()
|
|
|
|
# Close out the generated header: terminate the enum, declare the keyword
# table and its element count (defined in the generated .cpp), close the
# DSN namespace, and end the header guard.
file( APPEND "${includeFileName}"
"};

extern const KEYWORD ${result}_keywords[];
extern const unsigned ${result}_keyword_count;

} // End namespace DSN


#endif // End ${headerTag}
"
)

# Close out the generated source: terminate the keyword table and define its
# element count via the DIM() macro from macros.h.
file( APPEND "${sourceFileName}"
"};


const unsigned ${result}_keyword_count = DIM( ${result}_keywords );


} // End namespace DSN
"
)