Compare commits
No commits in common. "e711767d248d9270be46bd4d3bbbb7821e85ae57" and "62c3c27ddd59dd348b13c9efa005f912a6ba3275" have entirely different histories.
e711767d24...62c3c27ddd
README.md | 12
README.md

@@ -4,12 +4,6 @@ General-purpose text classifier in Python
 
 GPTC provides both a CLI tool and a Python library.
 
-## Installation
-
-    pip install gptc[emoji] # handles emojis! (see section "Emoji")
-    # Or, if you don't need emoji support,
-    pip install gptc # no dependencies!
-
 ## CLI Tool
 
 ### Classifying text
@@ -78,12 +72,6 @@ reduced to the one used when compiling the model.
 Models compiled with older versions of GPTC which did not support ngrams are
 handled the same way as models compiled with `max_ngram_length=1`.
 
-## Emoji
-
-If the [`emoji`](https://pypi.org/project/emoji/) package is installed, GPTC
-will automatically handle emojis the same way as words. If it is not installed,
-GPTC will still work but will ignore emojis.
-
 ## Model format
 
 This section explains the raw model format, which is how you should create and
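For context before the code hunks: both sides of this diff expose the same Python API, which the benchmark hunk below exercises. A minimal usage sketch, assuming raw-model entries carry "text" and "category" keys (an assumption; the compiler hunks below only show the "text" access):

    import gptc

    # Hypothetical raw model; the "text"/"category" keys are an assumption here.
    raw_model = [
        {"text": "I loved this film, wonderful acting", "category": "good"},
        {"text": "terrible film, awful acting", "category": "bad"},
    ]

    classifier = gptc.Classifier(gptc.compile(raw_model, 1), 1)
    print(classifier.classify("wonderful film"))    # most likely category, or None
    print(classifier.confidence("wonderful film"))  # per-category probabilities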
@@ -33,9 +33,7 @@ print(
 )
 
 
-classifier = gptc.Classifier(
-    gptc.compile(raw_model, max_ngram_length), max_ngram_length
-)
+classifier = gptc.Classifier(gptc.compile(raw_model, max_ngram_length), max_ngram_length)
 print(
     "Average classification time over",
     classify_iterations,
gptc/__init__.py

@@ -2,10 +2,6 @@
 
 """General-Purpose Text Classifier"""
 
-from gptc.compiler import compile as compile
-from gptc.classifier import Classifier as Classifier
-from gptc.exceptions import (
-    GPTCError as GPTCError,
-    ModelError as ModelError,
-    UnsupportedModelError as UnsupportedModelError,
-)
+from gptc.compiler import compile
+from gptc.classifier import Classifier
+from gptc.exceptions import *
@@ -6,8 +6,7 @@ import json
 import sys
 import gptc
 
 
-def main() -> None:
+def main():
     parser = argparse.ArgumentParser(
         description="General Purpose Text Classifier", prog="gptc"
     )
@@ -15,23 +14,11 @@ def main() -> None:
 
     compile_parser = subparsers.add_parser("compile", help="compile a raw model")
     compile_parser.add_argument("model", help="raw model to compile")
-    compile_parser.add_argument(
-        "--max-ngram-length",
-        "-n",
-        help="maximum ngram length",
-        type=int,
-        default=1,
-    )
+    compile_parser.add_argument("--max-ngram-length", "-n", help="maximum ngram length", type=int, default=1)
 
     classify_parser = subparsers.add_parser("classify", help="classify text")
     classify_parser.add_argument("model", help="compiled model to use")
-    classify_parser.add_argument(
-        "--max-ngram-length",
-        "-n",
-        help="maximum ngram length",
-        type=int,
-        default=1,
-    )
+    classify_parser.add_argument("--max-ngram-length", "-n", help="maximum ngram length", type=int, default=1)
     group = classify_parser.add_mutually_exclusive_group()
     group.add_argument(
         "-j",
gptc/classifier.py

@@ -2,7 +2,6 @@
 
 import gptc.tokenizer, gptc.compiler, gptc.exceptions, gptc.weighting
 import warnings
-from typing import Dict, Union, cast, List
 
 
 class Classifier:
@@ -25,14 +24,17 @@ class Classifier:
     """
 
-    def __init__(self, model: gptc.compiler.MODEL, max_ngram_length: int = 1):
+    def __init__(self, model, max_ngram_length=1):
         if model.get("__version__", 0) != 3:
-            raise gptc.exceptions.UnsupportedModelError(f"unsupported model version")
+            raise gptc.exceptions.UnsupportedModelError(
+                f"unsupported model version"
+            )
         self.model = model
-        model_ngrams = cast(int, model.get("__ngrams__", 1))
-        self.max_ngram_length = min(max_ngram_length, model_ngrams)
+        self.max_ngram_length = min(
+            max_ngram_length, model.get("__ngrams__", 1)
+        )
 
-    def confidence(self, text: str) -> Dict[str, float]:
+    def confidence(self, text):
         """Classify text with confidence.
 
         Parameters
@@ -50,28 +52,29 @@ class Classifier:
 
         model = self.model
 
-        tokens = gptc.tokenizer.tokenize(text, self.max_ngram_length)
-        numbered_probs: Dict[int, float] = {}
-        for word in tokens:
+        text = gptc.tokenizer.tokenize(text, self.max_ngram_length)
+        probs = {}
+        for word in text:
             try:
-                weighted_numbers = gptc.weighting.weight(
-                    [i / 65535 for i in cast(List[float], model[word])]
+                weight, weighted_numbers = gptc.weighting.weight(
+                    [i / 65535 for i in model[word]]
                 )
                 for category, value in enumerate(weighted_numbers):
                     try:
-                        numbered_probs[category] += value
+                        probs[category] += value
                     except KeyError:
-                        numbered_probs[category] = value
+                        probs[category] = value
             except KeyError:
                 pass
-        total = sum(numbered_probs.values())
-        probs: Dict[str, float] = {
-            cast(List[str], model["__names__"])[category]: value / total
-            for category, value in numbered_probs.items()
+        probs = {
+            model["__names__"][category]: value
+            for category, value in probs.items()
         }
+        total = sum(probs.values())
+        probs = {category: value / total for category, value in probs.items()}
         return probs
 
-    def classify(self, text: str) -> Union[str, None]:
+    def classify(self, text):
         """Classify text.
 
         Parameters
@@ -86,7 +89,7 @@ class Classifier:
         category in the model were found.
 
         """
-        probs: Dict[str, float] = self.confidence(text)
+        probs = self.confidence(text)
         try:
             return sorted(probs.items(), key=lambda x: x[1])[-1][0]
         except IndexError:
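To make the arithmetic in confidence() concrete: each known word contributes its stored per-category scores, rescaled from the 0-65535 encoding and damped by twice their standard deviation, and the totals are normalized at the end. A self-contained walk-through with a hypothetical two-category model:

    import math

    # Hypothetical compiled model: one integer per category for each known word,
    # in the 0-65535 fixed-point encoding produced by gptc/compiler.py below.
    model = {
        "__names__": ["good", "bad"],
        "love": [49151, 16384],   # strongly associated with "good"
        "movie": [32768, 32767],  # nearly uniform, so nearly uninformative
    }

    def two_sigma_weight(numbers):
        # inline re-implementation of gptc.weighting.weight (see that diff below)
        mean = sum(numbers) / len(numbers)
        sd = math.sqrt(sum((n - mean) ** 2 for n in numbers) / len(numbers))
        return [n * 2 * sd for n in numbers]

    probs = {}
    for word in ["love", "movie"]:  # stand-in for the tokenized input text
        for category, value in enumerate(two_sigma_weight([i / 65535 for i in model[word]])):
            probs[category] = probs.get(category, 0) + value

    total = sum(probs.values())
    print({model["__names__"][c]: v / total for c, v in probs.items()})
    # -> roughly {'good': 0.75, 'bad': 0.25}; "movie" barely moves the result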
gptc/compiler.py

@@ -1,14 +1,9 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 
 import gptc.tokenizer
-from typing import Iterable, Mapping, List, Dict, Union
-
-WEIGHTS_T = List[int]
-CONFIG_T = Union[List[str], int, str]
-MODEL = Dict[str, Union[WEIGHTS_T, CONFIG_T]]
 
 
-def compile(raw_model: Iterable[Mapping[str, str]], max_ngram_length: int = 1) -> MODEL:
+def compile(raw_model, max_ngram_length=1):
     """Compile a raw model.
 
     Parameters
@@ -26,7 +21,7 @@ def compile(raw_model: Iterable[Mapping[str, str]], max_ngram_length: int = 1) -> MODEL:
     """
 
-    categories: Dict[str, List[str]] = {}
+    categories = {}
 
     for portion in raw_model:
         text = gptc.tokenizer.tokenize(portion["text"], max_ngram_length)
@@ -36,7 +31,7 @@ def compile(raw_model: Iterable[Mapping[str, str]], max_ngram_length: int = 1) -> MODEL:
         except KeyError:
             categories[category] = text
 
-    categories_by_count: Dict[str, Dict[str, float]] = {}
+    categories_by_count = {}
 
     names = []
 
@@ -47,10 +42,14 @@ def compile(raw_model: Iterable[Mapping[str, str]], max_ngram_length: int = 1) -> MODEL:
             categories_by_count[category] = {}
         for word in text:
             try:
-                categories_by_count[category][word] += 1 / len(categories[category])
+                categories_by_count[category][word] += 1 / len(
+                    categories[category]
+                )
             except KeyError:
-                categories_by_count[category][word] = 1 / len(categories[category])
-    word_weights: Dict[str, Dict[str, float]] = {}
+                categories_by_count[category][word] = 1 / len(
+                    categories[category]
+                )
+    word_weights = {}
     for category, words in categories_by_count.items():
         for word, value in words.items():
             try:
@@ -58,13 +57,14 @@ def compile(raw_model: Iterable[Mapping[str, str]], max_ngram_length: int = 1) -> MODEL:
             except KeyError:
                 word_weights[word] = {category: value}
 
-    model: MODEL = {}
+    model = {}
     for word, weights in word_weights.items():
         total = sum(weights.values())
-        new_weights: List[int] = []
+        model[word] = []
         for category in names:
-            new_weights.append(round((weights.get(category, 0) / total) * 65535))
-        model[word] = new_weights
+            model[word].append(
+                round((weights.get(category, 0) / total) * 65535)
+            )
 
     model["__names__"] = names
     model["__ngrams__"] = max_ngram_length
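The tail of compile() shows the storage scheme: each word maps to one integer per category, its relative frequency scaled into 0-65535. A small worked example with hypothetical frequencies:

    # Hypothetical relative frequencies of one word across two categories.
    weights = {"good": 0.03, "bad": 0.01}
    names = ["good", "bad"]

    total = sum(weights.values())
    encoded = [round((weights.get(c, 0) / total) * 65535) for c in names]
    print(encoded)                       # [49151, 16384]

    # Classifier.confidence() recovers the proportions by dividing by 65535:
    print([i / 65535 for i in encoded])  # [0.7499..., 0.2500...]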
gptc/tokenizer.py

@@ -1,37 +1,13 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 
-from typing import List, Union
-
-try:
-    import emoji
-
-    has_emoji = True
-except ImportError:
-    has_emoji = False
-
 
-def tokenize(text: str, max_ngram_length: int = 1) -> List[str]:
+def tokenize(text, max_ngram_length=1):
     """Convert a string to a list of lemmas."""
-    converted_text: Union[str, List[str]] = text.lower()
-
-    if has_emoji:
-        parts = []
-        highest_end = 0
-        for emoji_part in emoji.emoji_list(text):
-            parts += list(text[highest_end : emoji_part["match_start"]])
-            parts.append(emoji_part["emoji"])
-            highest_end = emoji_part["match_end"]
-        parts += list(text[highest_end:])
-        converted_text = [part for part in parts if part]
-
     tokens = [""]
 
-    for char in converted_text:
+    for char in text.lower():
         if char.isalpha() or char == "'":
             tokens[-1] += char
-        elif has_emoji and emoji.is_emoji(char):
-            tokens.append(char)
-            tokens.append("")
         elif tokens[-1] != "":
             tokens.append("")
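The surviving 62c3c27-side tokenizer is simple enough to trace by hand: letters and apostrophes extend the current token, anything else closes it. A standalone sketch (the hunk is truncated before the function's return, so the final cleanup of empty tokens is an assumption):

    def tokenize(text):
        tokens = [""]
        for char in text.lower():
            if char.isalpha() or char == "'":
                tokens[-1] += char
            elif tokens[-1] != "":
                tokens.append("")
        return [token for token in tokens if token]  # assumed cleanup of empties

    print(tokenize("Don't panic -- it's fine!"))
    # -> ["don't", 'panic', "it's", 'fine']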
gptc/weighting.py

@@ -1,10 +1,9 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 
 import math
-from typing import Sequence, Union, Tuple, List
 
 
-def _mean(numbers: Sequence[float]) -> float:
+def _mean(numbers):
     """Calculate the mean of a group of numbers
 
     Parameters
@@ -20,7 +19,7 @@ def _mean(numbers: Sequence[float]) -> float:
     return sum(numbers) / len(numbers)
 
 
-def _standard_deviation(numbers: Sequence[float]) -> float:
+def _standard_deviation(numbers):
     """Calculate the standard deviation of a group of numbers
 
     Parameters
@@ -39,8 +38,8 @@ def _standard_deviation(numbers: Sequence[float]) -> float:
     return math.sqrt(_mean(squared_deviations))
 
 
-def weight(numbers: Sequence[float]) -> List[float]:
+def weight(numbers):
     standard_deviation = _standard_deviation(numbers)
     weight = standard_deviation * 2
     weighted_numbers = [i * weight for i in numbers]
-    return weighted_numbers
+    return weight, weighted_numbers
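Both sides scale a word's scores by twice their standard deviation, so evenly spread (uninformative) scores are damped toward zero; the 62c3c27 side additionally returns the scale factor itself. A quick check of the math, re-implemented standalone:

    import math

    def weight(numbers):
        # mirrors the 62c3c27 side: returns the scale factor and the scaled scores
        mean = sum(numbers) / len(numbers)
        sd = math.sqrt(sum((n - mean) ** 2 for n in numbers) / len(numbers))
        return sd * 2, [n * sd * 2 for n in numbers]

    print(weight([0.75, 0.25]))  # (0.5, [0.375, 0.125])  -- informative word
    print(weight([0.5, 0.5]))    # (0.0, [0.0, 0.0])      -- uninformative word vanishes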