├── FuzzyTM
│   ├── analysis
│   │   ├── __init__.py
│   │   └── topic_specificity.py
│   ├── __pycache__
│   │   ├── FuzzyTM.cpython-38.pyc
│   │   ├── __init__.cpython-310.pyc
│   │   └── __init__.cpython-38.pyc
│   ├── __init__.py
│   └── FuzzyTM.py
├── FuzzyTM.egg-info
│   ├── dependency_links.txt
│   ├── top_level.txt
│   ├── requires.txt
│   ├── SOURCES.txt
│   └── PKG-INFO
├── dist
│   ├── FuzzyTM-1.0.0.tar.gz
│   ├── FuzzyTM-2.0.1.tar.gz
│   ├── FuzzyTM-2.0.2.tar.gz
│   ├── FuzzyTM-2.0.3.tar.gz
│   ├── FuzzyTM-2.0.4.tar.gz
│   ├── FuzzyTM-2.0.5.tar.gz
│   ├── FuzzyTM-2.0.6.tar.gz
│   ├── FuzzyTM-2.0.7.tar.gz
│   ├── FuzzyTM-2.0.8.tar.gz
│   ├── FuzzyTM-1.0.0-py3-none-any.whl
│   ├── FuzzyTM-2.0.1-py3-none-any.whl
│   ├── FuzzyTM-2.0.2-py3-none-any.whl
│   ├── FuzzyTM-2.0.3-py3-none-any.whl
│   ├── FuzzyTM-2.0.4-py3-none-any.whl
│   ├── FuzzyTM-2.0.5-py3-none-any.whl
│   ├── FuzzyTM-2.0.6-py3-none-any.whl
│   ├── FuzzyTM-2.0.7-py3-none-any.whl
│   └── FuzzyTM-2.0.8-py3-none-any.whl
├── setup.py
├── .github
│   └── workflows
│       └── python-publish.yml
├── README.md
└── LICENSE
/FuzzyTM/analysis/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/FuzzyTM.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/FuzzyTM.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | FuzzyTM
2 |
--------------------------------------------------------------------------------
/FuzzyTM.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | pandas
3 | scipy
4 | pyfume
5 |
--------------------------------------------------------------------------------
/dist/FuzzyTM-1.0.0.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-1.0.0.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.1.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.1.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.2.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.2.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.3.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.3.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.4.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.4.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.5.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.5.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.6.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.6.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.7.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.7.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.8.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.8.tar.gz
--------------------------------------------------------------------------------
/dist/FuzzyTM-1.0.0-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-1.0.0-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.1-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.1-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.2-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.2-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.3-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.3-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.4-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.4-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.5-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.5-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.6-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.6-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.7-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.7-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/FuzzyTM-2.0.8-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/dist/FuzzyTM-2.0.8-py3-none-any.whl
--------------------------------------------------------------------------------
/FuzzyTM/__pycache__/FuzzyTM.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/FuzzyTM/__pycache__/FuzzyTM.cpython-38.pyc
--------------------------------------------------------------------------------
/FuzzyTM/__pycache__/__init__.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/FuzzyTM/__pycache__/__init__.cpython-310.pyc
--------------------------------------------------------------------------------
/FuzzyTM/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ERijck/FuzzyTM/HEAD/FuzzyTM/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/FuzzyTM/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Dec 29 00:10:35 2021
4 |
5 | @author: 20200016
6 | """
7 |
8 | from .FuzzyTM import FuzzyTM, FLSA, FLSA_W, FLSA_V, FLSA_E
9 |
--------------------------------------------------------------------------------
/FuzzyTM.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | LICENSE
2 | README.md
3 | setup.py
4 | FuzzyTM/FuzzyTM.py
5 | FuzzyTM/__init__.py
6 | FuzzyTM.egg-info/PKG-INFO
7 | FuzzyTM.egg-info/SOURCES.txt
8 | FuzzyTM.egg-info/dependency_links.txt
9 | FuzzyTM.egg-info/requires.txt
10 | FuzzyTM.egg-info/top_level.txt
11 | FuzzyTM/analysis/__init__.py
12 | FuzzyTM/analysis/topic_specificity.py
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pathlib
4 | from setuptools import setup, find_packages
5 |
6 | HERE = pathlib.Path(__file__).parent
7 |
8 | VERSION = '2.0.9'
9 | PACKAGE_NAME = 'FuzzyTM'
10 | AUTHOR = 'Emil Rijcken'
11 | AUTHOR_EMAIL = 'emil.rijcken@gmail.com'
12 | URL = 'https://github.com/ERijck/FuzzyTM'
13 |
14 | LICENSE = 'GNU General Public License v3.0'
15 | DESCRIPTION = 'A Python package for Fuzzy Topic Models'
16 | LONG_DESCRIPTION = (HERE / "README.md").read_text()
17 | LONG_DESC_TYPE = "text/markdown"
18 |
19 | INSTALL_REQUIRES = [
20 | 'numpy',
21 | 'pandas',
22 | 'scipy',
23 | 'pyfume',
24 | ]
25 |
26 | setup(name=PACKAGE_NAME,
27 | version=VERSION,
28 | description=DESCRIPTION,
29 | long_description=LONG_DESCRIPTION,
30 | long_description_content_type=LONG_DESC_TYPE,
31 | author=AUTHOR,
32 | license=LICENSE,
33 | author_email=AUTHOR_EMAIL,
34 | url=URL,
35 | install_requires=INSTALL_REQUIRES,
36 | packages=find_packages()
37 | )
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package
36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
37 | with:
38 | user: __token__
39 | password: ${{ secrets.PYPI_API_TOKEN }}
40 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Fuzzy Topic Modeling - methods derived from Fuzzy Latent Semantic Analysis
2 | This is the Python code to train Fuzzy Latent Semantic Analysis-based topic models. The details of the original FLSA model can be found [here](https://link.springer.com/article/10.1007/s40815-017-0327-9). With my research group, we have formulated two alternative topic modeling algorithms, 'FLSA-W' and 'FLSA-V', which are derived from FLSA. Once the paper is published (it has been accepted), we will place a link to it here as well.
3 |
4 | ## Table of contents
5 | 1. Introduction to Topic Modeling
6 | 2. Explanation algorithms
7 | 3. Getting started
8 | * FLSA & FLSA-W
9 | * FLSA-V
10 | * Instructions to get map_file from Vosviewer
11 | 4. Class methods
12 | 5. Dependencies
13 |
14 | ## Introduction to Topic Modeling
15 | Topic modeling is a popular task within the domain of Natural Language Processing (NLP). Topic modeling is a type of statistical modeling for discovering the latent 'topics' occurring in a collection of documents. While humans typically describe a topic with a single word, topic modeling algorithms describe topics as a probability distribution over words.
16 |
17 | Various topic modeling algorithms exist, and one thing they have in common is that they all output two matrices:
18 | 1. Probability of a word given a topic. This is an *M x C (vocabulary size x number of topics)* matrix.
19 | 2. Probability of a topic given a document. This is a *C x N (number of topics x number of documents)* matrix.
20 |
21 | From the first matrix, the top *n* words per topic are taken to represent that topic.
22 |
23 | On top of finding the latent topics in a text, topic models can also be used for more explainable text classification. In that case, each document is represented as a *'topic embedding'*: a *C*-length vector in which each cell corresponds to a topic and contains a number indicating the extent to which that topic is represented in the document. These topic embeddings can then be fed to machine learning classification models. Some machine learning classification models can show the weights they assigned to the input variables, based on which they made their decisions. The idea is that if the topics are interpretable, then the weights assigned to the topics reveal why a model made its decisions.
24 |
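For illustration, here is a minimal sketch of turning the *C x N* matrix returned by `get_matrices()` into per-document topic embeddings (the variable names are only examples, not part of the package):

    import numpy as np

    # Each column of prob_topic_given_document is one document's topic
    # distribution; transposing gives an N x C matrix of topic embeddings.
    topic_embeddings = np.asarray(prob_topic_given_document).T

    # topic_embeddings[i] can now serve as the feature vector of document i
    # for any machine learning classifier.
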
25 | ## Explanation algorithms
26 | The general approach of the algorithms can be explained as follows (a rough code sketch of the first steps is given after the list):
27 |
28 | 1. Create a local term matrix. This is an *N x M (number of documents x vocabulary size)* matrix that gives the count of word *i* in document *j*.
29 | 2. Create a global term matrix in which the words from different documents are also related to each other (the four weighting options in the class are: 'normal', 'entropy', 'idf', 'probidf').
30 | 3. Project the data into a lower-dimensional space (we use singular value decomposition).
31 | 4. Use fuzzy clustering to get the partition matrix.
32 | 5. Use Bayes' Theorem and matrix multiplication to get the needed matrices.
33 |
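As a rough, self-contained sketch of steps 1-3 using generic NumPy/SciPy calls (this is not the package's internal code, and plain counts stand in for the 'normal'/'entropy'/'idf'/'probidf' weighting):

    from scipy.sparse import dok_matrix
    from scipy.sparse.linalg import svds

    docs = [['topic', 'models', 'find', 'topics'],
            ['fuzzy', 'clusters', 'are', 'soft', 'clusters'],
            ['documents', 'contain', 'topics']]
    vocab = sorted({word for doc in docs for word in doc})
    word_to_index = {word: i for i, word in enumerate(vocab)}

    # Step 1: local term matrix (documents x vocabulary).
    local_term_matrix = dok_matrix((len(docs), len(vocab)))
    for doc_idx, doc in enumerate(docs):
        for word in doc:
            local_term_matrix[doc_idx, word_to_index[word]] += 1

    # Step 2: global term weighting (plain counts here, for brevity).
    global_term_matrix = local_term_matrix.tocsc()

    # Step 3: project into a lower-dimensional space with truncated SVD.
    u, s, vt = svds(global_term_matrix, k=2)

    # Step 4 would apply a fuzzy clustering method (the package uses pyFUME)
    # to the projected documents (FLSA) or words (FLSA-W), and step 5 combines
    # the resulting partition matrix with Bayes' Theorem.
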
34 | ### FLSA
35 | The original FLSA approach aims to find clusters in the projected space of documents.
36 |
37 | ### FLSA-W
38 | Documents might contain multiple topics, making them difficult to cluster. Therefore, it might make more sense to cluster words instead of documents. That is what we do with FLSA-W(ords).
39 |
40 | ### FLSA-E
41 | FLSA-E trains a Word2Vec word embedding on the corpus and then clusters in this embedding space to find topics.
42 |
43 | ### FLSA-V
44 | FLSA-W clusters on a projected space of words and implicitly assumes that the projection places related words near each other. However, no optimization step guarantees that this is the case. With FLSA-V(os), we use the output of [Vosviewer](https://www.vosviewer.com/) as input to our model. Vosviewer is an open-source software tool for bibliographic mapping that optimizes its projections such that related words are located near each other. Using Vosviewer's output, FLSA-V's calculations start at step 4 (although step 1 is still used for calculating some probabilities).
45 |
46 |
47 |
48 | ## Getting started
49 | Many parameters have default settings, so the algorithms can be called by setting only the following two variables:
50 | * **input_file**, The data on which you want to train the topic model.
51 | * *Format*: list of lists of tokens.
52 | * *Example*: [['this','is','the','first','document'],['why','am','i','stuck','in','the','middle'],['save','the','best','for','last']].
53 |
54 | * **num_topics**, The number of topics you want the topic model to find.
55 | * *Format*: int (greater than zero).
56 | * *Example*: 15.
57 |
58 | Suppose your data (a list of lists of strings) is called `data` and you want to train a topic model with 10 topics. After importing the class (`from FuzzyTM import FLSA`), run the following code to get the two matrices:
59 |
60 | flsa_model = FLSA(input_file = data, num_topics = 10)
61 | prob_word_given_topic, prob_topic_given_document = flsa_model.get_matrices()
62 |
63 | To see the words and probabilities corresponding to each topic, run:
64 |
65 | flsa_model.show_topics()
66 |
67 | Below is a description of the other parameters per algorithm, together with short illustrative calls.
68 |
69 | ### FLSA & FLSA-W
70 | * *num_words*, The number of words (top-*n*) per topic used to represent that topic.
71 | * *Format*: int (greater than zero).
72 | * *Default value*: 20
73 |
74 | * *word_weighting*, The method used for global term weighting (as described in step 2 of the algorithm).
75 | * *Format*: str (choose between: 'entropy', 'idf', 'normal', 'probidf').
76 | * *Default value*: 'normal'
77 |
78 | * *cluster_method*, The (fuzzy) cluster method to be used.
79 | * *Format*: str (choose between: 'fcm', 'gk', 'fst-pso').
80 | * *Default value*: 'fcm'
81 |
82 | * *svd_factors*, The number of dimensions to project the data into.
83 | * *Format*: int (greater than zero).
84 | * *Default value*: 2.
85 |
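For example, a call that sets all of the above explicitly (`FLSA_W` is assumed here to accept the same arguments as `FLSA`; adjust the values to your data):

    from FuzzyTM import FLSA_W

    flsa_w_model = FLSA_W(
        input_file=data,
        num_topics=10,
        num_words=20,
        word_weighting='idf',
        cluster_method='fcm',
        svd_factors=2,
    )
    prob_word_given_topic, prob_topic_given_document = flsa_w_model.get_matrices()
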
86 | ### FLSA-V
87 | * *map_file*, The output file from Vosviewer.
88 | * *Format*: pd.DataFrame (The Dataframe needs to contain the following columns: '*id*','*x*','*y*')
89 | * *Example*:
90 |
91 | | id | x | y |
92 | | --- | --- | --- |
93 | | word_one | -0.4626 | 0.8213 |
94 | | word_two | 0.6318 | -0.2331 |
95 | | ... | ... | ... |
96 | | word_M | 0.9826 | 0.184 |
97 |
98 |
99 | * *num_words*, The number of words (top-*n*) per topic used to represent that topic.
100 | * *Format*: int (greater than zero).
101 | * *Default value*: 20
102 |
103 | * *cluster_method*, The (fuzzy) cluster method to be used.
104 | * *Format*: str (choose between: 'fcm', 'gk', 'fst-pso').
105 | * *Default value*: 'fcm'
106 |
107 | #### Instructions to get map_file from Vosviewer
108 |
109 | 1. Create a tab-separated file from your dataset in which you show, for each word, how often it appears with each other word (a short code sketch of this step is given after these instructions).
110 | Format: *Word_1* `<tab>` *Word_2* `<tab>` *Frequency*.
111 | (Since this quickly leads to an unprocessable number of combinations, we recommend using only the words that appear in at least *x* documents; we used 100.)
112 | 2. [Download Vosviewer](https://www.vosviewer.com/download).
113 | 3. *Vosviewer* \> *Create* \> *Create a map based on text data* \> *Read data from VOSviewer files*
114 | Under 'VOSviewer corpus file (required)' submit your .txt file from step 1 and click 'finish'.
115 | 4. The exported map file is a tab-separated file and can be loaded into Python with pandas (imported as `pd`) as follows.
116 | Suppose the file is called `map_file.txt`:
117 | `map_file = pd.read_csv('map_file.txt', delimiter="\t")`
118 | 5. Please check the [Vosviewer manual](https://www.vosviewer.com/documentation/Manual_VOSviewer_1.6.8.pdf) for more information.
119 |
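As a rough sketch of step 1 and the eventual model call (only `FLSA_V`, `input_file`, `num_topics`, and `map_file` come from the package; the file names and the pair-counting scheme are illustrative):

    from collections import Counter
    from itertools import combinations
    import pandas as pd
    from FuzzyTM import FLSA_V

    # Step 1: count how often word pairs co-occur within a document and write
    # the counts to a tab-separated corpus file for Vosviewer.
    pair_counts = Counter()
    for doc in data:
        for word_1, word_2 in combinations(sorted(set(doc)), 2):
            pair_counts[(word_1, word_2)] += 1
    with open('vos_corpus.txt', 'w', encoding='utf-8') as handle:
        for (word_1, word_2), frequency in pair_counts.items():
            handle.write(f"{word_1}\t{word_2}\t{frequency}\n")

    # After creating and exporting the map in Vosviewer (steps 2-4), load it
    # and train FLSA-V.
    map_file = pd.read_csv('map_file.txt', delimiter='\t')
    flsa_v_model = FLSA_V(input_file=data, num_topics=10, map_file=map_file)
    prob_word_given_topic, prob_topic_given_document = flsa_v_model.get_matrices()
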
120 | ## Class Methods
121 |
122 | ## Dependencies
123 | * numpy == 1.19.2
124 | * pandas == 1.3.3
125 | * scipy == 1.5.2
126 | * pyfume == 0.2.0
127 |
--------------------------------------------------------------------------------
/FuzzyTM.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 2.1
2 | Name: FuzzyTM
3 | Version: 2.0.8
4 | Summary: A Python package for Fuzzy Topic Models
5 | Home-page: https://github.com/ERijck/FuzzyTM
6 | Author: Emil Rijcken
7 | Author-email: emil.rijcken@gmail.com
8 | License: GNU General Public License v3.0
9 | Platform: UNKNOWN
10 | Description-Content-Type: text/markdown
11 | License-File: LICENSE
12 |
13 | # Fuzzy Topic Modeling - methods derived from Fuzzy Latent Semantic Analysis
14 | This is the Python code to train Fuzzy Latent Semantic Analysis-based topic models. The details of the original FLSA model can be found [here](https://link.springer.com/article/10.1007/s40815-017-0327-9). With my research group, we have formulated two alternative topic modeling algorithms, 'FLSA-W' and 'FLSA-V', which are derived from FLSA. Once the paper is published (it has been accepted), we will place a link to it here as well.
15 |
16 | ## Table of contents
17 | 1. Introduction to Topic Modeling
18 | 2. Explanation algorithms
19 | 3. Getting started
20 | * FLSA & FLSA-W
21 | * FLSA-V
22 | * Instructions to get map_file from Vosviewer
23 | 4. Class methods
24 | 5. Dependencies
25 |
26 | ## Introduction to Topic Modeling
27 | Topic modeling is a popular task within the domain of Natural Language Processing (NLP). Topic modeling is a type of statistical modeling for discovering the latent 'topics' occurring in a collection of documents. While humans typically describe a topic with a single word, topic modeling algorithms describe topics as a probability distribution over words.
28 |
29 | Various topic modeling algorithms exist, and one thing they have in common is that they all output two matrices:
30 | 1. Probability of a word given a topic. This is an *M x C (vocabulary size x number of topics)* matrix.
31 | 2. Probability of a topic given a document. This is a *C x N (number of topics x number of documents)* matrix.
32 |
33 | From the first matrix, the top *n* words per topic are taken to represent that topic.
34 |
35 | On top of finding the latent topics in a text, topic models can also be used for more explainable text classification. In that case, each document is represented as a *'topic embedding'*: a *C*-length vector in which each cell corresponds to a topic and contains a number indicating the extent to which that topic is represented in the document. These topic embeddings can then be fed to machine learning classification models. Some machine learning classification models can show the weights they assigned to the input variables, based on which they made their decisions. The idea is that if the topics are interpretable, then the weights assigned to the topics reveal why a model made its decisions.
36 |
37 | ## Explanation algorithms
38 | The general approach to the algorithm(s) can be explained as follows:
39 |
40 | 1. Create a local term matrix. This is an *N x M (number of documents x vocabulary size)* matrix that gives the count of word *i* in document *j*.
41 | 2. Create a global term matrix in which the words from different documents are also related to each other (the four weighting options in the class are: 'normal', 'entropy', 'idf', 'probidf').
42 | 3. Project the data into a lower-dimensional space (we use singular value decomposition).
43 | 4. Use fuzzy clustering to get the partition matrix.
44 | 5. Use Bayes' Theorem and matrix multiplication to get the needed matrices.
45 |
46 | ### FLSA
47 | The original FLSA approach aims to find clusters in the projected space of documents.
48 |
49 | ### FLSA-W
50 | Documents might contain multiple topics, making them difficult to cluster. Therefore, it might make more sense to cluster words instead of documents. That is what we do with FLSA-W(ords).
51 |
52 | ### FLSA-E
53 | FLSA-E trains a Word2Vec word embedding on the corpus and then clusters in this embedding space to find topics.
54 |
55 | ### FLSA-V
56 | FLSA-W clusters on a projected space of words and implicitly assumes that the projection places related words near each other. However, no optimization step guarantees that this is the case. With FLSA-V(os), we use the output of [Vosviewer](https://www.vosviewer.com/) as input to our model. Vosviewer is an open-source software tool for bibliographic mapping that optimizes its projections such that related words are located near each other. Using Vosviewer's output, FLSA-V's calculations start at step 4 (although step 1 is still used for calculating some probabilities).
57 |
58 |
59 |
60 | ## Getting started
61 | Many parameters have default settings, so the algorithms can be called by setting only the following two variables:
62 | * **input_file**, The data on which you want to train the topic model.
63 | * *Format*: list of lists of tokens.
64 | * *Example*: [['this','is','the','first','document'],['why','am','i','stuck','in','the','middle'],['save','the','best','for','last']].
65 |
66 | * **num_topics**, The number of topics you want the topic model to find.
67 | * *Format*: int (greater than zero).
68 | * *Example*: 15.
69 |
70 | Suppose your data (a list of lists of strings) is called `data` and you want to train a topic model with 10 topics. After importing the class (`from FuzzyTM import FLSA`), run the following code to get the two matrices:
71 |
72 | flsa_model = FLSA(input_file = data, num_topics = 10)
73 | prob_word_given_topic, prob_topic_given_document = flsa_model.get_matrices()
74 |
75 | To see the words and probabilities corresponding to each topic, run:
76 |
77 | flsa_model.show_topics()
78 |
79 | Below is a description of the other parameters per algorithm.
80 |
81 | ### FLSA & FLSA-W
82 | * *num_words*, The number of words (top-*n*) per topic used to represent that topic.
83 | * *Format*: int (greater than zero).
84 | * *Default value*: 20
85 |
86 | * *word_weighting*, The method used for global term weighting (as described in step 2 of the algorithm).
87 | * *Format*: str (choose between: 'entropy', 'idf', 'normal', 'probidf').
88 | * *Default value*: 'normal'
89 |
90 | * *cluster_method*, The (fuzzy) cluster method to be used.
91 | * *Format*: str (choose between: 'fcm', 'gk', 'fst-pso').
92 | * *Default value*: 'fcm'
93 |
94 | * *svd_factors*, The number of dimensions to project the data into.
95 | * *Format*: int (greater than zero).
96 | * *Default value*: 2.
97 |
98 | ### FLSA-V
99 | * *map_file*, The output file from Vosviewer.
100 | * *Format*: pd.DataFrame (The Dataframe needs to contain the following columns: '*id*','*x*','*y*')
101 | * *Example*:
102 |
103 | | id | x | y |
104 | | --- | --- | --- |
105 | | word_one | -0.4626 | 0.8213 |
106 | | word_two | 0.6318 | -0.2331 |
107 | | ... | ... | ... |
108 | | word_M | 0.9826 | 0.184 |
109 |
110 |
111 | * *num_words*, The number of words (top-*n*) per topic used to represent that topic.
112 | * *Format*: int (greater than zero).
113 | * *Default value*: 20
114 |
115 | * *cluster_method*, The (fuzzy) cluster method to be used.
116 | * *Format*: str (choose between: 'fcm', 'gk', 'fst-pso').
117 | * *Default value*: 'fcm'
118 |
119 | #### Instructions to get map_file from Vosviewer
120 |
121 | 1. Create a tab-separated file from your dataset in which you show, for each word, how often it appears with each other word.
122 | Format: *Word_1* `<tab>` *Word_2* `<tab>` *Frequency*.
123 | (Since this quickly leads to an unprocessable number of combinations, we recommend using only the words that appear in at least *x* documents; we used 100.)
124 | 2. [Download Vosviewer](https://www.vosviewer.com/download).
125 | 3. *Vosviewer* \> *Create* \> *Create a map based on text data* \> *Read data from VOSviewer files*
126 | Under 'VOSviewer corpus file (required)' submit your .txt file from step 1 and click 'finish'.
127 | 4. The exported map file is a tab-separated file and can be loaded into Python with pandas (imported as `pd`) as follows.
128 | Suppose the file is called `map_file.txt`:
129 | `map_file = pd.read_csv('map_file.txt', delimiter="\t")`
130 | 5. Please check the [Vosviewer manual](https://www.vosviewer.com/documentation/Manual_VOSviewer_1.6.8.pdf) for more information.
131 |
132 | ## Class Methods
133 |
134 | ## Dependencies
135 | * numpy == 1.19.2
136 | * pandas == 1.3.3
137 | * scipy == 1.5.2
138 | * pyfume == 0.2.0
139 |
140 |
141 |
--------------------------------------------------------------------------------
/FuzzyTM/analysis/topic_specificity.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Jun 3 16:05:32 2021
4 |
5 | @author: 20200016
6 | """
7 |
8 | from collections import Counter
9 | import statistics
10 |
11 | class TopicSpecificity:
12 | """
13 | A class to calculate specificity measures of words within given topics.
14 |
15 | Attributes:
16 | topics (list): A list of topics, each represented by a list of words or tuples.
17 | data (list): A list of documents with word frequencies.
18 | word_dict (Counter): A counter mapping words to their frequencies in `data`.
19 | sorted_word_count (list): A list of tuples with word frequencies sorted in descending order.
20 | sorted_counts (list): A list of word counts sorted in descending order.
21 | num_words (int): The number of words in a topic.
22 | """
23 | def __init__(
24 | self,
25 | topics,
26 | data,
27 | ):
28 | assert isinstance(topics, list), "'topics' is not a list."
29 | assert isinstance(data, list), "'data' is not a list."
30 | if isinstance(topics[0], tuple):
31 | self.topics = self.__transform_lda_output_to_topic_list__(topics)
32 | else:
33 | self.topics = topics
34 | self.data = data
35 | self.word_dict, self.sorted_word_count = self._sorted_word_count__(data)
36 | self.sorted_counts = [tup[1] for tup in self.sorted_word_count]
37 | self.num_words = len(self.topics[0])
38 |
39 |
40 | def specificity_word(
41 | self, word, relative = True
42 | ):
43 | """
44 | Calculate the specificity of a word either relative to the total word count or as an absolute rank.
45 |
46 | Parameters:
47 | word (str): The word to calculate specificity for.
48 | relative (bool): If True, calculate specificity relative to the total word count.
49 |
50 | Returns:
51 | float: The relative specificity if `relative` is True; otherwise, the absolute rank.
52 | """
53 |
54 | if relative:
55 | return (self.sorted_counts.index(self.word_dict[word])+1)/len(self.sorted_counts)
56 | else:
57 | return (self.sorted_counts.index(self.word_dict[word])+1)
58 |
59 |
60 | def word_rank(
61 | self, word
62 | ):
63 | """
64 | Get the rank of a word based on its frequency.
65 |
66 | Parameters:
67 | word (str): The word to get the rank for.
68 |
69 | Returns:
70 | int: The rank of the word.
71 | """
72 |
73 | return next(i for i, (w, *_) in enumerate(self.sorted_word_count) if w == word)
74 |
75 |
76 | def word_frequency(
77 | self, word
78 | ):
79 | """
80 | Get the frequency of a word in the data.
81 |
82 | Parameters:
83 | word (str): The word to get the frequency for.
84 |
85 | Returns:
86 | int: The frequency of the word.
87 | """
88 |
89 | return next(i for (w,i) in self.sorted_word_count if w == word)
90 |
91 |
92 | def average_rank(self):
93 | """
94 |         Find the frequency-weighted median rank of all words.
95 | 
96 |         Returns:
97 |             int: The rank at which the cumulative word count reaches half of the total count.
98 | """
99 | count = 0
100 | for tup in self.sorted_word_count:
101 | count += tup[1]
102 |
103 | total_count = 0
104 | idx = 0
105 |
106 | while total_count < count/2:
107 | total_count+=self.sorted_word_count[idx][1]
108 | idx+=1
109 | return idx
110 |
111 |
112 | def average_frequency(self):
113 | """
114 |         Get the frequency of the word at the frequency-weighted median rank.
115 | 
116 |         Returns:
117 |             int: The frequency of the word at that rank.
118 | """
119 | return self.sorted_word_count[self.average_rank()][1]
120 |
121 |
122 | def topic_specificities_one_topic(
123 | self, topic_index, relative = True
124 | ):
125 | """
126 | Calculate the specificities of words in one topic.
127 |
128 | Parameters:
129 | topic_index (int): The index of the topic to calculate specificities for.
130 | relative (bool): If True, calculate specificities relative to the total word count.
131 |
132 | Returns:
133 | list: A list of specificities for the words in the topic.
134 | """
135 |
136 | word_specificities = []
137 |
138 | for word in self.topics[topic_index]:
139 | word_specificities.append(self.specificity_word(word, relative))
140 |
141 | return word_specificities
142 |
143 |
144 | def mean_topic_specificity_one_topic(
145 | self, topic_index, relative = True
146 | ):
147 | """
148 | Calculate the mean specificity of words in one topic.
149 |
150 | Parameters:
151 | topic_index (int): The index of the topic to calculate mean specificity for.
152 | relative (bool): If True, calculate mean specificity relative to the total word count.
153 |
154 | Returns:
155 | float: The mean specificity of the words in the topic.
156 | """
157 | specificities = self.topic_specificities_one_topic(topic_index, relative = relative)
158 | return sum(specificities)/len(specificities)
159 |
160 |
161 | def standard_deviation_topic_specificity_one_topic(
162 | self, topic_index, relative = True
163 | ):
164 | """
165 | Calculate the standard deviation of specificities for words in one topic.
166 |
167 | Parameters:
168 |             topic_index (int): The index of the topic.
169 | relative (bool): If True, calculate specificities relative to the total word count.
170 |
171 | Returns:
172 | float: The standard deviation of specificities for the topic.
173 | """
174 | specificities = self.topic_specificities_one_topic(topic_index, relative = relative)
175 | return statistics.stdev(specificities)
176 |
177 |
178 | def topic_specificities_all_topics(
179 | self, relative = True
180 | ):
181 | """
182 | Calculate the specificities of all topics.
183 |
184 | Parameters:
185 | relative (bool): If True, calculate specificities relative to the total word count.
186 |
187 | Returns:
188 | list: A list containing specificities for all topics.
189 | """
190 | topic_list = []
191 | for i in range(len(self.topics)):
192 |             topic_list.append(self.topic_specificities_one_topic(i, relative=relative))
193 | return topic_list
194 |
195 |
196 | def mean_topic_specificity_all_topics(
197 | self, relative = True
198 | ):
199 | """
200 | Calculate the mean specificity across all topics.
201 |
202 | Parameters:
203 | relative (bool): If True, calculate mean specificity relative to the total word count.
204 |
205 | Returns:
206 | float: The mean specificity across all topics.
207 | """
208 | topic_specificities = self.topic_specificities_all_topics(relative = relative)
209 | count = 0
210 | total_words = 0
211 |
212 | for topic in topic_specificities:
213 | count += sum(topic)
214 | total_words += len(topic)
215 | return count/total_words
216 |
217 |
218 | def __transform_lda_output_to_topic_list__(
219 | self, topics
220 | ):
221 | """
222 | Private method to transform LDA output to a standard topic list.
223 |
224 | Parameters:
225 | topics (list): LDA output to be transformed.
226 |
227 | Returns:
228 | list: A list of topics with each topic represented as a list of words.
229 | """
230 | all_topics = []
231 | for i, top in enumerate(topics):
232 | topic_list = []
233 | for word in top[1].split('+'):
234 | topic_list.append(word.split('"')[1])
235 | all_topics.append(topic_list)
236 | return all_topics
237 |
238 |
239 | def _sorted_word_count__(
240 | self, data
241 | ):
242 | """
243 | Private method to get a sorted word count from the provided data.
244 |
245 | Parameters:
246 | data (list): The data to count words from.
247 |
248 | Returns:
249 | tuple: A counter of words and a list of word-count tuples sorted by frequency.
250 | """
251 | word_dict = Counter()
252 | for doc in data:
253 | word_dict.update(doc)
254 | return word_dict, word_dict.most_common()
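
# Illustrative usage (a documentation sketch, not part of the original module;
# 'topics' and 'data' below are hypothetical inputs):
#
#     from FuzzyTM.analysis.topic_specificity import TopicSpecificity
#
#     topics = [['price', 'market', 'stock'], ['game', 'team', 'won']]
#     data = [['the', 'stock', 'market', 'price', 'rose'],
#             ['the', 'team', 'won', 'the', 'game']]
#     ts = TopicSpecificity(topics, data)
#     ts.mean_topic_specificity_all_topics(relative=True)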
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
282 | How to Apply These Terms to Your New Programs
283 |
284 | If you develop a new program, and you want it to be of the greatest
285 | possible use to the public, the best way to achieve this is to make it
286 | free software which everyone can redistribute and change under these terms.
287 |
288 | To do so, attach the following notices to the program. It is safest
289 | to attach them to the start of each source file to most effectively
290 | convey the exclusion of warranty; and each file should have at least
291 | the "copyright" line and a pointer to where the full notice is found.
292 |
293 |
294 | Copyright (C)
295 |
296 | This program is free software; you can redistribute it and/or modify
297 | it under the terms of the GNU General Public License as published by
298 | the Free Software Foundation; either version 2 of the License, or
299 | (at your option) any later version.
300 |
301 | This program is distributed in the hope that it will be useful,
302 | but WITHOUT ANY WARRANTY; without even the implied warranty of
303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
304 | GNU General Public License for more details.
305 |
306 | You should have received a copy of the GNU General Public License along
307 | with this program; if not, write to the Free Software Foundation, Inc.,
308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
309 |
310 | Also add information on how to contact you by electronic and paper mail.
311 |
312 | If the program is interactive, make it output a short notice like this
313 | when it starts in an interactive mode:
314 |
315 | Gnomovision version 69, Copyright (C) year name of author
316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
317 | This is free software, and you are welcome to redistribute it
318 | under certain conditions; type `show c' for details.
319 |
320 | The hypothetical commands `show w' and `show c' should show the appropriate
321 | parts of the General Public License. Of course, the commands you use may
322 | be called something other than `show w' and `show c'; they could even be
323 | mouse-clicks or menu items--whatever suits your program.
324 |
325 | You should also get your employer (if you work as a programmer) or your
326 | school, if any, to sign a "copyright disclaimer" for the program, if
327 | necessary. Here is a sample; alter the names:
328 |
329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program
330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 |
332 | , 1 April 1989
333 | Ty Coon, President of Vice
334 |
335 | This General Public License does not permit incorporating your program into
336 | proprietary programs. If your program is a subroutine library, you may
337 | consider it more useful to permit linking proprietary applications with the
338 | library. If this is what you want to do, use the GNU Lesser General
339 | Public License instead of this License.
340 |
--------------------------------------------------------------------------------
/FuzzyTM/FuzzyTM.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Oct 15 13:49:08 2021
4 |
5 | @author: Emil Rijcken
6 | """
7 | import math
8 | from collections import Counter
9 | import warnings
10 | import pickle
11 | import numpy as np
12 | import pandas as pd
13 | from scipy.sparse.linalg import svds
14 | from scipy.sparse import dok_matrix
15 | from pyfume import Clustering
16 | import gensim.corpora as corpora
17 | from gensim.models.coherencemodel import CoherenceModel
18 | from gensim.models import Word2Vec
19 |
20 | class FuzzyTM():
21 | """
22 | Parent class to train various FLSA-based topic models.
23 | """
24 | def __init__(
25 | self,
26 | input_file,
27 | num_topics,
28 | algorithm,
29 | num_words,
30 | cluster_method,
31 | word_weighting='normal',
32 | svd_factors=2,
33 | vector_size = None,
34 | window = None,
35 | min_count = None,
36 | workers = None,
37 | ):
38 | self.input_file = input_file #List in which each element is a list of tokens
39 | self.algorithm = algorithm
40 | self.num_topics = num_topics
41 | self.num_words = num_words
42 | self.word_weighting = word_weighting
43 | self.cluster_method = cluster_method
44 | self.svd_factors = svd_factors
45 | self._check_variables()
46 | self.vector_size = vector_size
47 | self.window = window
48 | self.min_count = min_count
49 | self.workers = workers
50 |
51 | self._vocabulary, self._vocabulary_size = self._create_vocabulary(self.input_file)
52 | self._word_to_index, self._index_to_word = self._create_index_dicts(self._vocabulary)
53 | self._sum_words = self._create_sum_words(self.input_file)
54 | self._prob_word_i = None
55 | self._prob_document_j = None
56 | self._prob_topic_k = None
57 | self._prob_word_given_topic = None
58 | self._prob_word_given_document = None
59 | self.coherence_score = None
60 | self.diversity_score = None
61 |
62 | def _check_variables(self):
63 | """
64 | Check whether the input data has the right format.
65 |
66 | Correct format: list of list of str (tokens)
67 | The function raises an error if the format is incorrect.
68 | """
69 | if not isinstance(self.input_file, list):
70 | raise TypeError("Input file is not a list")
71 | for i, doc in enumerate(self.input_file):
72 |             if not isinstance(doc, list):
73 |                 raise TypeError(
74 |                     f"input_file variable at index {i} "
75 |                     "is not a list")
76 |             if not len(doc) > 0:
77 |                 raise ValueError(
78 |                     f"The input_file has an empty list at index {i} "
79 |                     "and should contain "
80 |                     "at least one str value")
81 | for j, word in enumerate(doc):
82 | if not isinstance(word, str):
83 | raise TypeError(f"Word {j} of document {i} is not a str")
84 | if not isinstance(self.num_topics, int) or self.num_topics < 1:
85 | raise ValueError("Please use a positive int for num_topics")
86 | if not isinstance(self.num_words, int) or self.num_words <1 :
87 | raise ValueError("Please use a positive int for num_words")
88 | if self.algorithm in ["flsa", "flsa-w"] and self.word_weighting not in [
89 | "entropy",
90 | "idf",
91 | "normal",
92 | "probidf",
93 | ]:
94 |             raise ValueError("Invalid word weighting method. Please choose between: 'entropy', 'idf', 'normal' and 'probidf'")
95 | if self.cluster_method not in [
96 | "fcm",
97 | "fst-pso",
98 | "gk",
99 | ]:
100 | raise ValueError(
101 |                 "Invalid 'cluster_method'. Please choose: 'fcm', 'fst-pso' or 'gk'")
102 |         if not isinstance(self.svd_factors, int) or self.svd_factors < 1:
103 | raise ValueError("Please use a positive int for svd_factors")
104 |
105 | @staticmethod
106 | def _create_vocabulary(input_file):
107 | """
108 | Create the vocabulary from 'input_file'.
109 |
110 | Parameters
111 | ----------
112 | input_file : list of lists of str
113 | The input file used to initialize the model.
114 |
115 | Returns
116 | -------
117 |         tuple of (set of str, int)
118 |             All the vocabulary words and the number of unique words.
119 | """
120 | vocabulary = set(el for lis in input_file for el in lis)
121 | return vocabulary, len(vocabulary)
122 |
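    # Illustrative example (hypothetical corpus, not part of the original code):
    # _create_vocabulary([['this', 'is', 'a', 'doc'], ['a', 'second', 'doc']])
    # returns ({'this', 'is', 'a', 'doc', 'second'}, 5).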
123 | @staticmethod
124 | def _create_index_dicts(vocabulary):
125 | """
126 | Create the dictionaries with mappings between words and indices.
127 |
128 | Parameters
129 | ----------
130 | vocabulary : set of str
131 | All the words in the corpus.
132 |
133 | Returns
134 | -------
135 | dict of {str : int}
136 |             Dictionary that maps a vocabulary word to an index number.
137 | dict of {int : str}
138 | Dictionary that maps an index number to each vocabulary word.
139 | """
140 | if not isinstance(vocabulary, set):
141 | raise ValueError("Please use a 'set' type for 'vocabulary'.")
142 | word_to_index = dict()
143 | index_to_word = dict()
144 | for i, word in enumerate(vocabulary):
145 | word_to_index[word] = i
146 | index_to_word[i] = word
147 | return word_to_index, index_to_word
148 |
149 | @staticmethod
150 | def _create_sum_words (input_file):
151 | """
152 | Creates a Counter object that stores the count of each word in the corpus (input_file).
153 |
154 | Parameters
155 | ----------
156 | input_file : list of lists of str
157 | The input file used to initialize the model.
158 |
159 | Returns
160 | -------
161 | collections.Counter {str : int}
162 | The count of each word in the corpus.
163 | """
164 | sum_words = Counter()
165 | for document in input_file:
166 | sum_words.update(Counter(document))
167 | return sum_words
168 |
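    # Illustrative example (hypothetical corpus, not part of the original code):
    # _create_sum_words([['a', 'b', 'a'], ['b', 'c']]) returns
    # Counter({'a': 2, 'b': 2, 'c': 1}).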
169 | @staticmethod
170 | def _create_sparse_local_term_weights(
171 | input_file, vocabulary_size, word_to_index,
172 | ):
173 | """
174 |         Creates a sparse matrix showing the frequency of each word in each document.
175 |
176 | (See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.html)
177 | Axes:
178 | rows: documents (size: number of documents in corpus)
179 | columns: words (size: vocabulary length)
180 |
181 | Parameters
182 | ----------
183 | input_file : list of lists of str
184 | The input file used to initialize the model.
185 | vocabulary_size : int
186 | Number of unique words in the corpus.
187 | word_to_index: dict {str : int}
188 | Maps each unique vocabulary word to a unique index number.
189 |
190 | Returns
191 | -------
192 | scipy.sparse.dok_matrix
193 | sparse matrix representation of the local term weights.
194 | """
195 | sparse_local_term_weights = dok_matrix(
196 | (
197 | len(input_file),
198 | vocabulary_size,
199 | ),
200 | dtype=np.float32,
201 | )
202 | for document_index, document in enumerate(input_file):
203 | document_counter = Counter(document)
204 | for word in document_counter.keys():
205 | sparse_local_term_weights[
206 | document_index,
207 | word_to_index[word],
208 | ] = document_counter[word]
209 | return sparse_local_term_weights
210 |
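    # Illustrative example (hypothetical corpus, not part of the original code):
    # for [['a', 'b', 'a'], ['b', 'c']] the returned dok_matrix has shape (2, 3),
    # with e.g. entry (0, word_to_index['a']) == 2.0, i.e. rows index documents
    # and columns index vocabulary words.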
211 | def _create_sparse_global_term_weights(
212 | self,
213 | input_file,
214 | word_weighting,
215 | vocabulary_size=None,
216 | sparse_local_term_weights=None,
217 | index_to_word=None,
218 | word_to_index=None,
219 | sum_words=None,
220 | ):
221 | """
222 | Apply a word_weighting method on the sparse_local_term_weights
223 | to create sparse_global_term_weights.
224 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
225 |
226 | Parameters
227 | ----------
228 | input_file : list of lists of str
229 | The input file used to initialize the model.
230 | word_weighting : str
231 | Indicates the method used for word_weighting. Choose from:
232 | - entropy
233 | - normal
234 | - idf
235 | - probidf
236 | vocabulary_size : int
237 | Number of unique words in the corpus.
238 | sparse_local_term_weights : scipy.sparse.dok_matrix
239 |             A sparse matrix showing the frequency of each word in each document.
240 | word_to_index : dict {str : int}
241 | Maps each unique vocabulary word to a unique index number.
242 | index_to_word : dict {int : str}
243 | Maps each unique index number to a unique vocabulary word.
244 | sum_words : collections.Counter {str : int}
245 | The count of each word in the corpus.
246 |
247 | Returns
248 | -------
249 | scipy.sparse.dok_matrix
250 | sparse matrix representation of the global term weights.
251 | """
252 | num_documents = len(input_file)
253 | if word_weighting in ['entropy','normal']:
254 | if sparse_local_term_weights is None:
255 | raise ValueError("Please feed the algorithm 'sparse_local_term_weights'")
256 | if word_weighting in ['entropy']:
257 | if index_to_word is None:
258 | raise ValueError("Please feed the algorithm 'index_to_word'")
259 | if sum_words is None:
260 | raise ValueError("Please feed the algorithm 'sum_words'")
261 | if word_weighting in ['entropy', 'idf', 'probidf']:
262 | if vocabulary_size is None:
263 | raise ValueError("Please feed the algorithm 'vocabulary_size'")
264 | if word_weighting in ['idf', 'probidf']:
265 | if word_to_index is None:
266 | raise ValueError("Please feed the algorithm 'word_to_index'")
267 | if word_weighting == 'entropy':
268 | global_term_weights = self._calculate_entropy(
269 | num_documents,
270 | vocabulary_size,
271 | sparse_local_term_weights,
272 | index_to_word, sum_words,
273 | )
274 | elif word_weighting == 'idf':
275 | global_term_weights = self._calculate_idf(
276 | num_documents,
277 | vocabulary_size,
278 | input_file,
279 | word_to_index,
280 | )
281 | elif word_weighting == 'normal':
282 | global_term_weights = self._calculate_normal(sparse_local_term_weights)
283 | elif word_weighting == 'probidf':
284 | global_term_weights = self._calculate_probidf(
285 | num_documents,
286 | vocabulary_size,
287 | input_file,
288 | word_to_index,
289 | )
290 | else:
291 | raise ValueError('Invalid word weighting method')
292 | return sparse_local_term_weights.multiply(global_term_weights).tocsc()
293 |
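    # Note (added for clarity): whichever weighting is chosen, the method returns
    # the element-wise product local_weight[j, i] * global_weight[i] as a CSC
    # matrix, where the per-word global weights are computed by one of the
    # _calculate_* helpers below.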
294 | def _calculate_entropy(
295 | self,
296 | num_documents,
297 | vocabulary_size,
298 | sparse_local_term_weights,
299 | index_to_word,
300 | sum_words,
301 | ):
302 | """
303 | Use the entropy word weighting method.
304 |
305 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
306 |
307 | Parameters
308 | ----------
309 | num_documents : int
310 | The number of documents in the corpus.
311 | vocabulary_size : int
312 | Number of unique words in the corpus.
313 | sparse_local_term_weights : scipy.sparse.dok_matrix
314 |             A sparse matrix showing the frequency of each word in each document.
315 | index_to_word : dict {int : str}
316 | Maps each unique index number to a unique vocabulary word.
317 | sum_words : collections.Counter {str : int}
318 | The count of each word in the corpus.
319 |
320 | Returns
321 | -------
322 | numpy.array : float
323 | """
324 | p_log_p_ij = self._create_p_log_p_ij(
325 | num_documents,
326 | vocabulary_size,
327 | sparse_local_term_weights,
328 | index_to_word,
329 | sum_words,
330 | )
331 | summed_p_log_p = p_log_p_ij.sum(0).tolist()[0]
332 | return np.array([1+ summed_p_log_p_i /np.log2(num_documents) for summed_p_log_p_i in summed_p_log_p])
333 |
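    # The entropy weight implemented above, written out for word i
    # (N = number of documents, p_ij = local count of word i in document j
    # divided by the corpus-wide count of word i):
    #     g_i = 1 + (sum_j p_ij * log2(p_ij)) / log2(N)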
334 | def _calculate_idf(
335 | self,
336 | num_documents,
337 | vocabulary_size,
338 | input_file,
339 | word_to_index,
340 | ):
341 | """
342 |         Use the idf word weighting method.
343 |
344 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
345 |
346 | Parameters
347 | ----------
348 | num_documents : int
349 | The number of documents in the corpus.
350 | vocabulary_size : int
351 | Number of unique words in the corpus.
352 | input_file : list of lists of str
353 | The input file used to initialize the model.
354 | word_to_index: dict {str : int}
355 | Maps each unique vocabulary word to a unique index number.
356 |
357 | Returns
358 | -------
359 | numpy.array : float
360 | """
361 | binary_sparse_dtm = self._create_sparse_binary_dtm(
362 | num_documents,
363 | vocabulary_size,
364 | input_file,
365 | word_to_index,
366 | )
367 | summed_words = binary_sparse_dtm.sum(0).tolist()[0]
368 | return np.array([np.log2(num_documents/word_count) for word_count in summed_words])
369 |
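    # The idf weight implemented above (N = number of documents,
    # n_i = number of documents containing word i):
    #     g_i = log2(N / n_i)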
370 | @staticmethod
371 | def _calculate_normal(
372 | sparse_local_term_weights,
373 | ):
374 | """
375 |         Use the normal word weighting method.
376 |
377 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
378 |
379 | Parameters
380 | ----------
381 | sparse_local_term_weights : scipy.sparse.dok_matrix
382 |             A sparse matrix showing the frequency of each word in each document.
383 |
384 | Returns
385 | -------
386 | numpy.array : float
387 | """
388 | squared_dtm = sparse_local_term_weights.multiply(sparse_local_term_weights)
389 | summed_words = squared_dtm.sum(0).tolist()[0]
390 | return np.array([1/(math.sqrt(word_count)) for word_count in summed_words])
391 |
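    # The normal weight implemented above (l_ij = local count of word i in
    # document j):
    #     g_i = 1 / sqrt(sum_j l_ij^2)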
392 | def _calculate_probidf (
393 | self,
394 | num_documents,
395 | vocabulary_size,
396 | input_file,
397 | word_to_index,
398 | ):
399 | """
400 |         Use the probidf word weighting method.
401 |
402 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
403 |
404 | Parameters
405 | ----------
406 | num_documents : int
407 | The number of documents in the corpus.
408 | vocabulary_size : int
409 | Number of unique words in the corpus.
410 | input_file : list of lists of str
411 | The input file used to initialize the model.
412 | word_to_index: dict {str : int}
413 | Maps each unique vocabulary word to a unique index number.
414 |
415 | Returns
416 | -------
417 | numpy.array : float
418 | """
419 | binary_sparse_dtm = self._create_sparse_binary_dtm(
420 | num_documents,
421 | vocabulary_size,
422 | input_file,
423 | word_to_index,
424 | )
425 | summed_binary_words_list = binary_sparse_dtm.sum(0).tolist()[0]
426 |
427 | return np.array([np.log2((num_documents - binary_word_count)/binary_word_count)
428 | for binary_word_count in summed_binary_words_list])
429 |
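    # The probidf weight implemented above (N = number of documents,
    # n_i = number of documents containing word i):
    #     g_i = log2((N - n_i) / n_i)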
430 | @staticmethod
431 | def _create_p_log_p_ij (
432 | num_documents,
433 | vocabulary_size,
434 | sparse_local_term_weights,
435 | index_to_word,
436 | sum_words,
437 | ):
438 | """
439 | Create probability of word i in document j, multiplied by its base-2 logarithm.
440 |
441 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
442 |
443 | Parameters
444 | ----------
445 | num_documents : int
446 | The number of documents in the corpus.
447 | vocabulary_size : int
448 | Number of unique words in the corpus.
449 | sparse_local_term_weights : scipy.sparse.dok_matrix
450 |             A sparse matrix showing the frequency of each word in each document.
451 | index_to_word : dict {int : str}
452 | Maps each unique index number to a unique vocabulary word.
453 | sum_words : collections.Counter {str : int}
454 | The count of each word in the corpus.
455 |
456 | Returns
457 | -------
458 | scipy.sparse.dok_matrix
459 | """
460 | p_log_p_ij = dok_matrix(
461 | (num_documents,vocabulary_size), dtype=np.float32,
462 | )
463 | for j in range(num_documents):
464 | row_counts = sparse_local_term_weights.getrow(j).toarray()[0]
465 | word_index = row_counts.nonzero()[0]
466 | non_zero_row_counts = row_counts[row_counts != 0]
467 | for i, count in enumerate(non_zero_row_counts):
468 | word = index_to_word[word_index[i]]
469 | prob_ij = count/sum_words[word]
470 | p_log_p_ij[j,word_index[i]] = prob_ij * np.log2(prob_ij)
471 | return p_log_p_ij
472 |
473 | @staticmethod
474 | def _create_sparse_binary_dtm(
475 | num_documents,
476 | vocabulary_size,
477 | input_file,
478 | word_to_index,
479 | ):
480 | """
481 | Create a binary sparse document-term-matrix (used for idf and probidf).
482 |
483 | (See: https://link.springer.com/article/10.1007/s40815-017-0327-9)
484 |
485 | Parameters
486 | ----------
487 | num_documents : int
488 | The number of documents in the corpus.
489 | vocabulary_size : int
490 | Number of unique words in the corpus.
491 | input_file : list of lists of str
492 | The input file used to initialize the model.
493 | word_to_index: dict {str : int}
494 | Maps each unique vocabulary word to a unique index number.
495 |
496 | Returns
497 | -------
498 | scipy.sparse.dok_matrix
499 | """
500 | binary_sparse_dtm = dok_matrix(
501 | (num_documents,vocabulary_size), dtype=np.float32,
502 | )
503 | for doc_index, document in enumerate(input_file):
504 | binary_document_counter = dict.fromkeys(document, 1)
505 | for word in set(document):
506 | binary_sparse_dtm[doc_index,
507 | word_to_index[word]] = binary_document_counter[word]
508 | return binary_sparse_dtm
509 |
510 | @staticmethod
511 | def _create_projected_data(
512 | algorithm,
513 | sparse_weighted_matrix,
514 | svd_factors,
515 | ):
516 | """
517 |         Perform singular value decomposition (SVD) for dimensionality reduction.
518 |
519 | (See: https://web.mit.edu/be.400/www/SVD/Singular_Value_Decomposition.htm)
520 |         For SVD on a sparse matrix, scipy's sparse SVD (scipy.sparse.linalg.svds) is used.
521 |
522 | Parameters
523 | ----------
524 | algorithm : str
525 | Indicator for which algorithm is being trained ('flsa' or 'flsa-w').
526 | sparse_weighted_matrix : scipy.sparse.dok_matrix
527 | Sparse global term matrix.
528 | svd_factors : int
529 | The number of singular values to include.
530 |
531 | Returns
532 | -------
533 | numpy.array : float
534 | """
535 | svd_u, _, svd_v = svds(
536 | sparse_weighted_matrix,
537 | svd_factors,
538 | )
539 | if algorithm in ['flsa']:
540 | return svd_u
541 | if algorithm in ['flsa-w']:
542 | return svd_v.T
543 |         raise ValueError('Invalid algorithm selected. '
544 |                          'Only "flsa" and "flsa-w" are currently supported.')
545 |
546 | @staticmethod
547 | def _create_partition_matrix(
548 | data,
549 | number_of_clusters,
550 | method = 'fcm',
551 | ):
552 | """
553 | Perform clustering on the projected data.
554 |
555 | The pyFUME package is used for clustering:
556 | (https://pyfume.readthedocs.io/en/latest/Clustering.html)
557 |
558 | Parameters
559 | ----------
560 | data: numpy.array
561 | The output from self._create_projected_data().
562 | number_of_clusters : int
563 | The number of clusters (topics).
564 | method : str
565 | The cluster method, choose from: 'fcm', 'gk', 'fst-pso'.
566 | Returns
567 | -------
568 | numpy.array : float
569 | """
570 | clusterer = Clustering.Clusterer(
571 | nr_clus = number_of_clusters,
572 | data= data,
573 | )
574 | _, partition_matrix, _ = clusterer.cluster(method= method)
575 | return partition_matrix
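    # Note (added for clarity): the returned partition matrix has one row per
    # clustered data point (documents for 'flsa', words for 'flsa-w') and one
    # column per topic; each row holds the fuzzy membership degrees produced
    # by pyFUME's Clusterer.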
576 |
577 | @staticmethod
578 | def _create_prob_document_j(sparse_matrix):
579 | """
580 | Get the probability of document j.
581 |
582 | Parameters
583 | ----------
584 | sparse_matrix : scipy.sparse.dok_matrix
585 | A sparse matrix representation of the global term weights.
586 |
587 | Returns
588 | -------
589 | numpy.array : float
590 | (shape: number of documents x 1)
591 | """
592 | #Vector with the length of num_document,
593 | #each cell represents the sum of all weights of a document
594 | document_sum = np.array([doc[0] for doc in sparse_matrix.sum(1).tolist()])
595 | #sum of all the elements in the weighted matrix
596 | total_sum_d = sum(sparse_matrix.sum(0).tolist()[0])
597 | return document_sum/total_sum_d #normalized probability
598 |
599 | @staticmethod
600 | def _create_prob_word_i(sparse_matrix):
601 | """
602 | Get the probability of word i.
603 |
604 | Parameters
605 | ----------
606 | sparse_matrix : scipy.sparse.dok_matrix
607 | A sparse matrix representation of the global term weights.
608 |
609 | Returns
610 | -------
611 | numpy.array : float
612 | (shape: vocabulary_size x 1)
613 | """
614 | word_sum = np.array(sparse_matrix.sum(0).tolist())
615 | #Sum of all the elements in the weighted matrix
616 | total_sum_w = sum(sparse_matrix.sum(0).tolist()[0])
617 | return (word_sum / total_sum_w)[0] #normalized probability
618 |
619 | @staticmethod
620 | def _create_prob_topic_k(
621 | prob_topic_given_word_transpose,
622 | prob_word_i,
623 | ):
624 | """
625 | Get the probability of topic k.
626 |
627 | Parameters
628 | ----------
629 | prob_topic_given_word_transpose : numpy.array : float
630 | The output from self._create_partition_matrix().
631 | prob_word_i : numpy.array : float
632 | The output from self._create_prob_word_i().
633 |
634 | Returns
635 | -------
636 | numpy.array : float
637 | (shape: 1 x number of topics)
638 | """
639 |
640 | return np.matmul(prob_topic_given_word_transpose.T,prob_word_i)
641 |
642 | @staticmethod
643 | def _check_passed_variables(
644 | algorithm,
645 | prob_topic_given_document_transpose,
646 | prob_topic_given_word_transpose,
647 | local_term_weights,
648 | global_term_weights,
649 | ):
650 | """
651 | Check whether the algorithms are being fed the right attributes.
652 | """
653 | if algorithm in [
654 | 'flsa',
655 | ]:
656 | if prob_topic_given_document_transpose is None:
657 |                 raise ValueError("Please feed the method "
658 |                                  "'prob_topic_given_document_transpose' to run flsa")
659 | if global_term_weights is None:
660 |                 raise ValueError("Please feed the method 'global_term_weights' to run flsa")
661 | elif algorithm in [
662 | 'flsa-w',
663 | ]:
664 | if prob_topic_given_word_transpose is None:
665 |                 raise ValueError("Please feed the method "
666 |                                  "'prob_topic_given_word_transpose' to run flsa-w")
667 | if global_term_weights is None:
668 |                 raise ValueError("Please feed the method 'global_term_weights' "
669 |                                  "to run flsa-w")
670 | elif algorithm in [
671 | 'flsa-v',
672 | 'flsa-e',
673 | ]:
674 | if prob_topic_given_word_transpose is None:
675 |                 raise ValueError("Please feed the method "
676 |                                  "'prob_topic_given_word_transpose' to run flsa-v or flsa-e")
677 | if local_term_weights is None:
678 |                 raise ValueError("Please feed the method 'local_term_weights' to run flsa-v or flsa-e")
679 |
680 | else:
681 | raise ValueError('Your algorithm is currently not supported')
682 |
683 | def _create_probability_matrices(
684 | self,
685 | algorithm,
686 | prob_topic_given_document_transpose = None,
687 | prob_topic_given_word_transpose = None,
688 | local_term_weights = None,
689 | global_term_weights = None,
690 | ):
691 | """
692 | Method that performs matrix multiplications to obtain the output matrices.
693 |
694 |         The 'algorithm' parameter is always required; which of the other
695 |         parameters must be passed depends on the selected algorithm.
696 |
697 | Parameters
698 | ----------
699 | algorithm : str
700 | Indicator for which algorithm is being trained ('flsa' or 'flsa-w').
701 | global_term_weights : scipy.sparse.dok_matrix
702 | The output from self._create_partition_matrix().
703 | prob_topic_given_document_transpose : numpy.array : float
704 | The output from self._create_partition_matrix() (flsa)
705 | prob_topic_given_word_transpose : numpy.array : float
706 | (flsa-w)
707 |
708 | Returns
709 | -------
710 | numpy.array : float
711 |             The probability of a word given a topic.
712 |         numpy.array : float
713 |             The probability of a topic given a document.
714 | """
715 |         # Check whether the right variables are passed into the method.
716 | self._check_passed_variables(
717 | algorithm,
718 | prob_topic_given_document_transpose,
719 | prob_topic_given_word_transpose,
720 | local_term_weights,
721 | global_term_weights,
722 | )
723 |
724 | #Calculate the initial probabilities
725 | if algorithm in [
726 | 'flsa',
727 | 'flsa-w',
728 | ]:
729 | self._prob_word_i = self._create_prob_word_i(global_term_weights)
730 | self._prob_document_j = self._create_prob_document_j(global_term_weights)
731 | if algorithm in [
732 | 'flsa-w',
733 | ]:
734 | self._prob_topic_k = self._create_prob_topic_k(
735 | prob_topic_given_word_transpose,
736 | self._prob_word_i,
737 | )
738 | elif algorithm in [
739 | 'flsa-v',
740 | 'flsa-e',
741 | ]:
742 | self._prob_word_i = self._create_prob_word_i(local_term_weights)
743 | self._prob_document_j = self._create_prob_document_j(local_term_weights)
744 | self._prob_topic_k = self._create_prob_topic_k(
745 | prob_topic_given_word_transpose, self._prob_word_i,
746 | )
747 | if algorithm in [
748 | 'flsa',
749 | ]:
750 | prob_document_and_topic = (prob_topic_given_document_transpose.T * self._prob_document_j).T
751 | prob_document_given_topic = prob_document_and_topic / prob_document_and_topic.sum(axis=0)
752 | self._prob_word_given_document = global_term_weights / global_term_weights.sum(1)
753 | self._prob_word_given_topic = self._prob_word_given_document.T.dot(
754 | prob_document_given_topic
755 | )
756 | prob_topic_given_document = prob_topic_given_document_transpose.T
757 | return self._prob_word_given_topic, prob_topic_given_document
758 |
759 | elif algorithm in [
760 | 'flsa-w',
761 | 'flsa-v',
762 | 'flsa-e'
763 | ]:
764 | prob_word_and_topic = (prob_topic_given_word_transpose.T * self._prob_word_i).T
765 | self._prob_word_given_topic = prob_word_and_topic / prob_word_and_topic.sum(axis=0)
766 | if algorithm in [
767 | 'flsa-w',
768 | ]:
769 | self._prob_word_given_document = (global_term_weights / global_term_weights.sum(1)).T
770 | elif algorithm in [
771 | 'flsa-v',
772 | 'flsa-e',
773 | ]:
774 | self._prob_word_given_document = np.asarray(local_term_weights / local_term_weights.sum(1)).T
775 |             prob_document_given_word = (
776 |                 self._prob_word_given_document.T.multiply(
777 |                     np.reshape(self._prob_document_j, (-1, 1))
778 |                 ).toarray()
779 |                 / np.reshape(np.array(self._prob_word_i), (1, -1))
780 |             )
776 | prob_document_given_topic = prob_document_given_word.dot(
777 | self._prob_word_given_topic
778 | )
779 | prob_topic_given_document = ((prob_document_given_topic * self._prob_topic_k).T/
780 | self._prob_document_j)
781 | return self._prob_word_given_topic, prob_topic_given_document
782 | raise ValueError('"algorithm" is unknown.')
783 |
784 | def show_topics(
785 | self,
786 | prob_word_given_topic = None,
787 | num_words = -1,
788 | index_to_word = None, representation = 'both',
789 | ):
790 | """
791 | Show the top-n words associated with each topic.
792 |
793 | Parameters
794 | ----------
795 | prob_word_given_topic : numpy.array : float
796 | Matrix that gives the probability of a word given a topic.
797 | num_words : int
798 | Indicates how many words per topic should be shown.
799 | index_to_word : dict {int : str}
800 | Maps each unique index number to a unique vocabulary word.
801 | representation : str
802 | Indicates whether only words are returned or a combination
803 | of words and probabilities per word (either 'both' or 'words').
804 |
805 | Returns
806 | -------
807 |         list of tuples (int, str) or list of lists of str
808 |             The produced topics; the format depends on 'representation'.
809 | """
810 | if prob_word_given_topic is None:
811 | prob_word_given_topic = self._prob_word_given_topic
812 | if num_words < 0:
813 | num_words = self.num_words
814 | if index_to_word is None:
815 | index_to_word = self._index_to_word
816 | if representation not in [
817 | 'both',
818 | 'words',
819 | ]:
820 | raise ValueError("Invalid representation. Choose between 'both' or 'words'")
821 | if not isinstance(prob_word_given_topic,np.ndarray):
822 | raise TypeError("Please feed the algorithm 'prob_word_given_topic' as a np.ndarray")
823 | if not isinstance(index_to_word, dict):
824 | raise TypeError("Please feed the algorithm 'index_to_word' as a dict")
825 | if not isinstance(num_words, int) or num_words <= 0:
826 | raise TypeError("Please use a positive int for 'num_words'.")
827 | if prob_word_given_topic.shape[0] < prob_word_given_topic.shape[1]:
828 |             raise ValueError("'prob_word_given_topic' has more columns than rows; "
829 |                              "you probably need to take the transpose.")
830 |         warning = ["It seems like 'prob_word_given_topic' and 'index_to_word'",
831 |                    "are not aligned. The number of vocabulary words in",
832 |                    "'prob_word_given_topic' deviates from the",
833 |                    "number of words in 'index_to_word'."]
834 | if prob_word_given_topic.shape[0] != len(index_to_word.keys()):
835 | warnings.warn(' '.join(warning))
836 | topic_list = []
837 | if representation == 'both':
838 | for topic_index in range(prob_word_given_topic.shape[1]):
839 | weight_words = ""
840 | sorted_highest_weight_indices = prob_word_given_topic[:,topic_index].argsort()[-num_words:][::-1]
841 | for word_index in sorted_highest_weight_indices:
842 | weight_words += (str(round(prob_word_given_topic[word_index,topic_index],4)) +
843 | '*"' + index_to_word[word_index] + '" + ')
844 | topic_list.append((topic_index, weight_words[:-3]))
845 | return topic_list
846 | else:
847 | for topic_index in range(prob_word_given_topic.shape[1]):
848 | word_list = []
849 | sorted_highest_weight_indices = prob_word_given_topic[:,topic_index].argsort()[-num_words:][::-1]
850 | for word_index in sorted_highest_weight_indices:
851 | word_list.append(index_to_word[word_index])
852 | topic_list.append(word_list)
853 | return topic_list
854 |
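    # Illustrative output (hypothetical values, not from the original code):
    # with representation='both' each topic is a tuple such as
    #     (0, '0.0421*"word_a" + 0.0399*"word_b"'),
    # with representation='words' each topic is a plain list such as
    #     ['word_a', 'word_b'].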
855 | def get_topic_embedding(
856 | self,
857 | input_file,
858 | prob_word_given_topic=None,
859 | method = 'topn',
860 | topn = 20,
861 | perc=0.05,
862 | ):
863 | """
864 | Create a topic embedding for each input document, to be used as input to predictive models.
865 |
866 | Parameters
867 | ----------
868 | input_file : list of lists of str
869 | The input file used to initialize the model.
870 | prob_word_given_topic : numpy.array : float
871 | Matrix that gives the probability of a word given a topic.
872 | method : str
873 | Method to select words to be included in the embedding.
874 | (choose from 'topn', 'percentile'):
875 | - topn: for each topic the top n words with the highest
876 | probability are included.
877 | - percentile: for each topic all words with highest
878 | probabilities are assigned while the cumulative
879 | probability is lower than the percentile.
880 | topn : int
881 |             The top-n words to include (only used when method='topn').
882 | perc: float
883 | The benchmark percentile until which words need to be added (between 0 and 1).
884 |
885 | Returns
886 | -------
887 | numpy.array : float
888 | Array in which each row gives the topic embedding for the associated document.
889 | """
890 | self._check_variables()
891 | if prob_word_given_topic is None:
892 | prob_word_given_topic = self._prob_word_given_topic
893 | top_dist = []
894 | if method not in ['topn','percentile']:
895 |             raise ValueError(f"'{method}' is not a valid option for 'method'. "
896 |                              "Choose either 'topn' or 'percentile'")
897 | if method == 'topn':
898 | dictlist = self._create_dictlist_topn(
899 | topn,
900 | prob_word_given_topic,
901 | self._index_to_word,
902 | )
903 | else:
904 | dictlist = self._create_dictlist_percentile(
905 | perc,
906 | prob_word_given_topic,
907 | self._index_to_word,
908 | )
909 | for doc in input_file:
910 | topic_weights = [0] * prob_word_given_topic.shape[1]
911 | for word in doc:
912 | for i in range(prob_word_given_topic.shape[1]):
913 | topic_weights[i] += dictlist[i].get(word, 0)
914 | top_dist.append(topic_weights)
915 | return np.array(top_dist)
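    # Illustrative example (hypothetical, not from the original code): for a
    # model trained with num_topics=10, get_topic_embedding(input_file) returns
    # an array of shape (len(input_file), 10) in which entry (j, k) sums the
    # selected word probabilities of topic k over the words of document j.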
916 |
917 | @staticmethod
918 | def _create_dictlist_topn(
919 | topn,
920 | prob_word_given_topic,
921 | index_to_word,
922 | ):
923 | """
924 | Creates a list with dictionaries of word probabilities per topic based on the top-n words.
925 |
926 | Parameters
927 | ----------
928 | topn : int
929 |             The top-n words to include (only used when method='topn').
930 | prob_word_given_topic : numpy.array : float
931 | Matrix that gives the probability of a word given a topic.
932 | index_to_word : dict {int : str}
933 | Maps each unique index number to a unique vocabulary word.
934 |
935 | Returns
936 | -------
937 |         list of dicts {str : float}
938 |             Keys: the words from prob_word_given_topic whose weights
939 |             are amongst the top n for the topic.
940 |             Values: the probability associated with each word.
941 | """
942 |         if not isinstance(topn, int) or topn <= 0:
943 | raise ValueError("Please choose a positive integer for 'topn'")
944 | top_dictionaries = []
945 | for topic_index in range(prob_word_given_topic.shape[1]):
946 | new_dict = dict()
947 | highest_weight_indices = prob_word_given_topic[:,topic_index].argsort()[-topn:]
948 | for word_index in highest_weight_indices:
949 | new_dict[index_to_word[word_index]] = prob_word_given_topic[
950 | word_index,
951 | topic_index,
952 | ]
953 | top_dictionaries.append(new_dict)
954 | return top_dictionaries
955 |
956 | @staticmethod
957 | def _create_dictlist_percentile(
958 | perc,
959 | prob_word_given_topic,
960 | index_to_word,
961 | ):
962 | """
963 | Create a list with dictionaries of word probabilities per topic based on the percentile.
964 |         - Keys: the words from prob_word_given_topic whose weights
965 |           are amongst the top percentile.
966 |         - Values: the probability associated with each word.
967 |
968 | Parameters
969 | ----------
970 | perc : float
971 |             The top percentile of words to include (only used when method='percentile').
972 | prob_word_given_topic : numpy.array : float
973 | Matrix that gives the probability of a word given a topic.
974 | index_to_word : dict {int : str}
975 | Maps each unique index number to a unique vocabulary word.
976 |
977 | Returns
978 | -------
979 |         list of dicts {str : float}
980 |             Keys: the words from prob_word_given_topic whose weights
981 |             are amongst the top percentile for the topic.
982 |             Values: the probability associated with each word.
983 | """
984 |         if not isinstance(perc, float) or not 0 <= perc <= 1:
985 | raise ValueError("Please choose a number between 0 and 1 for 'perc'")
986 | top_list = []
987 | for top in range(prob_word_given_topic.shape[1]):
988 | new_dict = dict()
989 | count = 0
990 | i = 0
991 | weights = np.sort(prob_word_given_topic[:,top])[::-1]
992 | word_indices = np.argsort(prob_word_given_topic[:,top])[::-1]
993 | while count < perc:
994 | new_dict[index_to_word[word_indices[i]]] = weights[i]
995 | count+=weights[i]
996 | i+=1
997 | top_list.append(new_dict)
998 | return top_list
999 |
1000 | def get_coherence_score(
1001 | self,
1002 | input_file=None,
1003 | topics=None,
1004 | coherence = 'c_v',
1005 | ):
1006 | """
1007 |         Calculate the coherence score for the generated topics.
1008 |
1009 | Parameters
1010 | ----------
1011 | input_file : list of lists of str
1012 | The input file used to initialize the model.
1013 | topics : list of lists of str
1014 | The words per topics, equivalent to self.show_topics(representation='words').
1015 | coherence : str
1016 | The type of coherence to be calculated.
1017 | Choose from: 'u_mass', 'c_v', 'c_uci', 'c_npmi'.
1018 |
1019 | Returns
1020 | -------
1021 | float
1022 | The coherence score.
1023 | """
1024 | if input_file is None and topics is None:
1025 | input_file=self.input_file
1026 | topics=self.show_topics(representation='words')
1027 |
1028 | id2word = corpora.Dictionary(input_file)
1029 | corpus = [id2word.doc2bow(text) for text in input_file]
1030 | self.coherence_score = CoherenceModel(
1031 | topics=topics, texts = input_file, corpus=corpus,
1032 | dictionary=id2word, coherence= coherence,
1033 | topn=len(topics[0])).get_coherence()
1034 | return self.coherence_score
1035 |
1036 | def get_diversity_score(
1037 | self,
1038 | topics=None,
1039 | ):
1040 |         """
1041 |         Calculate the diversity score for the generated topics.
1042 |
1043 | Diversity = number of unique words / number of total words.
1044 | See: https://tinyurl.com/2bs84zd8
1045 |
1046 | Parameters
1047 | ----------
1048 | topics : list of lists of str
1049 | The words per topics, equivalent to self.show_topics(representation='words').
1050 |
1051 | Returns
1052 | -------
1053 | float
1054 | The diversity score.
1055 | """
1056 | if topics is None:
1057 | topics = self.show_topics(representation='words')
1058 | unique_words = set()
1059 | total_words = 0
1060 | for top in topics:
1061 | unique_words.update(top)
1062 | total_words += len(top)
1063 | self.diversity_score = len(unique_words)/total_words
1064 | return self.diversity_score
1065 |
1066 | def get_interpretability_score(
1067 | self,
1068 | input_file=None,
1069 | topics=None,
1070 | coherence='c_v',
1071 | ):
1072 |         """
1073 | Calculate the interpretability score for the generated topics.
1074 |
1075 | Interpretability = coherence * diversity.
1076 | (see: https://tinyurl.com/2bs84zd8)
1077 |
1078 | Parameters
1079 | ----------
1080 | input_file : list of lists of str
1081 | The input file used to initialize the model.
1082 | topics : list of lists of str
1083 | The words per topics, equivalent to self.show_topics(representation='words').
1084 | coherence : str
1085 | The type of coherence to be calculated.
1086 | Choose from: 'u_mass', 'c_v', 'c_uci', 'c_npmi'.
1087 |
1088 | Returns
1089 | -------
1090 | float
1091 | The interpretability score.
1092 | """
1093 | if input_file is None and topics is None:
1094 | input_file=self.input_file
1095 | topics=self.show_topics(representation='words')
1096 | if self.coherence_score is None:
1097 | self.coherence_score = self.get_coherence_score(
1098 | input_file,
1099 | topics, coherence,
1100 | )
1101 | if self.diversity_score is None:
1102 | self.diversity_score = self.get_diversity_score(topics)
1103 | return self.coherence_score * self.diversity_score
1104 |
1105 | def get_vocabulary(self):
1106 | """
1107 | Returns a set of all the words in the corpus
1108 |
1109 | Example:
1110 | After initializing an instance of the FuzzyTM models as 'model'
1111 |
1112 | corpus= [['this','is','the','first','file'],
1113 | ['and','this','is','second','file']]
1114 |
1115 | model.get_vocabulary()
1116 |
1117 | >>> {'this','is','the','first','file','and','second'}
1118 | """
1119 | return self._vocabulary
1120 |
1121 | def get_vocabulary_size(self):
1122 | """
1123 | Returns the number of words in the vocabulary
1124 |
1125 | Example:
1126 | After initializing an instance of the FuzzyTM models as 'model'
1127 |
1128 | corpus= [['this','is','the','first','file'],
1129 | ['and','this','is','second','file']]
1130 |
1131 | model.get_vocabulary_size()
1132 |
1133 | >>> 7
1134 | """
1135 | return self._vocabulary_size
1136 |
1137 | def get_word_to_index(self):
1138 | """
1139 | Obtain a dictionary that maps each vocabulary word to an index.
1140 |
1141 | Returns
1142 | -------
1143 | dict of {str : int}
1144 | word to int mapping.
1145 | """
1146 | return self._word_to_index
1147 |
1148 | def get_index_to_word(self):
1149 | """
1150 | Obtain a dictionary that maps index numbers to vocabulary words.
1151 |
1152 | Returns
1153 | -------
1154 | dict of {int : str}
1155 | int to word mapping.
1156 | """
1157 | return self._index_to_word
1158 |
1159 | def get_input_file(self):
1160 | """
1161 | Return the input file.
1162 |
1163 | Returns
1164 | -------
1165 | list of list of str
1166 | The input file 'input_file'.
1167 | """
1168 | return self.input_file
1169 |
1170 | def get_prob_word_i(self):
1171 | """
1172 | Return the probabilities per word.
1173 |
1174 | Returns
1175 | -------
1176 | np.array of float
1177 | The probabilities per word.
1178 | """
1179 | return self._prob_word_i
1180 |
1181 | def get_prob_document_j(self):
1182 | """
1183 | Return the probabilities per document.
1184 |
1185 | Returns
1186 | -------
1187 | np.array of float
1188 | The probabilities per document.
1189 | """
1190 | return self._prob_document_j
1191 |
1192 | def get_prob_topic_k(self):
1193 | """
1194 | Return the probabilities per topic.
1195 |
1196 | Returns
1197 | -------
1198 | np.array of float
1199 | The probabilities per topic.
1200 | """
1201 | return self._prob_topic_k
1202 |
1203 | def save(
1204 | self,
1205 | filepath,
1206 | ):
1207 |         """
1208 |         Save the object to disk, using the pickle library.
1209 |
1210 | Parameters
1211 | ----------
1212 | filepath : str
1213 |             The path under which the file should be stored,
1214 |             either with or without the '.pickle' extension.
1215 |
1216 | """
1217 | if not isinstance(filepath, str):
1218 | raise ValueError('Make sure that "filepath" has type "str"')
1219 |         if filepath.endswith('/'):
1220 |             filepath += 'model.pickle'
1221 |         elif not filepath.endswith('.pickle'):
1222 |             filepath += '.pickle'
1223 |         with open(filepath, 'wb') as pickle_out:
1224 |             pickle.dump(self, pickle_out)
1227 |
1228 | def load(
1229 | self,
1230 | filepath,
1231 | ):
1232 |         """
1233 |         Load the object from disk, using the pickle library.
1234 |
1235 | Parameters
1236 | ----------
1237 | filepath : str
1238 |             The path from which the file should be loaded,
1239 |             either with or without the '.pickle' extension.
1245 | """
1246 | if not isinstance(filepath, str):
1247 | raise ValueError('Make sure that "filepath" has type "str"')
1248 |         if not filepath.endswith('.pickle'):
1249 |             if filepath.endswith('/'):
1250 |                 filepath += 'model.pickle'
1251 |             else:
1252 |                 filepath += '.pickle'
1253 |         with open(filepath, 'rb') as infile:
1254 |             self.__dict__ = pickle.load(infile).__dict__
1256 |
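    # Illustrative usage (hypothetical path, not from the original code):
    #     model.save('flsa_model')          # writes 'flsa_model.pickle'
    #     model.load('flsa_model.pickle')   # restores the saved state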
1257 |
1258 | class FLSA(FuzzyTM):
1259 | """
1260 |     Class to train the FLSA algorithm (see: https://tinyurl.com/mskjaeuu).
1261 |
1262 | Parameters
1263 | ----------
1264 | input_file : list of lists of str
1265 | The input file used to initialize the model.
1266 | num_topics : int
1267 | The number of topics that the model should train.
1268 | num_words : int
1269 | Indicates how many words per topic should be shown.
1270 | word_weighting : str
1271 | Indicates the method used for word_weighting. Choose from:
1272 | - entropy
1273 | - normal
1274 | - idf
1275 | - probidf
1276 | svd_factors : int
1277 | The number of singular values to include.
1278 | cluster_method : str
1279 | The cluster algorithm to be used ('fcm', 'gk', 'fst-pso').
1280 | """
1281 | def __init__(
1282 | self,
1283 | input_file,
1284 | num_topics,
1285 | num_words=10,
1286 | word_weighting='normal',
1287 | svd_factors=2,
1288 | cluster_method='fcm',
1289 | ):
1290 | super().__init__(
1291 | input_file=input_file,
1292 | num_topics=num_topics,
1293 | algorithm='flsa',
1294 | num_words=num_words,
1295 | word_weighting = word_weighting,
1296 | cluster_method=cluster_method,
1297 | svd_factors=svd_factors,
1298 | )
1299 |
1300 | def get_matrices(self):
1301 | """
1302 | Method to obtain the matrices after the model has been initialized.
1303 |
1304 | Returns
1305 | -------
1306 | numpy.array : float
1307 |             The probability of a word given a topic.
1308 |         numpy.array : float
1309 |             The probability of a topic given a document.
1310 | """
1311 | sparse_document_term_matrix = self._create_sparse_local_term_weights(
1312 | self.input_file,
1313 | self._vocabulary_size,
1314 | self._word_to_index,
1315 | )
1316 | sparse_global_term_weighting = self._create_sparse_global_term_weights(
1317 | input_file = self.input_file,
1318 | word_weighting = self.word_weighting,
1319 | vocabulary_size=self._vocabulary_size,
1320 | sparse_local_term_weights = sparse_document_term_matrix,
1321 | index_to_word = self._index_to_word,
1322 | word_to_index = self._word_to_index,
1323 | sum_words = self._sum_words,
1324 | )
1325 | projected_data = self._create_projected_data(
1326 | algorithm = 'flsa',
1327 | sparse_weighted_matrix = sparse_global_term_weighting,
1328 | svd_factors = self.svd_factors,
1329 | )
1330 | partition_matrix = self._create_partition_matrix(
1331 | data = projected_data,
1332 | number_of_clusters = self.num_topics,
1333 | method = self.cluster_method
1334 | )
1335 | return self._create_probability_matrices(
1336 | algorithm='flsa',
1337 | prob_topic_given_document_transpose = partition_matrix,
1338 | global_term_weights = sparse_global_term_weighting,
1339 | )
1340 |
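    # Illustrative end-to-end usage (hypothetical, not from the original code;
    # assumes 'corpus' is a realistically sized list of tokenized documents,
    # since the SVD and clustering steps need more documents than topics):
    #     model = FLSA(input_file=corpus, num_topics=10, num_words=10)
    #     pwgt, ptgd = model.get_matrices()
    #     topics = model.show_topics(representation='words')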
1341 | class FLSA_W(FuzzyTM):
1342 | """
1343 | Class to train the FLSA-W algorithm.
1344 |
1345 | See: https://ieeexplore.ieee.org/abstract/document/9660139
1346 |
1347 | Parameters
1348 | ----------
1349 | input_file : list of lists of str
1350 | The input file used to initialize the model.
1351 | num_topics : int
1352 | The number of topics that the model should train.
1353 | num_words : int
1354 | Indicates how many words per topic should be shown.
1355 | word_weighting : str
1356 | Indicates the method used for word_weighting. Choose from:
1357 | - entropy
1358 | - normal
1359 | - idf
1360 | - probidf
1361 | svd_factors : int
1362 | The number of singular values to include.
1363 | cluster_method : str
1364 | The cluster algorithm to be used ('fcm', 'gk', 'fst-pso').
1365 | """
1366 | def __init__(
1367 | self,
1368 | input_file,
1369 | num_topics,
1370 | num_words = 10,
1371 | word_weighting = 'normal',
1372 | svd_factors = 2,
1373 | cluster_method = 'fcm',
1374 | ):
1375 |
1376 | super().__init__(
1377 | input_file=input_file,
1378 | num_topics=num_topics,
1379 | algorithm='flsa-w',
1380 | num_words=num_words,
1381 | word_weighting = word_weighting,
1382 | cluster_method=cluster_method,
1383 | svd_factors=svd_factors,
1384 | )
1385 |
1386 | def get_matrices(self):
1387 | """
1388 | Method to obtain the matrices after the model has been initialized.
1389 |
1390 | Returns
1391 | -------
1392 | numpy.array : float
1393 |             The probability of a word given a topic.
1394 |         numpy.array : float
1395 |             The probability of a topic given a document.
1396 | """
1397 | sparse_document_term_matrix = self._create_sparse_local_term_weights(
1398 | self.input_file,
1399 | self._vocabulary_size,
1400 | self._word_to_index,
1401 | )
1402 | sparse_global_term_weighting = self._create_sparse_global_term_weights(
1403 | input_file = self.input_file,
1404 | word_weighting = self.word_weighting,
1405 | vocabulary_size=self._vocabulary_size,
1406 | sparse_local_term_weights = sparse_document_term_matrix,
1407 | index_to_word = self._index_to_word,
1408 | word_to_index = self._word_to_index,
1409 | sum_words = self._sum_words,
1410 | )
1411 | projected_data = self._create_projected_data(
1412 | algorithm = 'flsa-w',
1413 | sparse_weighted_matrix = sparse_global_term_weighting,
1414 | svd_factors = self.svd_factors,
1415 | )
1416 | partition_matrix = self._create_partition_matrix(
1417 | data = projected_data,
1418 | number_of_clusters = self.num_topics,
1419 | method = self.cluster_method,
1420 | )
1421 | return self._create_probability_matrices(
1422 | algorithm='flsa-w',
1423 | prob_topic_given_word_transpose = partition_matrix,
1424 | global_term_weights = sparse_global_term_weighting,
1425 | )
1426 |
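    # Usage mirrors FLSA (see the sketch under FLSA.get_matrices); the practical
    # difference is that the partition matrix here clusters words rather than
    # documents, e.g. FLSA_W(input_file=corpus, num_topics=10).get_matrices().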
1427 | class FLSA_V(FuzzyTM):
1428 | """
1429 | Class to train the FLSA-V algorithm.
1430 |
1431 | See: https://ieeexplore.ieee.org/abstract/document/9660139
1432 |
1433 | Parameters
1434 | ----------
1435 | input_file : list of lists of str
1436 | The input file used to initialize the model.
1437 | map_file: pd.DataFrame
1438 | The output file from Vosviewer.
1439 |         The DataFrame needs to contain the following columns: 'id', 'x', 'y'.
1440 | num_topics : int
1441 | The number of topics that the model should train.
1442 | num_words : int
1443 | Indicates how many words per topic should be shown.
1444 | cluster_method : str
1445 | The cluster algorithm to be used ('fcm', 'gk', 'fst-pso').
1446 | """
1447 | def __init__(
1448 | self,
1449 | input_file,
1450 | map_file,
1451 | num_topics,
1452 | num_words = 10,
1453 | cluster_method='fcm',
1454 | ):
1455 | self.map_file = map_file
1456 | if not isinstance(self.map_file, pd.DataFrame):
1457 |             raise TypeError(f"'map_file' should be a pd.DataFrame, but is a {type(self.map_file).__name__}")
1458 |
1459 |         if 'x' not in self.map_file.columns:
1460 |             raise ValueError("map_file has no 'x' column")
1461 |         if 'y' not in self.map_file.columns:
1462 |             raise ValueError("map_file has no 'y' column")
1463 |         if 'id' not in self.map_file.columns:
1464 |             raise ValueError("map_file has no 'id' column")
1465 |
1466 | self.filtered_input = self._get_filtered_input(
1467 | input_file,
1468 | self.map_file,
1469 | )
1470 | self.map_file = self._filter_map_file(
1471 | self.map_file,
1472 | self.filtered_input,
1473 | )
1474 |
1475 | super().__init__(
1476 | input_file=self.filtered_input,
1477 | num_topics=num_topics,
1478 | algorithm='flsa-v',
1479 | num_words=num_words,
1480 | cluster_method=cluster_method,
1481 | )
1482 |
1483 | vocab = self._create_vocabulary(self.filtered_input)[0]
1484 | self.coordinates = self._get_coordinates_from_map_file(
1485 | self.map_file, vocab,
1486 | )
1487 |
1488 | @staticmethod
1489 | def _get_filtered_input(
1490 | input_file, map_file,
1491 | ):
1492 | '''
1493 | Filter out words from the input_file that are not present in the map_file
1494 |
1495 | Parameters
1496 | ----------
1497 | input_file : list of lists of str
1498 | The input file used to initialize the model.
1499 | map_file: pd.DataFrame
1500 | The output file from Vosviewer.
1501 |         The DataFrame needs to contain the following columns: 'id', 'x', 'y'.
1502 |
1503 | Returns:
1504 | -------
1505 | list of list of tokens
1506 | a filtered input_file
1507 | '''
1508 | filtered_input_file = []
1509 | for doc in input_file:
1510 | filtered_input_file.append(list(np.intersect1d(map_file['id'], doc)))
1511 | return filtered_input_file
1512 |
1513 | @staticmethod
1514 | def _filter_map_file(
1515 | map_file,
1516 | filtered_input,
1517 | ):
1518 | """
1519 |         Function to reduce the map_file to words that occur in the filtered input.
1520 |
1521 | Parameters
1522 | ----------
1523 | map_file : pd.DataFrame
1524 | VOSviewer output.
1525 | filtered_input : list of lists of strings
1526 | Output from '_get_filtered_input()'
1527 |
1528 | Returns:
1529 | -------
1530 | pd.DataFrame
1531 | The filtered map file.
1532 |
1533 | """
1534 | map_words = map_file['label'].tolist()
1535 | filtered_set = set()
1536 | for text in filtered_input:
1537 | filtered_set.update(set(text))
1538 | filtered_list = list(filtered_set)
1539 | intersect = list(np.intersect1d(map_words, filtered_list))
1540 | return map_file[map_file.id.isin(intersect)]
1541 |
1542 | @staticmethod
1543 | def _get_coordinates_from_map_file(
1544 | map_file, vocab,
1545 | ):
1546 | """
1547 | Filter words that are in the map_file but not in the input file,
1548 | retrieve the coordinates from the map_file and convert them into a Numpy array.
1549 |
1550 | Parameters
1551 | ----------
1552 | map_file : pd.DataFrame
1553 | VOSviewer output.
1554 | vocab : set of str
1555 | All the vocabulary words.
1556 |
1557 | Returns:
1558 | -------
1559 | numpy.array : float
1560 | The coordinates from the map_file
1561 |
1562 | """
1563 | for word in map_file['id']:
1564 | if word not in vocab:
1565 | map_file = map_file[map_file.id != word]
1566 | x_axis = np.array(map_file['x'])
1567 | y_axis = np.array(map_file['y'])
1568 | return np.array([x_axis,y_axis]).T
1569 |
1570 | def get_matrices(self):
1571 | """
1572 |         Method to run after the FLSA_V class has been initialized to obtain the output matrices.
1573 |
1574 | Returns
1575 | -------
1576 | numpy.array : float
1577 |             The probability of a word given a topic.
1578 |         numpy.array : float
1579 |             The probability of a topic given a document.
1580 | """
1581 | sparse_document_term_matrix = self._create_sparse_local_term_weights(
1582 | self.filtered_input,
1583 | self._vocabulary_size,
1584 | self._word_to_index,
1585 | )
1586 |
1587 | partition_matrix = self._create_partition_matrix(
1588 | data = self.coordinates,
1589 | number_of_clusters = self.num_topics,
1590 | method = self.cluster_method,
1591 | )
1592 |
1593 | return self._create_probability_matrices(
1594 | algorithm='flsa-v',
1595 | prob_topic_given_word_transpose = partition_matrix,
1596 | local_term_weights = sparse_document_term_matrix,
1597 | )
1598 |
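    # Illustrative usage (hypothetical file name, not from the original code):
    # 'map_file' is a VOSviewer map export loaded as a pandas DataFrame; note
    # that besides 'id', 'x' and 'y' the code above also reads a 'label' column.
    #     map_file = pd.read_csv('vosviewer_map.txt', sep='\t')
    #     model = FLSA_V(input_file=corpus, map_file=map_file, num_topics=10)
    #     pwgt, ptgd = model.get_matrices()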
1599 | class FLSA_E(FuzzyTM):
1600 | """
1601 | Class to train the FLSA-E algorithm.
1602 |
1603 | See: https://tinyurl.com/5n8utppk
1604 |
1605 | Parameters
1606 | ----------
1607 |     input_file : list of lists of str
1608 | The input file used to initialize the model.
1609 | num_topics : int
1610 | The number of topics that the model should train.
1611 | num_words : int
1612 | Indicates how many words per topic should be shown.
1613 | cluster_method : str
1614 | The cluster algorithm to be used ('fcm', 'gk', 'fst-pso').
1615 | min_count : int
1616 | Ignores all words with total frequency lower than this.
1617 | window : int
1618 | Maximum distance between the current and predicted word within a sentence.
1619 | vector_size : int
1620 | Dimensionality of the word vectors.
1621 | workers : int
1622 |         Use this many worker threads to train the model (faster training on multicore machines).
1623 | """
1624 | def __init__(
1625 | self,
1626 | input_file,
1627 | num_topics,
1628 | num_words = 10,
1629 | cluster_method = 'fcm',
1630 | min_count = 1,
1631 | window = 5,
1632 | vector_size = 20,
1633 | workers = 4,
1634 | ):
1635 |
1636 |         self.model = None  # Word2Vec model, set by get_word_embedding()
1637 |         self.word_embedding = None  # word vectors, set in get_matrices()
1638 |
1639 | super().__init__(
1640 | algorithm = 'flsa-e',
1641 | input_file=input_file,
1642 | num_topics=num_topics,
1643 | num_words=num_words,
1644 | cluster_method=cluster_method,
1645 | vector_size = vector_size,
1646 | window = window,
1647 | min_count = min_count,
1648 | workers = workers,
1649 | )
1650 |
1651 | def get_word_embedding(
1652 | self,
1653 | data,
1654 | vector_size,
1655 | window,
1656 | min_count,
1657 | workers,
1658 | ):
1659 | """
1660 | Method to train a word embedding on the corpus.
1661 |
1662 | Parameters
1663 | ----------
1664 | data : list of lists of str
1665 | The input file used to initialize the model.
1666 | min_count : int
1667 | Ignores all words with total frequency lower than this.
1668 | window : int
1669 | Maximum distance between the current and predicted word within a sentence.
1670 | vector_size : int
1671 | Dimensionality of the word vectors.
1672 | workers : int
1673 |             Use this many worker threads to train the model (faster training on multicore machines).
1674 | """
1675 | self.model = Word2Vec(
1676 | sentences=data,
1677 | vector_size=vector_size,
1678 | window=window,
1679 | min_count=min_count,
1680 | workers = workers,
1681 | )
1682 |
1683 | return self.model.wv.vectors
1684 |
1685 | def get_matrices(
1686 | self,
1687 | ):
1688 |         """
1689 |         Method to obtain the matrices after the model has been initialized.
1690 |
1691 |         Returns
1692 |         -------
1693 |         numpy.array : float
1694 |             The probability of a word given a topic.
1695 |         numpy.array : float
1696 |             The probability of a topic given a document.
1697 |         """
1695 | sparse_document_term_matrix = self._create_sparse_local_term_weights(
1696 | self.input_file,
1697 | self._vocabulary_size,
1698 | self._word_to_index,
1699 | )
1700 |
1701 | self.word_embedding = self.get_word_embedding(
1702 | data = self.input_file,
1703 | min_count= self.min_count,
1704 | vector_size = self.vector_size,
1705 | window = self.window,
1706 | workers = self.workers,
1707 | )
1708 |
1709 | partition_matrix = self._create_partition_matrix(
1710 | data = self.word_embedding,
1711 | number_of_clusters = self.num_topics,
1712 | method = self.cluster_method,
1713 | )
1714 |
1715 | return self._create_probability_matrices(
1716 | algorithm='flsa-e',
1717 | prob_topic_given_word_transpose = partition_matrix,
1718 | local_term_weights = sparse_document_term_matrix,
1719 | )
1720 |
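    # Illustrative usage (hypothetical, not from the original code; assumes
    # 'corpus' is a list of tokenized documents):
    #     model = FLSA_E(input_file=corpus, num_topics=10, vector_size=20,
    #                    window=5, min_count=1, workers=4)
    #     pwgt, ptgd = model.get_matrices()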
--------------------------------------------------------------------------------