Location: code7788 > text

Picking Super Lotto and Double Color Ball lottery numbers in Python (LSTM prediction and random selection)

Popularity: 360 · 2025-02-24 11:22:40

Articles are for reference only

1. LSTM prediction

First, crawl the data

This is a big lottery, from 2007 to the latest issue

import requests
from bs4 import BeautifulSoup
import csv

# Target URL: Super Lotto (dlt) history since issue 07001.
# NOTE(review): the host/path were garbled in the source; this is the usual
# 500.com chart URL for tables with tbody id="tdata" — confirm before running.
url = 'http://datachart.500.com/dlt/history/newinc/history.php?start=07001'

# Send the HTTP request
response = requests.get(url)
response.encoding = 'utf-8'  # Make sure the text is decoded correctly

# Parse the HTML content
soup = BeautifulSoup(response.text, 'html.parser')

# Locate the table body containing the lottery data
tbody = soup.find('tbody', id="tdata")

# Collected lottery rows
lottery_data = []

# Iterate through each row of data
for tr in tbody.find_all('tr'):
    tds = tr.find_all('td')
    if tds:
        # Extract each cell's text and add the row to the list
        lottery_data.append([td.get_text(strip=True) for td in tds])

# Write to a CSV file
with open('dlt_lottery_data.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    # Optional header row:
    # writer.writerow(['Issue No.', 'Number 1', 'Number 2', 'Number 3', 'Number 4', 'Number 5', 'Number 6', 'Number 7'])
    # Write the data rows
    writer.writerows(lottery_data)

print('The data scraping is completed and saved to the dlt_lottery_data.csv file.')

Below is the crawler for the Double Color Ball

import requests
from bs4 import BeautifulSoup
import csv

# Target URL: Double Color Ball (ssq) history since issue 07001.
# NOTE(review): the host/path were garbled in the source; this is the usual
# 500.com chart URL for tables with tbody id="tdata" — confirm before running.
url = 'http://datachart.500.com/ssq/history/newinc/history.php?start=07001'

# Send the HTTP request
response = requests.get(url)
response.encoding = 'utf-8'  # Make sure the text is decoded correctly

# Parse the HTML content
soup = BeautifulSoup(response.text, 'html.parser')

# Locate the table body containing the lottery data
tbody = soup.find('tbody', id="tdata")

# Collected lottery rows
lottery_data = []

# Iterate through each row of data
for tr in tbody.find_all('tr'):
    tds = tr.find_all('td')
    if tds:
        # Extract each cell's text and add the row to the list
        lottery_data.append([td.get_text(strip=True) for td in tds])

# Write to a CSV file
with open('ssq_lottery_data.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    # Optional header row:
    # writer.writerow(['Issue No.', 'Number 1', 'Number 2', 'Number 3', 'Number 4', 'Number 5', 'Number 6', 'Number 7'])
    # Write the data rows
    writer.writerows(lottery_data)

print('The data scraping is completed and saved to the ssq_lottery_data.csv file.')

 

Process the crawled data

The Super Lotto is 5+2 and the Double Color Ball is 6+1. The two formats differ, so be careful to distinguish them.

Lotto

import csv

import pandas as pd


def get_data(path):
    """Read a Super Lotto (5+2) history CSV.

    Returns (r_data, b_data): lists of the 5 front-area and 2 back-area
    numbers per draw, oldest draw first.
    """
    r_data = []
    b_data = []
    with open(path, 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            # Columns: issue no., 5 front numbers, 2 back numbers, then stats.
            # (The source sliced row[1:7]/row[7:8], which is the 6+1 Double
            # Color Ball layout — wrong for the 5+2 Super Lotto.)
            r_data.append(list(map(lambda x: int(x), row[1:6])))
            b_data.append(list(map(lambda x: int(x), row[6:8])))
    # The scraped table is newest-first; reverse to chronological order.
    r_data.reverse()
    b_data.reverse()
    return r_data, b_data


def process_data():
    """Load the Super Lotto data file and return (front, back) number lists."""
    # The source pointed at ssq_lottery_data.csv here; this is the Lotto
    # variant, so read the dlt file instead.
    p = r"./dlt_lottery_data.csv"
    r_data, b_data = get_data(p)
    return r_data, b_data


if __name__ == '__main__':
    process_data()

Below is the Double Color Ball

import csv

import pandas as pd


def get_data(path):
    """Read a Double Color Ball (6+1) history CSV.

    Returns (r_data, b_data): lists of the 6 red numbers and the 1 blue
    number per draw, oldest draw first.
    """
    r_data = []
    b_data = []
    with open(path, 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            # Columns: issue no., 6 red numbers, 1 blue number, then stats.
            r_data.append(list(map(lambda x: int(x), row[1:7])))
            b_data.append(list(map(lambda x: int(x), row[7:8])))
    # The scraped table is newest-first; reverse to chronological order.
    r_data.reverse()
    b_data.reverse()
    return r_data, b_data


def process_data():
    """Load the Double Color Ball data file and return (red, blue) number lists."""
    p = r"./ssq_lottery_data.csv"
    r_data, b_data = get_data(p)
    return r_data, b_data


if __name__ == '__main__':
    process_data()

 

 

Let's start defining the model

# Define the LSTM model
class LSTMModel(nn.Module):
    """LSTM followed by a linear head: maps a (batch, seq, input_size)
    sequence to a (batch, output_size) prediction of the next draw."""

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Zero-initialized hidden and cell states on the input's device.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)

        out, _ = self.lstm(x, (h0, c0))
        # Use only the last time step's output for the prediction.
        out = self.fc(out[:, -1, :])
        return out

 

Standardize the data before training and convert it into tensor format

def trans_process_data(seq_length):
    """Load the draws, standardize them, and build sliding-window pairs.

    Returns the standardized full series, the (train, target) tensors for
    both ball groups, and the per-column mean/std used to de-standardize
    predictions later.
    """
    r_data, b_data = process_data()

    r_data = np.array(r_data)
    b_data = np.array(b_data)
    # Convert to PyTorch tensors
    r_data = torch.tensor(r_data, dtype=torch.float32)
    b_data = torch.tensor(b_data, dtype=torch.float32)

    # Standardize (per-column z-score)
    r_mean = r_data.mean(dim=0)
    r_std = r_data.std(dim=0)
    r_data = (r_data - r_mean) / r_std

    # Standardize
    b_mean = b_data.mean(dim=0)
    b_std = b_data.std(dim=0)
    b_data = (b_data - b_mean) / b_std

    r_train = []
    r_target = []
    b_train = []
    b_target = []

    # Sliding windows: seq_length consecutive draws in, the next draw as target.
    for i in range(len(r_data) - seq_length):
        r_train.append(r_data[i:i + seq_length])
        r_target.append(r_data[i + seq_length])
    r_train = torch.stack(r_train)
    r_target = torch.stack(r_target)

    for i in range(len(b_data) - seq_length):
        b_train.append(b_data[i:i + seq_length])
        b_target.append(b_data[i + seq_length])
    b_train = torch.stack(b_train)
    b_target = torch.stack(b_target)

    return r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std

 

Training functions

def start_train(input_size, hidden_size, output_size, num_layers, train_data, target_data, num_epochs=100):
    """Train an LSTMModel on (train_data, target_data) and return the model.

    Uses MSE loss with Adam; the learning rate drops from 0.05 to 0.01
    halfway through training.
    """
    model = LSTMModel(input_size, hidden_size, output_size, num_layers)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.05)
    # Train the model
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()
        # Forward pass
        outputs = model(train_data)
        loss = criterion(outputs, target_data)
        # Backpropagation and optimization
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
        # Halfway through, rebuild the optimizer with a lower learning rate.
        if epoch == int(num_epochs/2):
            optimizer = optim.Adam(model.parameters(), lr=0.01)
    return model

 

Prediction function

def start_predicted(model, predicted_data):
    """Run the model on one input sequence and return its prediction."""
    model.eval()
    with torch.no_grad():
        test_input = predicted_data.unsqueeze(0)  # add a batch dimension
        predicted = model(test_input)
    return predicted

 

The red balls and blue balls are trained and predicted separately, so start two training runs

def start_all_train(hidden_size, num_layers, num_epochs, seq_length):
    """Train one model per ball group and predict the next draw for each.

    Returns (r_predicted, b_predicted) de-standardized back to number scale.
    """
    r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std = trans_process_data(seq_length)
    # Super Lotto front area: 5 numbers per draw.
    r_size = 5
    r_model = start_train(r_size, hidden_size, r_size, num_layers, r_train, r_target, num_epochs)
    # Predict from the most recent seq_length draws.
    predicted_data = r_data[-seq_length:]
    r_predicted = start_predicted(r_model, predicted_data)
    print("--------------------------bbbbb-------------------------------------------")
    # Super Lotto back area: 2 numbers per draw.
    b_size = 2
    b_model = start_train(b_size, hidden_size, b_size, num_layers, b_train, b_target, num_epochs)
    predicted_data = b_data[-seq_length:]
    b_predicted = start_predicted(b_model, predicted_data)

    print(r_predicted)
    print(b_predicted)

    # Undo the z-score standardization to get real number values.
    r_predicted = r_predicted * r_std + r_mean
    b_predicted = b_predicted * b_std + b_mean

    print(r_predicted)
    print(b_predicted)

    return r_predicted, b_predicted

 

Complete code

import os
import sys

# Make sibling modules (data_process) importable when run as a script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)

from data_process import process_data
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np


# Define the LSTM model
class LSTMModel(nn.Module):
    """LSTM followed by a linear head: maps a (batch, seq, input_size)
    sequence to a (batch, output_size) prediction of the next draw."""

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Zero-initialized hidden and cell states on the input's device.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)

        out, _ = self.lstm(x, (h0, c0))
        # Use only the last time step's output for the prediction.
        out = self.fc(out[:, -1, :])
        return out


def trans_process_data(seq_length):
    """Load the draws, standardize them, and build sliding-window pairs.

    Returns the standardized full series, the (train, target) tensors for
    both ball groups, and the per-column mean/std used to de-standardize
    predictions later.
    """
    r_data, b_data = process_data()

    r_data = np.array(r_data)
    b_data = np.array(b_data)
    # Convert to PyTorch tensors
    r_data = torch.tensor(r_data, dtype=torch.float32)
    b_data = torch.tensor(b_data, dtype=torch.float32)

    # Standardize (per-column z-score)
    r_mean = r_data.mean(dim=0)
    r_std = r_data.std(dim=0)
    r_data = (r_data - r_mean) / r_std

    # Standardize
    b_mean = b_data.mean(dim=0)
    b_std = b_data.std(dim=0)
    b_data = (b_data - b_mean) / b_std

    r_train = []
    r_target = []
    b_train = []
    b_target = []

    # Sliding windows: seq_length consecutive draws in, the next draw as target.
    for i in range(len(r_data) - seq_length):
        r_train.append(r_data[i:i + seq_length])
        r_target.append(r_data[i + seq_length])
    r_train = torch.stack(r_train)
    r_target = torch.stack(r_target)

    for i in range(len(b_data) - seq_length):
        b_train.append(b_data[i:i + seq_length])
        b_target.append(b_data[i + seq_length])
    b_train = torch.stack(b_train)
    b_target = torch.stack(b_target)

    return r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std


def start_train(input_size, hidden_size, output_size, num_layers, train_data, target_data, num_epochs=100):
    """Train an LSTMModel on (train_data, target_data) and return the model.

    Uses MSE loss with Adam; the learning rate drops from 0.05 to 0.01
    halfway through training.
    """
    model = LSTMModel(input_size, hidden_size, output_size, num_layers)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.05)
    # Train the model
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()
        # Forward pass
        outputs = model(train_data)
        loss = criterion(outputs, target_data)
        # Backpropagation and optimization
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
        # Halfway through, rebuild the optimizer with a lower learning rate.
        if epoch == int(num_epochs/2):
            optimizer = optim.Adam(model.parameters(), lr=0.01)
    return model

def start_predicted(model, predicted_data):
    """Run the model on one input sequence and return its prediction."""
    model.eval()
    with torch.no_grad():
        test_input = predicted_data.unsqueeze(0)  # add a batch dimension
        predicted = model(test_input)
    return predicted

def start_all_train(hidden_size, num_layers, num_epochs, seq_length):
    """Train one model per ball group and predict the next draw for each.

    Returns (r_predicted, b_predicted) de-standardized back to number scale.
    """
    r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std = trans_process_data(seq_length)
    # Super Lotto front area: 5 numbers per draw.
    r_size = 5
    r_model = start_train(r_size, hidden_size, r_size, num_layers, r_train, r_target, num_epochs)
    # Predict from the most recent seq_length draws.
    predicted_data = r_data[-seq_length:]
    r_predicted = start_predicted(r_model, predicted_data)
    print("--------------------------bbbbb-------------------------------------------")
    # Super Lotto back area: 2 numbers per draw.
    b_size = 2
    b_model = start_train(b_size, hidden_size, b_size, num_layers, b_train, b_target, num_epochs)
    predicted_data = b_data[-seq_length:]
    b_predicted = start_predicted(b_model, predicted_data)

    print(r_predicted)
    print(b_predicted)

    # Undo the z-score standardization to get real number values.
    r_predicted = r_predicted * r_std + r_mean
    b_predicted = b_predicted * b_std + b_mean

    print(r_predicted)
    print(b_predicted)

    return r_predicted, b_predicted



if __name__ == '__main__':
    # Hyperparameters: LSTM width/depth, training length, and the number of
    # past draws fed in as one input sequence.
    hidden_size = 20
    num_layers = 3
    num_epochs = 1000
    seq_length = 10

    # Train both models and predict the next draw for each ball group.
    r_predicted, b_predicted = start_all_train(hidden_size, num_layers, num_epochs, seq_length)

 

2. Random prediction

Below is the random number selection prediction

import random
import numpy as np

from collections import Counter


# The Super Lotto differs from the Double Color Ball
r_len = 5
r_num = 35

b_len = 2
b_num = 12

# Double Color Ball
# r_len = 6
# r_num = 33
#
# b_len = 1
# b_num = 16


def simulate(trials, red_len, red_num, blue_len, blue_num):
    """Draw `trials` random tickets and tally how often each number appears.

    Returns (most, least): each is a list of red_len red numbers followed by
    blue_len blue numbers, with each area sorted ascending — the overall
    most-frequent and least-frequent numbers across all simulated tickets.
    """
    li_r = []
    li_b = []
    for _ in range(trials):
        # random.sample draws without replacement, like a real ticket.
        li_r.extend(random.sample(range(1, red_num + 1), red_len))
        li_b.extend(random.sample(range(1, blue_num + 1), blue_len))

    counter_li_r = Counter(li_r)
    counter_li_b = Counter(li_b)

    # Most frequently drawn numbers.
    most_r = sorted(n for n, _ in counter_li_r.most_common(red_len))
    most_b = sorted(n for n, _ in counter_li_b.most_common(blue_len))

    # Least frequently drawn numbers: the tail of most_common().
    # The original sliced [-len-1:-1], which skipped the rarest number;
    # [-len:] takes the true bottom `len` entries.
    least_r = sorted(n for n, _ in counter_li_r.most_common()[-red_len:])
    least_b = sorted(n for n, _ in counter_li_b.most_common()[-blue_len:])

    return most_r + most_b, least_r + least_b


if __name__ == '__main__':
    # Heavy simulation is guarded so importing this module stays cheap.
    # (The original also printed the loop index every iteration — dropped,
    # as 100M progress lines dominate the runtime.)
    number = 100000000
    most, least = simulate(number, r_len, r_num, b_len, b_num)
    print("most: ", most)
    print("least: ", least)

 

Good luck, congratulations on winning the first prize